Diffstat (limited to 'src/libcamera')
-rw-r--r--  src/libcamera/base/backtrace.cpp | 14
-rw-r--r--  src/libcamera/base/bound_method.cpp | 3
-rw-r--r--  src/libcamera/base/class.cpp | 2
-rw-r--r--  src/libcamera/base/event_dispatcher.cpp | 2
-rw-r--r--  src/libcamera/base/event_dispatcher_poll.cpp | 5
-rw-r--r--  src/libcamera/base/event_notifier.cpp | 8
-rw-r--r--  src/libcamera/base/file.cpp | 7
-rw-r--r--  src/libcamera/base/flags.cpp | 2
-rw-r--r--  src/libcamera/base/log.cpp | 221
-rw-r--r--  src/libcamera/base/memfd.cpp | 123
-rw-r--r--  src/libcamera/base/meson.build | 33
-rw-r--r--  src/libcamera/base/message.cpp | 2
-rw-r--r--  src/libcamera/base/mutex.cpp | 2
-rw-r--r--  src/libcamera/base/object.cpp | 63
-rw-r--r--  src/libcamera/base/semaphore.cpp | 6
-rw-r--r--  src/libcamera/base/shared_fd.cpp | 2
-rw-r--r--  src/libcamera/base/signal.cpp | 5
-rw-r--r--  src/libcamera/base/thread.cpp | 102
-rw-r--r--  src/libcamera/base/timer.cpp | 22
-rw-r--r--  src/libcamera/base/unique_fd.cpp | 2
-rw-r--r--  src/libcamera/base/utils.cpp | 226
-rw-r--r--  src/libcamera/bayer_format.cpp | 127
-rw-r--r--  src/libcamera/byte_stream_buffer.cpp | 2
-rw-r--r--  src/libcamera/camera.cpp | 298
-rw-r--r--  src/libcamera/camera_controls.cpp | 2
-rw-r--r--  src/libcamera/camera_lens.cpp | 15
-rw-r--r--  src/libcamera/camera_manager.cpp | 278
-rw-r--r--  src/libcamera/camera_sensor_properties.cpp | 151
-rw-r--r--  src/libcamera/color_space.cpp | 431
-rw-r--r--  src/libcamera/control_ids.cpp.in | 103
-rw-r--r--  src/libcamera/control_ids.yaml | 693
-rw-r--r--  src/libcamera/control_ids_core.yaml | 1052
-rw-r--r--  src/libcamera/control_ids_debug.yaml | 6
-rw-r--r--  src/libcamera/control_ids_draft.yaml | 327
-rw-r--r--  src/libcamera/control_ids_rpi.yaml | 61
-rw-r--r--  src/libcamera/control_ranges.yaml | 20
-rw-r--r--  src/libcamera/control_serializer.cpp | 38
-rw-r--r--  src/libcamera/control_validator.cpp | 2
-rw-r--r--  src/libcamera/controls.cpp | 193
-rw-r--r--  src/libcamera/converter.cpp | 458
-rw-r--r--  src/libcamera/converter/converter_v4l2_m2m.cpp | 751
-rw-r--r--  src/libcamera/converter/meson.build | 5
-rw-r--r--  src/libcamera/debug_controls.cpp | 164
-rw-r--r--  src/libcamera/delayed_controls.cpp | 16
-rw-r--r--  src/libcamera/device_enumerator.cpp | 16
-rw-r--r--  src/libcamera/device_enumerator_sysfs.cpp | 4
-rw-r--r--  src/libcamera/device_enumerator_udev.cpp | 20
-rw-r--r--  src/libcamera/dma_buf_allocator.cpp | 351
-rw-r--r--  src/libcamera/fence.cpp | 4
-rw-r--r--  src/libcamera/formats.cpp | 568
-rw-r--r--  src/libcamera/formats.yaml | 60
-rw-r--r--  src/libcamera/framebuffer.cpp | 85
-rw-r--r--  src/libcamera/framebuffer_allocator.cpp | 21
-rw-r--r--  src/libcamera/geometry.cpp | 144
-rw-r--r--  src/libcamera/ipa/meson.build | 7
-rw-r--r--  src/libcamera/ipa_controls.cpp | 10
-rw-r--r--  src/libcamera/ipa_data_serializer.cpp | 7
-rw-r--r--  src/libcamera/ipa_interface.cpp | 2
-rw-r--r--  src/libcamera/ipa_manager.cpp | 30
-rw-r--r--  src/libcamera/ipa_module.cpp | 30
-rw-r--r--  src/libcamera/ipa_proxy.cpp | 25
-rw-r--r--  src/libcamera/ipa_pub_key.cpp.in | 2
-rw-r--r--  src/libcamera/ipc_pipe.cpp | 2
-rw-r--r--  src/libcamera/ipc_pipe_unixsocket.cpp | 6
-rw-r--r--  src/libcamera/ipc_unixsocket.cpp | 13
-rw-r--r--  src/libcamera/mapped_framebuffer.cpp | 6
-rw-r--r--  src/libcamera/matrix.cpp | 145
-rw-r--r--  src/libcamera/media_device.cpp | 96
-rw-r--r--  src/libcamera/media_object.cpp | 67
-rw-r--r--  src/libcamera/meson.build | 150
-rw-r--r--  src/libcamera/orientation.cpp | 118
-rw-r--r--  src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | 1116
-rw-r--r--  src/libcamera/pipeline/imx8-isi/meson.build | 5
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.cpp | 32
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.h | 6
-rw-r--r--  src/libcamera/pipeline/ipu3/frames.cpp | 5
-rw-r--r--  src/libcamera/pipeline/ipu3/frames.h | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/imgu.cpp | 33
-rw-r--r--  src/libcamera/pipeline/ipu3/imgu.h | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/ipu3.cpp | 321
-rw-r--r--  src/libcamera/pipeline/ipu3/meson.build | 2
-rw-r--r--  src/libcamera/pipeline/mali-c55/mali-c55.cpp | 1755
-rw-r--r--  src/libcamera/pipeline/mali-c55/meson.build | 5
-rw-r--r--  src/libcamera/pipeline/meson.build | 15
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.cpp | 90
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.h | 32
-rw-r--r--  src/libcamera/pipeline/raspberrypi/meson.build | 7
-rw-r--r--  src/libcamera/pipeline/raspberrypi/raspberrypi.cpp | 2127
-rw-r--r--  src/libcamera/pipeline/raspberrypi/rpi_stream.h | 179
-rw-r--r--  src/libcamera/pipeline/rkisp1/meson.build | 2
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1.cpp | 1052
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 414
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1_path.h | 32
-rw-r--r--  src/libcamera/pipeline/rpi/common/delayed_controls.cpp | 293
-rw-r--r--  src/libcamera/pipeline/rpi/common/delayed_controls.h | 87
-rw-r--r--  src/libcamera/pipeline/rpi/common/meson.build | 7
-rw-r--r--  src/libcamera/pipeline/rpi/common/pipeline_base.cpp | 1528
-rw-r--r--  src/libcamera/pipeline/rpi/common/pipeline_base.h | 300
-rw-r--r--  src/libcamera/pipeline/rpi/common/rpi_stream.cpp (renamed from src/libcamera/pipeline/raspberrypi/rpi_stream.cpp) | 160
-rw-r--r--  src/libcamera/pipeline/rpi/common/rpi_stream.h | 199
-rw-r--r--  src/libcamera/pipeline/rpi/meson.build | 12
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/data/example.yaml | 46
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/data/meson.build | 9
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/meson.build | 7
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/vc4.cpp | 1030
-rw-r--r--  src/libcamera/pipeline/simple/converter.cpp | 399
-rw-r--r--  src/libcamera/pipeline/simple/converter.h | 98
-rw-r--r--  src/libcamera/pipeline/simple/meson.build | 3
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp | 835
-rw-r--r--  src/libcamera/pipeline/uvcvideo/meson.build | 2
-rw-r--r--  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 336
-rw-r--r--  src/libcamera/pipeline/vimc/meson.build | 2
-rw-r--r--  src/libcamera/pipeline/vimc/vimc.cpp | 129
-rw-r--r--  src/libcamera/pipeline/virtual/README.md | 65
-rw-r--r--  src/libcamera/pipeline/virtual/config_parser.cpp | 260
-rw-r--r--  src/libcamera/pipeline/virtual/config_parser.h | 39
-rw-r--r--  src/libcamera/pipeline/virtual/data/virtual.yaml | 36
-rw-r--r--  src/libcamera/pipeline/virtual/frame_generator.h | 29
-rw-r--r--  src/libcamera/pipeline/virtual/image_frame_generator.cpp | 172
-rw-r--r--  src/libcamera/pipeline/virtual/image_frame_generator.h | 49
-rw-r--r--  src/libcamera/pipeline/virtual/meson.build | 13
-rw-r--r--  src/libcamera/pipeline/virtual/test_pattern_generator.cpp | 125
-rw-r--r--  src/libcamera/pipeline/virtual/test_pattern_generator.h | 48
-rw-r--r--  src/libcamera/pipeline/virtual/virtual.cpp | 397
-rw-r--r--  src/libcamera/pipeline/virtual/virtual.h | 61
-rw-r--r--  src/libcamera/pipeline/vivid/meson.build | 5
-rw-r--r--  src/libcamera/pipeline/vivid/vivid.cpp | 409
-rw-r--r--  src/libcamera/pipeline_handler.cpp | 353
-rw-r--r--  src/libcamera/pixel_format.cpp | 14
-rw-r--r--  src/libcamera/process.cpp | 12
-rw-r--r--  src/libcamera/property_ids.cpp.in | 58
-rw-r--r--  src/libcamera/property_ids_core.yaml (renamed from src/libcamera/property_ids.yaml) | 60
-rw-r--r--  src/libcamera/property_ids_draft.yaml | 39
-rw-r--r--  src/libcamera/proxy/meson.build | 5
-rw-r--r--  src/libcamera/proxy/worker/meson.build | 8
-rw-r--r--  src/libcamera/pub_key.cpp | 52
-rw-r--r--  src/libcamera/request.cpp | 47
-rw-r--r--  src/libcamera/sensor/camera_sensor.cpp | 583
-rw-r--r--  src/libcamera/sensor/camera_sensor_legacy.cpp (renamed from src/libcamera/camera_sensor.cpp) | 824
-rw-r--r--  src/libcamera/sensor/camera_sensor_properties.cpp | 473
-rw-r--r--  src/libcamera/sensor/camera_sensor_raw.cpp | 1157
-rw-r--r--  src/libcamera/sensor/meson.build | 8
-rw-r--r--  src/libcamera/shared_mem_object.cpp | 231
-rw-r--r--  src/libcamera/software_isp/TODO | 208
-rw-r--r--  src/libcamera/software_isp/debayer.cpp | 127
-rw-r--r--  src/libcamera/software_isp/debayer.h | 54
-rw-r--r--  src/libcamera/software_isp/debayer_cpu.cpp | 835
-rw-r--r--  src/libcamera/software_isp/debayer_cpu.h | 163
-rw-r--r--  src/libcamera/software_isp/meson.build | 15
-rw-r--r--  src/libcamera/software_isp/software_isp.cpp | 370
-rw-r--r--  src/libcamera/software_isp/swstats_cpu.cpp | 434
-rw-r--r--  src/libcamera/software_isp/swstats_cpu.h | 97
-rw-r--r--  src/libcamera/source_paths.cpp | 2
-rw-r--r--  src/libcamera/stream.cpp | 49
-rw-r--r--  src/libcamera/sysfs.cpp | 2
-rw-r--r--  src/libcamera/tracepoints.cpp | 2
-rw-r--r--  src/libcamera/transform.cpp | 113
-rw-r--r--  src/libcamera/v4l2_device.cpp | 167
-rw-r--r--  src/libcamera/v4l2_pixelformat.cpp | 139
-rw-r--r--  src/libcamera/v4l2_subdevice.cpp | 1550
-rw-r--r--  src/libcamera/v4l2_videodevice.cpp | 418
-rw-r--r--  src/libcamera/version.cpp.in | 2
-rw-r--r--  src/libcamera/yaml_parser.cpp | 784
163 files changed, 25230 insertions, 6683 deletions
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
index 483492c3..0b04629c 100644
--- a/src/libcamera/base/backtrace.cpp
+++ b/src/libcamera/base/backtrace.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#include <libcamera/base/backtrace.h>
@@ -191,10 +191,22 @@ __attribute__((__noinline__))
bool Backtrace::unwindTrace()
{
#if HAVE_UNWIND
+/*
+ * unw_getcontext() for ARM32 is an inline assembly function using the stmia
+ * instruction to store SP and PC. This is considered by clang-11 as deprecated,
+ * and generates a warning.
+ */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winline-asm"
+#endif
unw_context_t uc;
int ret = unw_getcontext(&uc);
if (ret)
return false;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
unw_cursor_t cursor;
ret = unw_init_local(&cursor, &uc);
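For reference, capturing and printing a trace with this class is short; a minimal sketch, assuming the internal Backtrace API as documented (toString() takes an optional number of leading frames to skip):

    #include <iostream>
    #include <libcamera/base/backtrace.h>

    void dumpStack()
    {
            libcamera::Backtrace bt;
            /* Skip one level so the helper itself doesn't show in the trace. */
            std::cerr << bt.toString(1) << std::endl;
    }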
diff --git a/src/libcamera/base/bound_method.cpp b/src/libcamera/base/bound_method.cpp
index 3ecec51c..322029a8 100644
--- a/src/libcamera/base/bound_method.cpp
+++ b/src/libcamera/base/bound_method.cpp
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.cpp - Method bind and invocation
+ * Method bind and invocation
*/
#include <libcamera/base/bound_method.h>
#include <libcamera/base/message.h>
+#include <libcamera/base/object.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/thread.h>
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
index 9c2d9f21..61998398 100644
--- a/src/libcamera/base/class.cpp
+++ b/src/libcamera/base/class.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.cpp - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#include <libcamera/base/class.h>
diff --git a/src/libcamera/base/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
index 4be89e81..5f4a5cb4 100644
--- a/src/libcamera/base/event_dispatcher.cpp
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.cpp - Event dispatcher
+ * Event dispatcher
*/
#include <libcamera/base/event_dispatcher.h>
diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
index 7238a316..52bfb34e 100644
--- a/src/libcamera/base/event_dispatcher_poll.cpp
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -2,19 +2,18 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#include <libcamera/base/event_dispatcher_poll.h>
-#include <algorithm>
-#include <chrono>
#include <iomanip>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>
+#include <vector>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
diff --git a/src/libcamera/base/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
index fd93c087..495c281d 100644
--- a/src/libcamera/base/event_notifier.cpp
+++ b/src/libcamera/base/event_notifier.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.cpp - File descriptor event notifier
+ * File descriptor event notifier
*/
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/thread.h>
@@ -20,6 +21,8 @@
namespace libcamera {
+LOG_DECLARE_CATEGORY(Event)
+
/**
* \class EventNotifier
* \brief Notify of activity on a file descriptor
@@ -104,6 +107,9 @@ EventNotifier::~EventNotifier()
*/
void EventNotifier::setEnabled(bool enable)
{
+ if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
+ return;
+
if (enabled_ == enable)
return;
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
index fb3e276d..2b83a517 100644
--- a/src/libcamera/base/file.cpp
+++ b/src/libcamera/base/file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.cpp - File I/O operations
+ * File I/O operations
*/
#include <libcamera/base/file.h>
@@ -163,6 +163,9 @@ bool File::exists() const
* attempt to create the file with initial permissions set to 0666 (modified by
* the process' umask).
*
+ * The file is opened with the O_CLOEXEC flag, and will be closed automatically
+ * when a new binary is executed with one of the exec(3) functions.
+ *
* The error() status is updated.
*
* \return True on success, false otherwise
@@ -178,7 +181,7 @@ bool File::open(File::OpenMode mode)
if (mode & OpenModeFlag::WriteOnly)
flags |= O_CREAT;
- fd_ = UniqueFD(::open(name_.c_str(), flags, 0666));
+ fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
if (!fd_.isValid()) {
error_ = -errno;
return false;
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
index 3e4320ac..9981f2ed 100644
--- a/src/libcamera/base/flags.cpp
+++ b/src/libcamera/base/flags.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.cpp - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#include <libcamera/base/flags.h>
diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp
index 64813b66..3a656b8f 100644
--- a/src/libcamera/base/log.cpp
+++ b/src/libcamera/base/log.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.cpp - Logging infrastructure
+ * Logging infrastructure
*/
#include <libcamera/base/log.h>
@@ -21,6 +21,7 @@
#include <libcamera/logging.h>
#include <libcamera/base/backtrace.h>
+#include <libcamera/base/mutex.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
@@ -46,11 +47,11 @@
* their category are output to the log, while other messages are silently
* discarded.
*
- * By default log messages are output to stderr. They can be redirected to a log
- * file by setting the LIBCAMERA_LOG_FILE environment variable to the name of
- * the file. The file must be writable and is truncated if it exists. If any
+ * By default log messages are output to std::cerr. They can be redirected to a
+ * log file by setting the LIBCAMERA_LOG_FILE environment variable to the name
+ * of the file. The file must be writable and is truncated if it exists. If any
* error occurs when opening the file, the file is ignored and the log is output
- * to stderr.
+ * to std::cerr.
*/
/**
@@ -104,8 +105,8 @@ static const char *log_severity_name(LogSeverity severity)
class LogOutput
{
public:
- LogOutput(const char *path);
- LogOutput(std::ostream *stream);
+ LogOutput(const char *path, bool color);
+ LogOutput(std::ostream *stream, bool color);
LogOutput();
~LogOutput();
@@ -119,14 +120,16 @@ private:
std::ostream *stream_;
LoggingTarget target_;
+ bool color_;
};
/**
* \brief Construct a log output based on a file
* \param[in] path Full path to log file
+ * \param[in] color True to output colored messages
*/
-LogOutput::LogOutput(const char *path)
- : target_(LoggingTargetFile)
+LogOutput::LogOutput(const char *path, bool color)
+ : target_(LoggingTargetFile), color_(color)
{
stream_ = new std::ofstream(path);
}
@@ -134,9 +137,10 @@ LogOutput::LogOutput(const char *path)
/**
* \brief Construct a log output based on a stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*/
-LogOutput::LogOutput(std::ostream *stream)
- : stream_(stream), target_(LoggingTargetStream)
+LogOutput::LogOutput(std::ostream *stream, bool color)
+ : stream_(stream), target_(LoggingTargetStream), color_(color)
{
}
@@ -144,7 +148,7 @@ LogOutput::LogOutput(std::ostream *stream)
* \brief Construct a log output to syslog
*/
LogOutput::LogOutput()
- : stream_(nullptr), target_(LoggingTargetSyslog)
+ : stream_(nullptr), target_(LoggingTargetSyslog), color_(false)
{
openlog("libcamera", LOG_PID, 0);
}
@@ -179,28 +183,72 @@ bool LogOutput::isValid() const
}
}
+namespace {
+
+/*
+ * For more information about ANSI escape codes, see
+ * https://en.wikipedia.org/wiki/ANSI_escape_code#Colors.
+ */
+constexpr const char *kColorReset = "\033[0m";
+constexpr const char *kColorGreen = "\033[0;32m";
+constexpr const char *kColorBrightRed = "\033[1;31m";
+constexpr const char *kColorBrightGreen = "\033[1;32m";
+constexpr const char *kColorBrightYellow = "\033[1;33m";
+constexpr const char *kColorBrightBlue = "\033[1;34m";
+constexpr const char *kColorBrightMagenta = "\033[1;35m";
+constexpr const char *kColorBrightCyan = "\033[1;36m";
+constexpr const char *kColorBrightWhite = "\033[1;37m";
+
+} /* namespace */
+
/**
* \brief Write message to log output
* \param[in] msg Message to write
*/
void LogOutput::write(const LogMessage &msg)
{
+ static const char *const severityColors[] = {
+ kColorBrightCyan,
+ kColorBrightGreen,
+ kColorBrightYellow,
+ kColorBrightRed,
+ kColorBrightMagenta,
+ };
+
+ const char *categoryColor = color_ ? kColorBrightWhite : "";
+ const char *fileColor = color_ ? kColorBrightBlue : "";
+ const char *prefixColor = color_ ? kColorGreen : "";
+ const char *resetColor = color_ ? kColorReset : "";
+ const char *severityColor = "";
+ LogSeverity severity = msg.severity();
std::string str;
+ if (color_) {
+ if (static_cast<unsigned int>(severity) < std::size(severityColors))
+ severityColor = severityColors[severity];
+ else
+ severityColor = kColorBrightWhite;
+ }
+
switch (target_) {
case LoggingTargetSyslog:
- str = std::string(log_severity_name(msg.severity())) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
- writeSyslog(msg.severity(), str);
+ str = std::string(log_severity_name(severity)) + " "
+ + msg.category().name() + " " + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += msg.prefix() + ": ";
+ str += msg.msg();
+ writeSyslog(severity, str);
break;
case LoggingTargetStream:
case LoggingTargetFile:
str = "[" + utils::time_point_to_string(msg.timestamp()) + "] ["
+ std::to_string(Thread::currentId()) + "] "
- + log_severity_name(msg.severity()) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
+ + severityColor + log_severity_name(severity) + " "
+ + categoryColor + msg.category().name() + " "
+ + fileColor + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += prefixColor + msg.prefix() + ": ";
+ str += resetColor + msg.msg();
writeStream(str);
break;
default:
@@ -253,8 +301,8 @@ public:
void write(const LogMessage &msg);
void backtrace();
- int logSetFile(const char *path);
- int logSetStream(std::ostream *stream);
+ int logSetFile(const char *path, bool color);
+ int logSetStream(std::ostream *stream, bool color);
int logSetTarget(LoggingTarget target);
void logSetLevel(const char *category, const char *level);
@@ -267,10 +315,11 @@ private:
friend LogCategory;
void registerCategory(LogCategory *category);
+ LogCategory *findCategory(const char *name) const;
static bool destroyed_;
- std::unordered_set<LogCategory *> categories_;
+ std::vector<LogCategory *> categories_;
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
@@ -298,35 +347,47 @@ bool Logger::destroyed_ = false;
/**
* \brief Direct logging to a file
* \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
*
* This function directs the log output to the file identified by \a path. The
* previous log target, if any, is closed, and all new log messages will be
* written to the new log file.
*
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a path refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring when
+ * not suitable for the log target.
+ *
* If the function returns an error, the log target is not changed.
*
* \return Zero on success, or a negative error code otherwise
*/
-int logSetFile(const char *path)
+int logSetFile(const char *path, bool color)
{
- return Logger::instance()->logSetFile(path);
+ return Logger::instance()->logSetFile(path, color);
}
/**
* \brief Direct logging to a stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*
* This function directs the log output to \a stream. The previous log target,
* if any, is closed, and all new log messages will be written to the new log
* stream.
*
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a stream refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring when
+ * not suitable for the log target.
+ *
* If the function returns an error, the log file is not changed
*
* \return Zero on success, or a negative error code otherwise.
*/
-int logSetStream(std::ostream *stream)
+int logSetStream(std::ostream *stream, bool color)
{
- return Logger::instance()->logSetStream(stream);
+ return Logger::instance()->logSetStream(stream, color);
}
/**
@@ -437,14 +498,16 @@ void Logger::backtrace()
/**
* \brief Set the log file
* \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
*
* \sa libcamera::logSetFile()
*
* \return Zero on success, or a negative error code otherwise.
*/
-int Logger::logSetFile(const char *path)
+int Logger::logSetFile(const char *path, bool color)
{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(path);
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(path, color);
if (!output->isValid())
return -EINVAL;
@@ -455,14 +518,16 @@ int Logger::logSetFile(const char *path)
/**
* \brief Set the log stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*
* \sa libcamera::logSetStream()
*
* \return Zero on success, or a negative error code otherwise.
*/
-int Logger::logSetStream(std::ostream *stream)
+int Logger::logSetStream(std::ostream *stream, bool color)
{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(stream);
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(stream, color);
std::atomic_store(&output_, output);
return 0;
}
@@ -477,15 +542,11 @@ int Logger::logSetStream(std::ostream *stream)
*/
int Logger::logSetTarget(enum LoggingTarget target)
{
- std::shared_ptr<LogOutput> output;
-
switch (target) {
case LoggingTargetSyslog:
- output = std::make_shared<LogOutput>();
- std::atomic_store(&output_, output);
+ std::atomic_store(&output_, std::make_shared<LogOutput>());
break;
case LoggingTargetNone:
- output = nullptr;
std::atomic_store(&output_, std::shared_ptr<LogOutput>());
break;
default:
@@ -509,7 +570,7 @@ void Logger::logSetLevel(const char *category, const char *level)
return;
for (LogCategory *c : categories_) {
- if (!strcmp(c->name(), category)) {
+ if (c->name() == category) {
c->setSeverity(severity);
break;
}
@@ -518,9 +579,16 @@ void Logger::logSetLevel(const char *category, const char *level)
/**
* \brief Construct a logger
+ *
+ * If the LIBCAMERA_LOG_FILE environment variable is not set, log to std::cerr.
+ * The log messages are then colored by default. This can be overridden by
+ * setting the LIBCAMERA_LOG_NO_COLOR environment variable to disable coloring.
*/
Logger::Logger()
{
+ bool color = !utils::secure_getenv("LIBCAMERA_LOG_NO_COLOR");
+ logSetStream(&std::cerr, color);
+
parseLogFile();
parseLogLevels();
}
@@ -531,22 +599,21 @@ Logger::Logger()
* If the LIBCAMERA_LOG_FILE environment variable is set, open the file it
* points to and redirect the logger output to it. If the environment variable
* is set to "syslog", then the logger output will be directed to syslog. Errors
- * are silently ignored and don't affect the logger output (set to stderr).
+ * are silently ignored and don't affect the logger output (set to std::cerr by
+ * default).
*/
void Logger::parseLogFile()
{
const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
- if (!file) {
- logSetStream(&std::cerr);
+ if (!file)
return;
- }
if (!strcmp(file, "syslog")) {
logSetTarget(LoggingTargetSyslog);
return;
}
- logSetFile(file);
+ logSetFile(file, false);
}
/**
@@ -642,12 +709,12 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
* \brief Register a log category with the logger
* \param[in] category The log category
*
- * Log categories must have unique names. If a category with the same name
- * already exists this function performs no operation.
+ * Log categories must have unique names. It is invalid to call this function
+ * if a log category with the same name already exists.
*/
void Logger::registerCategory(LogCategory *category)
{
- categories_.insert(category);
+ categories_.push_back(category);
const std::string &name = category->name();
for (const std::pair<std::string, LogSeverity> &level : levels_) {
@@ -672,6 +739,22 @@ void Logger::registerCategory(LogCategory *category)
}
/**
+ * \brief Find an existing log category with the given name
+ * \param[in] name Name of the log category
+ * \return The pointer to the found log category or nullptr if not found
+ */
+LogCategory *Logger::findCategory(const char *name) const
+{
+ if (auto it = std::find_if(categories_.begin(), categories_.end(),
+ [name](auto c) { return c->name() == name; });
+ it != categories_.end()) {
+ return *it;
+ }
+
+ return nullptr;
+}
+
+/**
* \enum LogSeverity
* Log message severity
* \var LogDebug
@@ -696,13 +779,35 @@ void Logger::registerCategory(LogCategory *category)
*/
/**
+ * \brief Create a new LogCategory or return an existing one
+ * \param[in] name Name of the log category
+ *
+ * Create and return a new LogCategory with the given name if such a category
+ * does not yet exist, or return the existing one.
+ *
+ * \return The pointer to the LogCategory
+ */
+LogCategory *LogCategory::create(const char *name)
+{
+ static Mutex mutex_;
+ MutexLocker locker(mutex_);
+ LogCategory *category = Logger::instance()->findCategory(name);
+
+ if (!category) {
+ category = new LogCategory(name);
+ Logger::instance()->registerCategory(category);
+ }
+
+ return category;
+}
+
+/**
* \brief Construct a log category
* \param[in] name The category name
*/
LogCategory::LogCategory(const char *name)
: name_(name), severity_(LogSeverity::LogInfo)
{
- Logger::instance()->registerCategory(this);
}
/**
@@ -739,7 +844,7 @@ void LogCategory::setSeverity(LogSeverity severity)
*/
const LogCategory &LogCategory::defaultCategory()
{
- static const LogCategory *category = new LogCategory("default");
+ static const LogCategory *category = LogCategory::create("default");
return *category;
}
@@ -760,14 +865,17 @@ const LogCategory &LogCategory::defaultCategory()
* will be displayed
* \param[in] severity The log message severity, controlling how the message
* will be displayed
+ * \param[in] prefix The log message prefix
*
* Create a log message pertaining to line \a line of file \a fileName. The
* \a severity argument sets the message severity to control whether it will be
- * output or dropped.
+ * output or dropped. The \a prefix optionally identifies the object instance
+ * logging the message.
*/
LogMessage::LogMessage(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity)
- : category_(category), severity_(severity)
+ const LogCategory &category, LogSeverity severity,
+ const std::string &prefix)
+ : category_(category), severity_(severity), prefix_(prefix)
{
init(fileName, line);
}
@@ -857,6 +965,12 @@ LogMessage::~LogMessage()
*/
/**
+ * \fn LogMessage::prefix()
+ * \brief Retrieve the prefix of the log message
+ * \return The prefix of the message
+ */
+
+/**
* \fn LogMessage::msg()
* \brief Retrieve the message text of the log message
* \return The message text of the message, as a string
@@ -903,12 +1017,9 @@ Loggable::~Loggable()
LogMessage Loggable::_log(const LogCategory *category, LogSeverity severity,
const char *fileName, unsigned int line) const
{
- LogMessage msg(fileName, line,
- category ? *category : LogCategory::defaultCategory(),
- severity);
-
- msg.stream() << logPrefix() << ": ";
- return msg;
+ return LogMessage(fileName, line,
+ category ? *category : LogCategory::defaultCategory(),
+ severity, logPrefix());
}
/**
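A usage sketch of the extended logging API introduced above (public functions from <libcamera/logging.h>; the file name is illustrative):

    #include <fstream>
    #include <iostream>
    #include <libcamera/logging.h>

    std::ofstream logFile("/tmp/libcamera.log");
    /* Log files usually shouldn't contain ANSI escape sequences. */
    libcamera::logSetStream(&logFile, false);

    /* Colored output on a terminal. */
    libcamera::logSetStream(&std::cerr, true);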
diff --git a/src/libcamera/base/memfd.cpp b/src/libcamera/base/memfd.cpp
new file mode 100644
index 00000000..ed0b299b
--- /dev/null
+++ b/src/libcamera/base/memfd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Anonymous file creation
+ */
+
+#include <libcamera/base/memfd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/memfd.h
+ * \brief Anonymous file creation
+ */
+
+#ifndef __DOXYGEN__
+namespace {
+
+/* uClibc doesn't provide the file sealing API. */
+#if not HAVE_FILE_SEALS
+#define F_ADD_SEALS 1033
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#endif
+
+#if not HAVE_MEMFD_CREATE
+int memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(SYS_memfd_create, name, flags);
+}
+#endif
+
+} /* namespace */
+#endif /* __DOXYGEN__ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(File)
+
+/**
+ * \class MemFd
+ * \brief Helper class to create anonymous files
+ *
+ * Anonymous files behave like regular files, and can be modified, truncated,
+ * memory-mapped and so on. Unlike regular files, they however live in RAM and
+ * don't have permanent backing storage.
+ */
+
+/**
+ * \enum MemFd::Seal
+ * \brief Seals for the MemFd::create() function
+ * \var MemFd::Seal::None
+ * \brief No seals (used as default value)
+ * \var MemFd::Seal::Shrink
+ * \brief Prevent the memfd from shrinking
+ * \var MemFd::Seal::Grow
+ * \brief Prevent the memfd from growing
+ */
+
+/**
+ * \typedef MemFd::Seals
+ * \brief A bitwise combination of MemFd::Seal values
+ */
+
+/**
+ * \brief Create an anonymous file
+ * \param[in] name The file name (displayed in symbolic links in /proc/self/fd/)
+ * \param[in] size The file size
+ * \param[in] seals The file seals
+ *
+ * This function is a helper that wraps anonymous file (memfd) creation and
+ * sets the file size and optional seals.
+ *
+ * \return The descriptor of the anonymous file if creation succeeded, or an
+ * invalid UniqueFD otherwise
+ */
+UniqueFD MemFd::create(const char *name, std::size_t size, Seals seals)
+{
+ int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to allocate memfd storage for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ UniqueFD memfd(ret);
+
+ ret = ftruncate(memfd.get(), size);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to set memfd size for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ if (seals) {
+ int fileSeals = (seals & Seal::Shrink ? F_SEAL_SHRINK : 0)
+ | (seals & Seal::Grow ? F_SEAL_GROW : 0);
+
+ ret = fcntl(memfd.get(), F_ADD_SEALS, fileSeals);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to seal the memfd for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+ }
+
+ return memfd;
+}
+
+} /* namespace libcamera */
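A usage sketch for the new helper, assuming the MemFd::Seal flags combine into MemFd::Seals the way libcamera Flags types usually do:

    #include <sys/mman.h>
    #include <libcamera/base/memfd.h>

    using namespace libcamera;

    UniqueFD fd = MemFd::create("scratch", 4096,
                                MemFd::Seal::Shrink | MemFd::Seal::Grow);
    if (fd.isValid()) {
            void *mem = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd.get(), 0);
            /* ... use mem, then munmap(); the UniqueFD closes on destruction. */
    }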
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
index 7030ad1f..a742dfdf 100644
--- a/src/libcamera/base/meson.build
+++ b/src/libcamera/base/meson.build
@@ -1,29 +1,33 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_base_sources = files([
- 'backtrace.cpp',
- 'class.cpp',
+libcamera_base_public_sources = files([
'bound_method.cpp',
+ 'class.cpp',
+ 'flags.cpp',
+ 'object.cpp',
+ 'shared_fd.cpp',
+ 'signal.cpp',
+ 'unique_fd.cpp',
+])
+
+libcamera_base_internal_sources = files([
+ 'backtrace.cpp',
'event_dispatcher.cpp',
'event_dispatcher_poll.cpp',
'event_notifier.cpp',
'file.cpp',
- 'flags.cpp',
'log.cpp',
+ 'memfd.cpp',
'message.cpp',
'mutex.cpp',
- 'object.cpp',
'semaphore.cpp',
- 'shared_fd.cpp',
- 'signal.cpp',
'thread.cpp',
'timer.cpp',
- 'unique_fd.cpp',
'utils.cpp',
])
-libdw = cc.find_library('libdw', required : false)
-libunwind = cc.find_library('libunwind', required : false)
+libdw = dependency('libdw', required : false)
+libunwind = dependency('libunwind', required : false)
if cc.has_header_symbol('execinfo.h', 'backtrace')
config_h.set('HAVE_BACKTRACE', 1)
@@ -38,9 +42,9 @@ if libunwind.found()
endif
libcamera_base_deps = [
- dependency('threads'),
libatomic,
libdw,
+ libthreads,
libunwind,
]
@@ -49,8 +53,13 @@ libcamera_base_deps = [
libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
libcamera_base_lib = shared_library('libcamera-base',
- [libcamera_base_sources, libcamera_base_headers],
+ [
+ libcamera_base_public_sources,
+ libcamera_base_internal_sources,
+ libcamera_base_headers,
+ ],
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
cpp_args : libcamera_base_args,
diff --git a/src/libcamera/base/message.cpp b/src/libcamera/base/message.cpp
index 2da2a7ed..098faac6 100644
--- a/src/libcamera/base/message.cpp
+++ b/src/libcamera/base/message.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Message queue support
+ * Message queue support
*/
#include <libcamera/base/message.h>
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
index e34e8618..2a4542c4 100644
--- a/src/libcamera/base/mutex.cpp
+++ b/src/libcamera/base/mutex.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.cpp - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#include <libcamera/base/mutex.h>
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
index ec5b55d1..745d2565 100644
--- a/src/libcamera/base/object.cpp
+++ b/src/libcamera/base/object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.cpp - Base object
+ * Base object
*/
#include <libcamera/base/object.h>
@@ -40,8 +40,9 @@ LOG_DEFINE_CATEGORY(Object)
* Object class.
*
* Deleting an object from a thread other than the one the object is bound to is
- * unsafe, unless the caller ensures that the object isn't processing any
- * message concurrently.
+ * unsafe, unless the caller ensures that the object's thread is stopped and no
+ * parent or child of the object gets deleted concurrently. See
+ * Object::~Object() for more information.
*
* Object slots connected to signals will also run in the context of the
* object's thread, regardless of whether the signal is emitted in the same or
@@ -84,9 +85,20 @@ Object::Object(Object *parent)
* Object instances shall be destroyed from the thread they are bound to,
* otherwise undefined behaviour may occur. If deletion of an Object needs to
* be scheduled from a different thread, deleteLater() shall be used.
+ *
+ * As an exception to this rule, Object instances may be deleted from a
+ * different thread if the thread the instance is bound to is stopped through
+ * the whole duration of the object's destruction, *and* the parent and children
+ * of the object do not get deleted concurrently. The caller is responsible for
+ * fulfilling those requirements.
+ *
+ * In all cases Object instances shall be deleted before the Thread they are
+ * bound to.
*/
Object::~Object()
{
+ ASSERT(Thread::current() == thread_ || !thread_->isRunning());
+
/*
* Move signals to a private list to avoid concurrent iteration and
* deletion of items from Signal::disconnect().
@@ -116,8 +128,9 @@ Object::~Object()
* event loop that the object belongs to. This ensures the object is destroyed
* from the right context, as required by the libcamera threading model.
*
- * If this function is called before the thread's event loop is started, the
- * object will be deleted when the event loop starts.
+ * If this function is called before the thread's event loop is started or after
+ * it has stopped, the object will be deleted when the event loop (re)starts. If
+ * this never occurs, the object will be leaked.
*
* Deferred deletion can be used to control the destruction context with shared
* pointers. An object managed with shared pointers is deleted when the last
@@ -189,7 +202,11 @@ void Object::message(Message *msg)
{
switch (msg->type()) {
case Message::InvokeMessage: {
- InvokeMessage *iMsg = static_cast<InvokeMessage *>(msg);
+ /*
+ * A static_cast should be enough, but gcc 10 and 11 choke on
+ * it in release mode (with -O2 or -O3).
+ */
+ InvokeMessage *iMsg = dynamic_cast<InvokeMessage *>(msg);
Semaphore *semaphore = iMsg->semaphore();
iMsg->invoke();
@@ -209,6 +226,35 @@ void Object::message(Message *msg)
}
/**
+ * \fn Object::assertThreadBound()
+ * \brief Check if the caller complies with thread-bound constraints
+ * \param[in] message The message to be printed on error
+ *
+ * This function verifies the calling constraints required by the \threadbound
+ * definition. It shall be called at the beginning of member functions of an
+ * Object subclass that are explicitly marked as thread-bound in their
+ * documentation.
+ *
+ * If the thread-bound constraints are not met, the function prints \a message
+ * as an error message. For debug builds, it additionally causes an assertion
+ * error.
+ *
+ * \todo Verify the thread-bound requirements for functions marked as
+ * thread-bound at the class level.
+ *
+ * \return True if the call is thread-bound compliant, false otherwise
+ */
+bool Object::assertThreadBound(const char *message)
+{
+ if (Thread::current() == thread_)
+ return true;
+
+ LOG(Object, Error) << message;
+ ASSERT(false);
+ return false;
+}
+
+/**
* \fn R Object::invokeMethod()
* \brief Invoke a method asynchronously on an Object instance
* \param[in] func The object method to invoke
@@ -255,11 +301,12 @@ void Object::message(Message *msg)
* Moving an object that has a parent is not allowed, and causes undefined
* behaviour.
*
- * \context This function is thread-bound.
+ * \context This function is \threadbound.
*/
void Object::moveToThread(Thread *thread)
{
- ASSERT(Thread::current() == thread_);
+ if (!assertThreadBound("Object can't be moved from another thread"))
+ return;
if (thread_ == thread)
return;
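The lifetime rules documented above can be summarised with a sketch (Worker is a hypothetical Object subclass):

    libcamera::Thread thread;
    thread.start();

    Worker *worker = new Worker();  /* bound to the current thread */
    worker->moveToThread(&thread);  /* thread-bound: call from this thread */

    /* ... */

    worker->deleteLater();          /* deleted from thread's event loop */
    thread.exit(0);
    thread.wait();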
diff --git a/src/libcamera/base/semaphore.cpp b/src/libcamera/base/semaphore.cpp
index 4fe30293..862f3b31 100644
--- a/src/libcamera/base/semaphore.cpp
+++ b/src/libcamera/base/semaphore.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.cpp - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#include <libcamera/base/semaphore.h>
@@ -56,7 +56,9 @@ unsigned int Semaphore::available()
void Semaphore::acquire(unsigned int n)
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return available_ >= n; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return available_ >= n;
+ });
available_ -= n;
}
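A minimal producer/consumer sketch of the Semaphore API touched here:

    libcamera::Semaphore sem(0);

    /* Producer thread: */
    sem.release(1);

    /* Consumer thread, blocks until at least one unit is available: */
    sem.acquire(1);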
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
index c711cf57..7afc8ca5 100644
--- a/src/libcamera/base/shared_fd.cpp
+++ b/src/libcamera/base/shared_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.cpp - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#include <libcamera/base/shared_fd.h>
diff --git a/src/libcamera/base/signal.cpp b/src/libcamera/base/signal.cpp
index a46386a0..b782e050 100644
--- a/src/libcamera/base/signal.cpp
+++ b/src/libcamera/base/signal.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal & slot implementation
+ * Signal & slot implementation
*/
#include <libcamera/base/signal.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \file base/signal.h
@@ -74,7 +75,7 @@ SignalBase::SlotList SignalBase::slots()
*
* Signals and slots are a language construct aimed at communication between
* objects through the observer pattern without the need for boilerplate code.
- * See http://doc.qt.io/qt-5/signalsandslots.html for more information.
+ * See http://doc.qt.io/qt-6/signalsandslots.html for more information.
*
* Signals model events that can be observed from objects unrelated to the event
* source. Slots are functions that are called in response to a signal. Signals
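A minimal sketch of the construct being documented (Receiver is a hypothetical Object subclass with an onFrameDone(int) member function):

    libcamera::Signal<int> frameDone;
    Receiver receiver;

    frameDone.connect(&receiver, &Receiver::onFrameDone);
    frameDone.emit(42);     /* slot runs in receiver's thread context */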
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
index 6bda9d14..319bfda9 100644
--- a/src/libcamera/base/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.cpp - Thread support
+ * Thread support
*/
#include <libcamera/base/thread.h>
#include <atomic>
#include <list>
+#include <optional>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
@@ -18,6 +19,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \page thread Thread Support
@@ -63,42 +65,6 @@
* receiver's event loop, running in the receiver's thread. This mechanism can
* be overridden by selecting a different connection type when calling
* Signal::connect().
- *
- * \section thread-reentrancy Reentrancy and Thread-Safety
- *
- * Through the documentation, several terms are used to define how classes and
- * their member functions can be used from multiple threads.
- *
- * - A **reentrant** function may be called simultaneously from multiple
- * threads if and only if each invocation uses a different instance of the
- * class. This is the default for all member functions not explictly marked
- * otherwise.
- *
- * - \anchor thread-safe A **thread-safe** function may be called
- * simultaneously from multiple threads on the same instance of a class. A
- * thread-safe function is thus reentrant. Thread-safe functions may also be
- * called simultaneously with any other reentrant function of the same class
- * on the same instance.
- *
- * - \anchor thread-bound A **thread-bound** function may be called only from
- * the thread that the class instances lives in (see section \ref
- * thread-objects). For instances of classes that do not derive from the
- * Object class, this is the thread in which the instance was created. A
- * thread-bound function is not thread-safe, and may or may not be reentrant.
- *
- * Neither reentrancy nor thread-safety, in this context, mean that a function
- * may be called simultaneously from the same thread, for instance from a
- * callback invoked by the function. This may deadlock and isn't allowed unless
- * separately documented.
- *
- * A class is defined as reentrant, thread-safe or thread-bound if all its
- * member functions are reentrant, thread-safe or thread-bound respectively.
- * Some member functions may additionally be documented as having additional
- * thread-related attributes.
- *
- * Most classes are reentrant but not thread-safe, as making them fully
- * thread-safe would incur locking costs considered prohibitive for the
- * expected use cases.
*/
/**
@@ -151,7 +117,7 @@ private:
friend class ThreadMain;
Thread *thread_;
- bool running_;
+ bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
pid_t tid_;
Mutex mutex_;
@@ -163,6 +129,8 @@ private:
int exitCode_;
MessageQueue messages_;
+
+ std::optional<cpu_set_t> cpuset_;
};
/**
@@ -289,6 +257,8 @@ void Thread::start()
data_->exit_.store(false, std::memory_order_relaxed);
thread_ = std::thread(&Thread::startThread, this);
+
+ setThreadAffinityInternal();
}
void Thread::startThread()
@@ -370,6 +340,12 @@ void Thread::run()
void Thread::finishThread()
{
+ /*
+ * Objects may have been scheduled for deletion right before the thread
+ * exited. Ensure they get deleted now, before the thread stops.
+ */
+ dispatchMessages(Message::Type::DeferredDelete);
+
data_->mutex_.lock();
data_->running_ = false;
data_->mutex_.unlock();
@@ -422,11 +398,15 @@ bool Thread::wait(utils::duration duration)
{
MutexLocker locker(data_->mutex_);
+ auto isRunning = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
+ return !data_->running_;
+ });
+
if (duration == utils::duration::max())
- data_->cv_.wait(locker, [&]() { return !data_->running_; });
+ data_->cv_.wait(locker, isRunning);
else
hasFinished = data_->cv_.wait_for(locker, duration,
- [&]() { return !data_->running_; });
+ isRunning);
}
if (thread_.joinable())
@@ -436,6 +416,48 @@ bool Thread::wait(utils::duration duration)
}
/**
+ * \brief Set the CPU affinity mask of the thread
+ * \param[in] cpus The list of CPU indices to which the thread affinity is set
+ *
+ * The CPU indices should be within [0, std::thread::hardware_concurrency()).
+ * If any index is invalid, this function won't modify the thread affinity and
+ * will return an error.
+ *
+ * \return 0 if all indices are valid, -EINVAL otherwise
+ */
+int Thread::setThreadAffinity(const Span<const unsigned int> &cpus)
+{
+ const unsigned int numCpus = std::thread::hardware_concurrency();
+
+ MutexLocker locker(data_->mutex_);
+ data_->cpuset_ = cpu_set_t();
+ CPU_ZERO(&data_->cpuset_.value());
+
+ for (const unsigned int &cpu : cpus) {
+ if (cpu >= numCpus) {
+ LOG(Thread, Error) << "Invalid CPU " << cpu << " for thread affinity";
+ return -EINVAL;
+ }
+
+ CPU_SET(cpu, &data_->cpuset_.value());
+ }
+
+ if (data_->running_)
+ setThreadAffinityInternal();
+
+ return 0;
+}
+
+void Thread::setThreadAffinityInternal()
+{
+ if (!data_->cpuset_)
+ return;
+
+ const cpu_set_t &cpuset = data_->cpuset_.value();
+ pthread_setaffinity_np(thread_.native_handle(), sizeof(cpuset), &cpuset);
+}
+
+/**
* \brief Check if the thread is running
*
* A Thread instance is considered as running once the underlying thread has
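A usage sketch of the new affinity API (the CPU indices are illustrative):

    libcamera::Thread worker;

    /* Pin the thread to CPUs 0 and 2; invalid indices yield -EINVAL. */
    const unsigned int cpus[] = { 0, 2 };
    if (worker.setThreadAffinity(cpus) == 0)
            worker.start();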
diff --git a/src/libcamera/base/timer.cpp b/src/libcamera/base/timer.cpp
index 187336e3..7b0f3725 100644
--- a/src/libcamera/base/timer.cpp
+++ b/src/libcamera/base/timer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Generic timer
+ * Generic timer
*/
#include <libcamera/base/timer.h>
@@ -63,16 +63,6 @@ Timer::~Timer()
}
/**
- * \fn Timer::start(unsigned int msec)
- * \brief Start or restart the timer with a timeout of \a msec
- * \param[in] msec The timer duration in milliseconds
- *
- * If the timer is already running it will be stopped and restarted.
- *
- * \context This function is \threadbound.
- */
-
-/**
* \brief Start or restart the timer with a timeout of \a duration
* \param[in] duration The timer duration in milliseconds
*
@@ -95,10 +85,8 @@ void Timer::start(std::chrono::milliseconds duration)
*/
void Timer::start(std::chrono::steady_clock::time_point deadline)
{
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " << can't be started from another thread";
+ if (!assertThreadBound("Timer can't be started from another thread"))
return;
- }
deadline_ = deadline;
@@ -124,13 +112,11 @@ void Timer::start(std::chrono::steady_clock::time_point deadline)
*/
void Timer::stop()
{
- if (!isRunning())
+ if (!assertThreadBound("Timer can't be stopped from another thread"))
return;
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " can't be stopped from another thread";
+ if (!isRunning())
return;
- }
unregisterTimer();
}
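A sketch of the thread-bound contract now enforced by assertThreadBound(); both calls below must come from the thread the Timer lives in:

    libcamera::Timer timer;

    timer.start(std::chrono::milliseconds(500));    /* timer's thread only */
    /* ... */
    timer.stop();                                   /* timer's thread only */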
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
index 83d6919c..d0649e4d 100644
--- a/src/libcamera/base/unique_fd.cpp
+++ b/src/libcamera/base/unique_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.cpp - File descriptor wrapper that owns a file descriptor
+ * File descriptor wrapper that owns a file descriptor
*/
#include <libcamera/base/unique_fd.h>
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
index 8cf8a5b2..bcfc1941 100644
--- a/src/libcamera/base/utils.cpp
+++ b/src/libcamera/base/utils.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * utils.cpp - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#include <libcamera/base/utils.h>
#include <iomanip>
+#include <locale.h>
#include <sstream>
#include <stdlib.h>
#include <string.h>
@@ -275,21 +276,6 @@ std::string details::StringSplitter::iterator::operator*() const
return ss_->str_.substr(pos_, count);
}
-bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const
-{
- return pos_ != other.pos_;
-}
-
-details::StringSplitter::iterator details::StringSplitter::begin() const
-{
- return iterator(this, 0);
-}
-
-details::StringSplitter::iterator details::StringSplitter::end() const
-{
- return iterator(this, std::string::npos);
-}
-
/**
* \fn template<typename Container, typename UnaryOp> \
* std::string utils::join(const Container &items, const std::string &sep, UnaryOp op)
@@ -407,6 +393,15 @@ std::string toAscii(const std::string &str)
*/
/**
+ * \fn Duration::Duration(const Rep &r)
+ * \brief Construct a Duration with \a r ticks
+ * \param[in] r The number of ticks
+ *
+ * The constructed \a Duration object is internally represented in double
+ * precision with \a r nanosecond ticks.
+ */
+
+/**
* \fn Duration::Duration(const std::chrono::duration<Rep, Period> &d)
* \brief Construct a Duration by converting an arbitrary std::chrono::duration
* \param[in] d The std::chrono::duration object to convert from
@@ -454,6 +449,205 @@ std::string toAscii(const std::string &str)
* \a b
*/
+#if HAVE_LOCALE_T
+
+namespace {
+
+/*
+ * RAII wrapper around locale_t instances, to support global locale instances
+ * without leaking memory.
+ */
+class Locale
+{
+public:
+ Locale(const char *locale)
+ {
+ locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
+ }
+
+ ~Locale()
+ {
+ freelocale(locale_);
+ }
+
+ locale_t locale() { return locale_; }
+
+private:
+ locale_t locale_;
+};
+
+Locale cLocale("C");
+
+} /* namespace */
+
+#endif /* HAVE_LOCALE_T */
+
+/**
+ * \brief Convert a string to a double independently of the current locale
+ * \param[in] nptr The string to convert
+ * \param[out] endptr Pointer to trailing portion of the string after conversion
+ *
+ * This function is a locale-independent version of the std::strtod() function.
+ * It behaves as the standard function, but uses the "C" locale instead of the
+ * current locale.
+ *
+ * \return The converted value, if any, or 0.0 if the conversion failed.
+ */
+double strtod(const char *__restrict nptr, char **__restrict endptr)
+{
+#if HAVE_LOCALE_T
+ return strtod_l(nptr, endptr, cLocale.locale());
+#else
+ /*
+ * If the libc implementation doesn't provide locale object support,
+ * assume that strtod() is locale-independent.
+ */
+ return ::strtod(nptr, endptr);
+#endif
+}
+
+/**
+ * \fn to_underlying(Enum e)
+ * \brief Convert an enumeration to its underlying type
+ * \param[in] e Enumeration value to convert
+ *
+ * This function is equivalent to the C++23 std::to_underlying().
+ *
+ * \return The value of e converted to its underlying type
+ */
+
+/**
+ * \class ScopeExitActions
+ * \brief An object that performs actions upon destruction
+ *
+ * The ScopeExitActions class is a simple object that performs user-provided
+ * actions upon destruction. It is meant to simplify cleanup tasks in error
+ * handling paths.
+ *
+ * When the code flow performs multiple sequential actions that each need a
+ * corresponding cleanup action, error handling quickly becomes tedious:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret) {
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * ret = startConsumer();
+ * if (ret) {
+ * stopProducer();
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * return 0;
+ * }
+ * \endcode
+ *
+ * This is prone to programming mistakes, as cleanup actions can easily be
+ * forgotten or ordered incorrectly. One strategy to simplify error handling is
+ * to use goto statements:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret)
+ * goto error_free;
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * goto error_stop;
+ *
+ * return 0;
+ *
+ * error_stop:
+ * stopProducer();
+ * error_free:
+ * freeMemory();
+ * return ret;
+ * }
+ * \endcode
+ *
+ * While this may be considered better, this solution is still quite
+ * error-prone. Beside the risk of picking the wrong error label, the error
+ * handling logic is separated from the normal code flow, which increases the
+ * risk of error when refactoring the code. Additionally, C++ doesn't allow
+ * goto statements to jump over local variable declarations, which can make
+ * usage of this pattern more difficult.
+ *
+ * The ScopeExitActions class solves these issues by allowing code that
+ * requires cleanup actions to be grouped with its corresponding error handling
+ * code:
+ *
+ * \code{.cpp}
+ * {
+ * ScopeExitActions actions;
+ *
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { freeMemory(); };
+ *
+ * ret = startProducer();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { stopProducer(); };
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * return ret;
+ *
+ * actions.release();
+ * return 0;
+ * }
+ * \endcode
+ *
+ * Error handlers are executed when the ScopeExitActions instance is destroyed,
+ * in the reverse order of their addition.
+ */
+
+ScopeExitActions::~ScopeExitActions()
+{
+ for (const auto &action : utils::reverse(actions_))
+ action();
+}
+
+/**
+ * \brief Add an exit action
+ * \param[in] action The action
+ *
+ * Add an exit action to the ScopeExitActions. Actions will be called upon
+ * destruction in the reverse order of their addition.
+ */
+void ScopeExitActions::operator+=(std::function<void()> &&action)
+{
+ actions_.push_back(std::move(action));
+}
+
+/**
+ * \brief Remove all exit actions
+ *
+ * This function should be called in scope exit paths that don't need the
+ * actions to be executed, such as success return paths from a function when
+ * the ScopeExitActions is used for error cleanup.
+ */
+void ScopeExitActions::release()
+{
+ actions_.clear();
+}
+
} /* namespace utils */
#ifndef __DOXYGEN__
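A sketch of why the locale-independent utils::strtod() added above matters (the locale name is illustrative):

    #include <libcamera/base/utils.h>

    /*
     * Even if the application switched to a comma-decimal locale such as
     * de_DE.UTF-8, this still parses the full "C"-locale literal.
     */
    char *end;
    double value = libcamera::utils::strtod("3.14", &end);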
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
index 9aaec0bc..3dab91fc 100644
--- a/src/libcamera/bayer_format.cpp
+++ b/src/libcamera/bayer_format.cpp
@@ -1,14 +1,15 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.cpp - Class to represent Bayer formats
+ * Class to represent Bayer formats
*/
#include "libcamera/internal/bayer_format.h"
#include <algorithm>
#include <map>
+#include <sstream>
#include <unordered_map>
#include <linux/media-bus-format.h>
@@ -60,6 +61,10 @@ namespace libcamera {
* \brief Format uses MIPI CSI-2 style packing
* \var BayerFormat::Packing::IPU3
* \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
*/
namespace {
@@ -139,6 +144,22 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P) } },
{ { BayerFormat::RGGB, 12, BayerFormat::Packing::CSI2 },
{ formats::SRGGB12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::None },
+ { formats::SBGGR14, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::None },
+ { formats::SGBRG14, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::None },
+ { formats::SGRBG14, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::None },
+ { formats::SRGGB14, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P) } },
{ { BayerFormat::BGGR, 16, BayerFormat::Packing::None },
{ formats::SBGGR16, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16) } },
{ { BayerFormat::GBRG, 16, BayerFormat::Packing::None },
@@ -147,12 +168,28 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
{ { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
{ formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
{ { BayerFormat::MONO, 8, BayerFormat::Packing::None },
{ formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::None },
{ formats::R10, V4L2PixelFormat(V4L2_PIX_FMT_Y10) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::CSI2 },
{ formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::None },
+ { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::CSI2 },
+ { formats::R12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y12P) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::None },
+ { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
};
const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
@@ -188,8 +225,14 @@ const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
{ MEDIA_BUS_FMT_SGBRG16_1X16, { BayerFormat::GBRG, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_SGRBG16_1X16, { BayerFormat::GRBG, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, { BayerFormat::BGGR, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, { BayerFormat::GBRG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, { BayerFormat::GRBG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, { BayerFormat::RGGB, 20, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y16_1X16, { BayerFormat::MONO, 16, BayerFormat::Packing::None } },
};
} /* namespace */
@@ -236,28 +279,10 @@ const BayerFormat &BayerFormat::fromMbusCode(unsigned int mbusCode)
*/
std::string BayerFormat::toString() const
{
- std::string result;
+ std::stringstream ss;
+ ss << *this;
- static const char *orderStrings[] = {
- "BGGR",
- "GBRG",
- "GRBG",
- "RGGB",
- "MONO"
- };
- if (isValid() && order <= MONO)
- result = orderStrings[order];
- else
- return "INVALID";
-
- result += "-" + std::to_string(bitDepth);
-
- if (packing == Packing::CSI2)
- result += "-CSI2P";
- else if (packing == Packing::IPU3)
- result += "-IPU3P";
-
- return result;
+ return ss.str();
}
/**
@@ -271,6 +296,42 @@ bool operator==(const BayerFormat &lhs, const BayerFormat &rhs)
}
/**
+ * \brief Insert a text representation of a BayerFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The BayerFormat
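+ *
+ * As an illustration of the formatting, for a 10-bit CSI-2 packed RGGB format:
+ * \code
+ * BayerFormat fmt{ BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2 };
+ * out << fmt; /* Inserts "RGGB-10-CSI2P" into the stream */
+ * \endcode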
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
+{
+ static const char *orderStrings[] = {
+ "BGGR-",
+ "GBRG-",
+ "GRBG-",
+ "RGGB-",
+ "MONO-"
+ };
+
+ if (!f.isValid() || f.order > BayerFormat::MONO) {
+ out << "INVALID";
+ return out;
+ }
+
+ /* The cast is required to avoid bitDepth being interpreted as a char. */
+ out << orderStrings[f.order] << static_cast<unsigned int>(f.bitDepth);
+
+ if (f.packing == BayerFormat::Packing::CSI2)
+ out << "-CSI2P";
+ else if (f.packing == BayerFormat::Packing::IPU3)
+ out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
+
+ return out;
+}
+
+/**
* \fn bool operator!=(const BayerFormat &lhs, const BayerFormat &rhs)
* \brief Compare two BayerFormats for inequality
* \return True if either order, bitdepth or packing are not equal, or false
@@ -340,11 +401,14 @@ BayerFormat BayerFormat::fromPixelFormat(PixelFormat format)
* \brief Apply a transform to this BayerFormat
* \param[in] t The transform to apply
*
- * Appplying a transform to an image stored in a Bayer format affects the Bayer
- * order. For example, performing a horizontal flip on the Bayer pattern
- * RGGB causes the RG rows of pixels to become GR, and the GB rows to become BG.
- * The transformed image would have a GRBG order. The bit depth and modifiers
- * are not affected.
+ * Applying a transform to an image stored in a Bayer format affects the Bayer
+ * order. For example, performing a horizontal flip on the Bayer pattern RGGB
+ * causes the RG rows of pixels to become GR, and the GB rows to become BG. The
+ * transformed image would have a GRBG order. Performing a vertical flip on the
+ * Bayer pattern RGGB causes the GB rows to come before the RG ones and the
+ * transformed image would have GBRG order. Applying both vertical and
+ * horizontal flips on the Bayer pattern RGGB results in transformed images with
+ * BGGR order. The bit depth and modifiers are not affected.
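+ *
+ * A brief sketch of the effect on the order:
+ * \code
+ * BayerFormat rggb{ BayerFormat::RGGB, 10, BayerFormat::Packing::None };
+ * BayerFormat flipped = rggb.transform(Transform::HFlip);
+ * /* flipped.order == BayerFormat::GRBG */
+ * \endcode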
*
* Horizontal and vertical flips are applied before transpose.
*
@@ -359,8 +423,11 @@ BayerFormat BayerFormat::transform(Transform t) const
/*
* Observe that flipping bit 0 of the Order enum performs a horizontal
- * mirror on the Bayer pattern (e.g. RGGB goes to GRBG). Similarly,
- * flipping bit 1 performs a vertical mirror operation on it. Hence:
+ * mirror on the Bayer pattern (e.g. RG/GB goes to GR/BG). Similarly,
+	 * flipping bit 1 performs a vertical mirror operation on it (e.g. RG/GB
+	 * goes to GB/RG). Applying both flips combines the horizontal and
+	 * vertical mirror operations on the Bayer pattern
+ * (e.g. RG/GB goes to BG/GR). Hence:
*/
if (!!(t & Transform::HFlip))
result.order = static_cast<Order>(result.order ^ 1);
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 881cd371..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
#include "libcamera/internal/byte_stream_buffer.h"
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index bb856d60..69a7ee53 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,14 +2,18 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
#include <array>
#include <atomic>
-#include <iomanip>
+#include <ios>
+#include <memory>
+#include <optional>
+#include <set>
+#include <sstream>
#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
@@ -21,8 +25,8 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_controls.h"
-#include "libcamera/internal/formats.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
/**
* \file libcamera/camera.h
@@ -96,6 +100,16 @@
* implemented in the above order at the hardware level. The libcamera pipeline
* handlers translate the pipeline model to the real hardware configuration.
*
+ * \subsection camera-sensor-model Camera Sensor Model
+ *
+ * By default, libcamera configures the camera sensor automatically based on the
+ * configuration of the streams. Applications may instead specify a manual
+ * configuration for the camera sensor. This allows precise control of the frame
+ * geometry and frame rate delivered by the sensor.
+ *
+ * More details about the camera sensor model implemented by libcamera are
+ * available in the libcamera camera-sensor-model documentation page.
+ *
* \subsection digital-zoom Digital Zoom
*
* Digital zoom is implemented as a combination of the cropping and scaling
@@ -106,11 +120,138 @@
* of view is affected by the pipeline.
*/
+/**
+ * \internal
+ * \file libcamera/internal/camera.h
+ * \brief Internal camera device handling
+ */
+
namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
+ * \class SensorConfiguration
+ * \brief Camera sensor configuration
+ *
+ * The SensorConfiguration class collects parameters to control the operations
+ * of the camera sensor, according to the abstract camera sensor model
+ * implemented by libcamera.
+ *
+ * \todo Applications shall fully populate all fields of the
+ * CameraConfiguration::sensorConfig class member before validating the
+ * CameraConfiguration. If the SensorConfiguration is not fully populated, or if
+ * any of its parameters cannot be applied to the sensor in use, the
+ * CameraConfiguration validation process will fail and return
+ * CameraConfiguration::Status::Invalid.
+ *
+ * Applications that populate the SensorConfiguration class members are
+ * expected to be highly-specialized applications that know what sensor
+ * they are operating with and what parameters are valid for the sensor in use.
+ *
+ * A detailed description of the abstract camera sensor model implemented by
+ * libcamera and the description of its configuration parameters is available
+ * in the libcamera documentation camera-sensor-model file.
+ */
+
+/**
+ * \var SensorConfiguration::bitDepth
+ * \brief The sensor image format bit depth
+ *
+ * The number of bits (resolution) used to represent a pixel sample.
+ */
+
+/**
+ * \var SensorConfiguration::analogCrop
+ * \brief The analog crop rectangle
+ *
+ * The selected portion of the active pixel array used to produce the image
+ * frame.
+ */
+
+/**
+ * \var SensorConfiguration::binning
+ * \brief Sensor binning configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the binning operations. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::binX
+ * \brief Horizontal binning factor
+ *
+ * The horizontal binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::binY
+ * \brief Vertical binning factor
+ *
+ * The vertical binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::skipping
+ * \brief The sensor skipping configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the skipping operations.
+ *
+ * If no skipping is performed, all the structure fields should be
+ * set to 1. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::xOddInc
+ * \brief Horizontal increment for odd rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::xEvenInc
+ * \brief Horizontal increment for even rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yOddInc
+ * \brief Vertical increment for odd columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yEvenInc
+ * \brief Vertical increment for even columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::outputSize
+ * \brief The frame output (visible) size
+ *
+ * The size of the data frame as received by the host processor.
+ */
+
+/**
+ * \brief Check if the sensor configuration is valid
+ *
+ * A sensor configuration is valid if it's fully populated.
+ *
+ * \todo For now, only allow applications to populate the bitDepth and the
+ * outputSize, as the skipping and binning factors are initialized to 1 and the
+ * analog crop is ignored.
+ *
+ * \return True if the sensor configuration is valid, false otherwise
+ */
+bool SensorConfiguration::isValid() const
+{
+ if (bitDepth && binning.binX && binning.binY &&
+ skipping.xOddInc && skipping.yOddInc &&
+ skipping.xEvenInc && skipping.yEvenInc &&
+ !outputSize.isNull())
+ return true;
+
+ return false;
+}
+
+/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
@@ -159,7 +300,7 @@ LOG_DECLARE_CATEGORY(Camera)
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
- : transform(Transform::Identity), config_({})
+ : orientation(Orientation::Rotate0), config_({})
{
}
@@ -183,19 +324,19 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* This function adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
- * \todo: Define exactly when to return each status code. Should stream
+ * \todo Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
- * \todo: Handle validation of buffers count when refactoring the buffers API.
+ * \todo Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfiguration::Invalid The configuration is invalid and can't
* be adjusted. This may only occur in extreme cases such as when the
* configuration is empty.
- * \retval CameraConfigutation::Adjusted The configuration has been adjusted
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
* and is now valid. Parameters may have changed for any stream, and stream
* configurations may have been removed. The caller shall check the
* configuration carefully.
@@ -316,17 +457,6 @@ std::size_t CameraConfiguration::size() const
return config_.size();
}
-namespace {
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- return info.isValid() &&
- info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-} /* namespace */
-
/**
* \enum CameraConfiguration::ColorSpaceFlag
* \brief Specify the behaviour of validateColorSpaces
@@ -357,8 +487,8 @@ bool isRaw(const PixelFormat &pixFmt)
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfigutation::Adjusted The configuration has been adjusted
- * and is now valid. The color space of some or all of the streams may bave
- * benn changed. The caller shall check the color spaces carefully.
+ * and is now valid. The color space of some or all of the streams may have
+ * been changed. The caller shall check the color spaces carefully.
* \retval CameraConfiguration::Valid The configuration was already valid and
* hasn't been adjusted.
*/
@@ -367,29 +497,33 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
Status status = Valid;
/*
- * Set all raw streams to the Raw color space, and make a note of the largest
- * non-raw stream with a defined color space (if there is one).
+ * Set all raw streams to the Raw color space, and make a note of the
+ * largest non-raw stream with a defined color space (if there is one).
*/
- int index = -1;
- for (auto [i, cfg] : utils::enumerate(config_)) {
- if (isRaw(cfg.pixelFormat)) {
- if (cfg.colorSpace != ColorSpace::Raw) {
- cfg.colorSpace = ColorSpace::Raw;
- status = Adjusted;
- }
- } else if (cfg.colorSpace && (index == -1 || cfg.size > config_[i].size)) {
- index = i;
+ std::optional<ColorSpace> colorSpace;
+ Size size;
+
+ for (StreamConfiguration &cfg : config_) {
+ if (!cfg.colorSpace)
+ continue;
+
+ if (cfg.colorSpace->adjust(cfg.pixelFormat))
+ status = Adjusted;
+
+ if (cfg.colorSpace != ColorSpace::Raw && cfg.size > size) {
+ colorSpace = cfg.colorSpace;
+ size = cfg.size;
}
}
- if (index < 0 || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
+ if (!colorSpace || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
return status;
/* Make all output color spaces the same, if requested. */
for (auto &cfg : config_) {
- if (!isRaw(cfg.pixelFormat) &&
- cfg.colorSpace != config_[index].colorSpace) {
- cfg.colorSpace = config_[index].colorSpace;
+ if (cfg.colorSpace != ColorSpace::Raw &&
+ cfg.colorSpace != colorSpace) {
+ cfg.colorSpace = colorSpace;
status = Adjusted;
}
}
@@ -398,17 +532,35 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
}
/**
- * \var CameraConfiguration::transform
- * \brief User-specified transform to be applied to the image
+ * \var CameraConfiguration::sensorConfig
+ * \brief The camera sensor configuration
*
- * The transform is a user-specified 2D plane transform that will be applied
- * to the camera images by the processing pipeline before being handed to
- * the application. This is subsequent to any transform that is already
- * required to fix up any platform-defined rotation.
+ * The sensorConfig member allows manual control of the configuration of the
+ * camera sensor. By default, if sensorConfig is not set, the camera will
+ * configure the sensor automatically based on the configuration of the streams.
+ * Applications can override this by manually specifying the full sensor
+ * configuration.
*
- * The usual 2D plane transforms are allowed here (horizontal/vertical
- * flips, multiple of 90-degree rotations etc.), but the validate() function
- * may adjust this field at its discretion if the selection is not supported.
+ * Refer to the camera-sensor-model documentation and to the SensorConfiguration
+ * class documentation for details about the sensor configuration process.
+ *
+ * The camera sensor configuration applies to all streams produced by a camera
+ * from the same image source.
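+ *
+ * A minimal sketch of a manual sensor configuration (the values are
+ * illustrative and depend on the sensor in use):
+ * \code
+ * SensorConfiguration sensorConfig;
+ * sensorConfig.bitDepth = 10;
+ * sensorConfig.outputSize = { 1920, 1080 };
+ * config->sensorConfig = sensorConfig;
+ * \endcode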
+ */
+
+/**
+ * \var CameraConfiguration::orientation
+ * \brief The desired orientation of the images produced by the camera
+ *
+ * The orientation field is a user-specified 2D plane transformation that
+ * specifies how the application wants the camera images to be rotated in
+ * the memory buffers.
+ *
+ * If the orientation requested by the application cannot be obtained, the
+ * camera will not rotate or flip the images, and the validate() function will
+ * adjust this value to the native image orientation produced by the camera.
+ *
+ * By default the orientation field is set to Orientation::Rotate0.
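+ *
+ * For example, to request images rotated by 180 degrees (a sketch, assuming
+ * the camera supports the rotation):
+ * \code
+ * config->orientation = Orientation::Rotate180;
+ * \endcode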
*/
/**
@@ -416,6 +568,7 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
* \brief The vector of stream configurations
*/
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class Camera::Private
* \brief Base class for camera private data
@@ -451,6 +604,11 @@ Camera::Private::~Private()
*/
/**
+ * \fn Camera::Private::pipe() const
+ * \copydoc Camera::Private::pipe()
+ */
+
+/**
* \fn Camera::Private::validator()
* \brief Retrieve the control validator related to this camera
* \return The control validator associated with this camera
@@ -496,7 +654,7 @@ Camera::Private::~Private()
* facilitate debugging of internal request usage.
*
* The requestSequence_ tracks the number of requests queued to a camera
- * over its lifetime.
+ * over a single capture session.
*/
static const char *const camera_state_names[] = {
@@ -507,6 +665,11 @@ static const char *const camera_state_names[] = {
"Running",
};
+bool Camera::Private::isAcquired() const
+{
+ return state_.load(std::memory_order_acquire) != CameraAvailable;
+}
+
bool Camera::Private::isRunning() const
{
return state_.load(std::memory_order_acquire) == CameraRunning;
@@ -571,6 +734,7 @@ void Camera::Private::setState(State state)
{
state_.store(state, std::memory_order_release);
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \class Camera
@@ -665,6 +829,7 @@ void Camera::Private::setState(State state)
*/
/**
+ * \internal
* \brief Create a camera instance
* \param[in] d Camera private data
* \param[in] id The ID of the camera device
@@ -810,7 +975,7 @@ int Camera::exportFrameBuffers(Stream *stream,
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
- * Acquiring a camera will limit usage of any other camera(s) provided by the
+ * Acquiring a camera may limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
@@ -838,7 +1003,8 @@ int Camera::acquire()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!d->pipe_->lock()) {
+ if (!d->pipe_->invokeMethod(&PipelineHandler::acquire,
+ ConnectionTypeBlocking, this)) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
@@ -872,7 +1038,9 @@ int Camera::release()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- d->pipe_->unlock();
+ if (d->isAcquired())
+ d->pipe_->invokeMethod(&PipelineHandler::release,
+ ConnectionTypeBlocking, this);
d->setState(Private::CameraAvailable);
@@ -936,10 +1104,9 @@ const std::set<Stream *> &Camera::streams() const
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
- * null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * null pointer otherwise.
*/
-std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(Span<const StreamRole> roles)
{
Private *const d = _d();
@@ -951,7 +1118,8 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
if (roles.size() > streams().size())
return nullptr;
- CameraConfiguration *config = d->pipe_->generateConfiguration(this, roles);
+ std::unique_ptr<CameraConfiguration> config =
+ d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
@@ -968,10 +1136,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
LOG(Camera, Debug) << msg.str();
- return std::unique_ptr<CameraConfiguration>(config);
+ return config;
}
/**
+ * \fn std::unique_ptr<CameraConfiguration> \
+ * Camera::generateConfiguration(std::initializer_list<StreamRole> roles)
+ * \overload
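+ *
+ * A typical call, shown here for illustration:
+ * \code
+ * std::unique_ptr<CameraConfiguration> config =
+ *         camera->generateConfiguration({ StreamRole::Viewfinder });
+ * \endcode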
+ */
+
+/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
@@ -1009,8 +1183,8 @@ int Camera::configure(CameraConfiguration *config)
if (ret < 0)
return ret;
- for (auto it : *config)
- it.setStream(nullptr);
+ for (auto &cfg : *config)
+ cfg.setStream(nullptr);
if (config->validate() != CameraConfiguration::Valid) {
LOG(Camera, Error)
@@ -1108,6 +1282,7 @@ std::unique_ptr<Request> Camera::createRequest(uint64_t cookie)
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not running so requests can't be queued
+ * \retval -EXDEV The request does not belong to this camera
* \retval -EINVAL The request is invalid
* \retval -ENOMEM No buffer memory was available to handle the request
*/
@@ -1119,6 +1294,17 @@ int Camera::queueRequest(Request *request)
if (ret < 0)
return ret;
+ /* Requests can only be queued to the camera that created them. */
+ if (request->_d()->camera() != this) {
+ LOG(Camera, Error) << "Request was not created by this camera";
+ return -EXDEV;
+ }
+
+ if (request->status() != Request::RequestPending) {
+ LOG(Camera, Error) << request->toString() << " is not valid";
+ return -EINVAL;
+ }
+
/*
* The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
@@ -1173,6 +1359,8 @@ int Camera::start(const ControlList *controls)
LOG(Camera, Debug) << "Starting capture";
+ ASSERT(d->requestSequence_ == 0);
+
ret = d->pipe_->invokeMethod(&PipelineHandler::start,
ConnectionTypeBlocking, this, controls);
if (ret)
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index cabdcf75..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
#include "libcamera/internal/camera_controls.h"
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
index 189cb025..ccc2a6a6 100644
--- a/src/libcamera/camera_lens.cpp
+++ b/src/libcamera/camera_lens.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.cpp - A camera lens
+ * A camera lens
*/
#include "libcamera/internal/camera_lens.h"
@@ -84,7 +84,7 @@ int CameraLens::init()
*
* \return 0 on success or -EINVAL otherwise
*/
-int CameraLens::setFocusPostion(int32_t position)
+int CameraLens::setFocusPosition(int32_t position)
{
ControlList lensCtrls(subdev_->controls());
lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, static_cast<int32_t>(position));
@@ -139,4 +139,15 @@ std::string CameraLens::logPrefix() const
return "'" + entity_->name() + "'";
}
+/**
+ * \fn CameraLens::controls()
+ * \brief Retrieve the V4L2 controls of the lens' subdev
+ *
+ * \return A map of the V4L2 controls supported by the lens' driver
+ */
+const ControlInfoMap &CameraLens::controls() const
+{
+ return subdev_->controls();
+}
+
} /* namespace libcamera */
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index 70d73822..87e6717e 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,80 +2,45 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
-#include <libcamera/camera_manager.h>
-
-#include <map>
-
-#include <libcamera/camera.h>
+#include "libcamera/internal/camera_manager.h"
#include <libcamera/base/log.h>
-#include <libcamera/base/mutex.h>
-#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
+#include <libcamera/camera.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/process.h"
/**
- * \file camera_manager.h
+ * \file libcamera/camera_manager.h
* \brief The camera manager
*/
/**
+ * \internal
+ * \file libcamera/internal/camera_manager.h
+ * \brief Internal camera manager support
+ */
+
+/**
* \brief Top-level libcamera namespace
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
-class CameraManager::Private : public Extensible::Private, public Thread
-{
- LIBCAMERA_DECLARE_PUBLIC(CameraManager)
-
-public:
- Private();
-
- int start();
- void addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums);
- void removeCamera(Camera *camera);
-
- /*
- * This mutex protects
- *
- * - initialized_ and status_ during initialization
- * - cameras_ and camerasByDevnum_ after initialization
- */
- mutable Mutex mutex_;
- std::vector<std::shared_ptr<Camera>> cameras_;
- std::map<dev_t, std::weak_ptr<Camera>> camerasByDevnum_;
-
-protected:
- void run() override;
-
-private:
- int init();
- void createPipelineHandlers();
- void cleanup();
-
- ConditionVariable cv_;
- bool initialized_;
- int status_;
-
- std::unique_ptr<DeviceEnumerator> enumerator_;
-
- IPAManager ipaManager_;
- ProcessManager processManager_;
-};
-
+#ifndef __DOXYGEN_PUBLIC__
CameraManager::Private::Private()
: initialized_(false)
{
+ ipaManager_ = std::make_unique<IPAManager>();
}
int CameraManager::Private::start()
@@ -87,7 +52,9 @@ int CameraManager::Private::start()
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return initialized_; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return initialized_;
+ });
status = status_;
}
@@ -113,8 +80,10 @@ void CameraManager::Private::run()
mutex_.unlock();
cv_.notify_one();
- if (ret < 0)
+ if (ret < 0) {
+ cleanup();
return;
+ }
/* Now start processing events and messages. */
exec();
@@ -129,23 +98,45 @@ int CameraManager::Private::init()
return -ENODEV;
createPipelineHandlers();
+ enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
return 0;
}
void CameraManager::Private::createPipelineHandlers()
{
- CameraManager *const o = LIBCAMERA_O_PTR();
-
/*
* \todo Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+	 * file, and only fall back on the environment variable or all
+	 * handlers if there is no configuration file.
*/
- std::vector<PipelineHandlerFactory *> &factories =
- PipelineHandlerFactory::factories();
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
+ /*
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
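+		 * For example, LIBCAMERA_PIPELINES_MATCH_LIST="rkisp1,uvcvideo"
+		 * (handler names are illustrative) would try the rkisp1
+		 * handler first and fall back to uvcvideo.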
+ */
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
+
+ LOG(Camera, Debug)
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
+ }
+
+ return;
+ }
+
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
- for (PipelineHandlerFactory *factory : factories) {
+ /* Match all the registered pipeline handlers. */
+ for (const PipelineHandlerFactoryBase *factory : factories) {
LOG(Camera, Debug)
<< "Found registered pipeline handler '"
<< factory->name() << "'";
@@ -153,18 +144,24 @@ void CameraManager::Private::createPipelineHandlers()
* Try each pipeline handler until it exhaust
* all pipelines it can provide.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(o);
- if (!pipe->match(enumerator_.get()))
- break;
-
- LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- }
+ pipelineFactoryMatch(factory);
}
+}
- enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
+ }
}
void CameraManager::Private::cleanup()
@@ -178,18 +175,36 @@ void CameraManager::Private::cleanup()
* process deletion requests from the thread's message queue as the event
* loop is not in action here.
*/
- cameras_.clear();
+ {
+ MutexLocker locker(mutex_);
+ cameras_.clear();
+ }
+
dispatchMessages(Message::Type::DeferredDelete);
enumerator_.reset(nullptr);
}
-void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
+/**
+ * \brief Add a camera to the camera manager
+ * \param[in] camera The camera to be added
+ *
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager. Registered cameras are immediately made
+ * available to the system.
+ *
+ * Device numbers from the SystemDevices property are used by the V4L2
+ * compatibility layer to map V4L2 device nodes to Camera instances.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
- for (std::shared_ptr<Camera> c : cameras_) {
+ for (const std::shared_ptr<Camera> &c : cameras_) {
if (c->id() == camera->id()) {
LOG(Camera, Fatal)
<< "Trying to register a camera with a duplicated ID '"
@@ -201,17 +216,31 @@ void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
cameras_.push_back(std::move(camera));
unsigned int index = cameras_.size() - 1;
- for (dev_t devnum : devnums)
- camerasByDevnum_[devnum] = cameras_[index];
+
+ /* Report the addition to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraAdded.emit(cameras_[index]);
}
-void CameraManager::Private::removeCamera(Camera *camera)
+/**
+ * \brief Remove a camera from the camera manager
+ * \param[in] camera The camera to be removed
+ *
+ * This function is called by pipeline handlers to unregister cameras from the
+ * camera manager. Unregistered cameras won't be reported anymore by the
+ * cameras() and get() calls, but references may still exist in applications.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
auto iter = std::find_if(cameras_.begin(), cameras_.end(),
[camera](std::shared_ptr<Camera> &c) {
- return c.get() == camera;
+ return c.get() == camera.get();
});
if (iter == cameras_.end())
return;
@@ -219,17 +248,22 @@ void CameraManager::Private::removeCamera(Camera *camera)
LOG(Camera, Debug)
<< "Unregistering camera '" << camera->id() << "'";
- auto iter_d = std::find_if(camerasByDevnum_.begin(), camerasByDevnum_.end(),
- [camera](const std::pair<dev_t, std::weak_ptr<Camera>> &p) {
- return p.second.lock().get() == camera;
- });
- if (iter_d != camerasByDevnum_.end())
- camerasByDevnum_.erase(iter_d);
-
cameras_.erase(iter);
+
+ /* Report the removal to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraRemoved.emit(camera);
}
/**
+ * \fn CameraManager::Private::ipaManager() const
+ * \brief Retrieve the IPAManager
+ * \context This function is \threadsafe.
+ * \return The IPAManager for this CameraManager
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
* \class CameraManager
* \brief Provide access and manage all cameras in the system
*
@@ -354,7 +388,7 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id)
MutexLocker locker(d->mutex_);
- for (std::shared_ptr<Camera> camera : d->cameras_) {
+ for (const std::shared_ptr<Camera> &camera : d->cameras_) {
if (camera->id() == id)
return camera;
}
@@ -363,35 +397,6 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id)
}
/**
- * \brief Retrieve a camera based on device number
- * \param[in] devnum Device number of camera to get
- *
- * This function is meant solely for the use of the V4L2 compatibility
- * layer, to map device nodes to Camera instances. Applications shall
- * not use it and shall instead retrieve cameras by name.
- *
- * Before calling this function the caller is responsible for ensuring that
- * the camera manager is running.
- *
- * \context This function is \threadsafe.
- *
- * \return Shared pointer to Camera object, which is empty if the camera is
- * not found
- */
-std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
-{
- Private *const d = _d();
-
- MutexLocker locker(d->mutex_);
-
- auto iter = d->camerasByDevnum_.find(devnum);
- if (iter == d->camerasByDevnum_.end())
- return nullptr;
-
- return iter->second.lock();
-}
-
-/**
* \var CameraManager::cameraAdded
* \brief Notify of a new camera added to the system
*
@@ -420,51 +425,6 @@ std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
*/
/**
- * \brief Add a camera to the camera manager
- * \param[in] camera The camera to be added
- * \param[in] devnums The device numbers to associate with \a camera
- *
- * This function is called by pipeline handlers to register the cameras they
- * handle with the camera manager. Registered cameras are immediately made
- * available to the system.
- *
- * \a devnums are used by the V4L2 compatibility layer to map V4L2 device nodes
- * to Camera instances.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->addCamera(camera, devnums);
- cameraAdded.emit(camera);
-}
-
-/**
- * \brief Remove a camera from the camera manager
- * \param[in] camera The camera to be removed
- *
- * This function is called by pipeline handlers to unregister cameras from the
- * camera manager. Unregistered cameras won't be reported anymore by the
- * cameras() and get() calls, but references may still exist in applications.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::removeCamera(std::shared_ptr<Camera> camera)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->removeCamera(camera.get());
- cameraRemoved.emit(camera);
-}
-
-/**
* \fn const std::string &CameraManager::version()
* \brief Retrieve the libcamera version string
* \context This function is \a threadsafe.
diff --git a/src/libcamera/camera_sensor_properties.cpp b/src/libcamera/camera_sensor_properties.cpp
deleted file mode 100644
index 48305ac4..00000000
--- a/src/libcamera/camera_sensor_properties.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2021, Google Inc.
- *
- * camera_sensor_properties.cpp - Database of camera sensor properties
- */
-
-#include "libcamera/internal/camera_sensor_properties.h"
-
-#include <map>
-
-#include <libcamera/base/log.h>
-
-#include <libcamera/control_ids.h>
-
-/**
- * \file camera_sensor_properties.h
- * \brief Database of camera sensor properties
- *
- * The database of camera sensor properties collects static information about
- * camera sensors that is not possible or desirable to retrieve from the device
- * at run time.
- *
- * The database is indexed using the camera sensor model, as reported by the
- * properties::Model property, and for each supported sensor it contains a
- * list of properties.
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(CameraSensorProperties)
-
-/**
- * \struct CameraSensorProperties
- * \brief Database of camera sensor properties
- *
- * \var CameraSensorProperties::unitCellSize
- * \brief The physical size of a pixel, including pixel edges, in nanometers.
- *
- * \var CameraSensorProperties::testPatternModes
- * \brief Map that associates the TestPattern control value with the indexes of
- * the corresponding sensor test pattern modes as returned by
- * V4L2_CID_TEST_PATTERN.
- */
-
-/**
- * \brief Retrieve the properties associated with a sensor
- * \param sensor The sensor model name as reported by properties::Model
- * \return A pointer to the CameraSensorProperties instance associated with a sensor
- * or nullptr if the sensor is not supported
- */
-const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
-{
- static const std::map<std::string, const CameraSensorProperties> sensorProps = {
- { "hi846", {
- .unitCellSize = { 1120, 1120 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeSolidColor, 1 },
- { controls::draft::TestPatternModeColorBars, 2 },
- { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
- { controls::draft::TestPatternModePn9, 4 },
- /*
- * No corresponding test pattern mode for:
- * 5: "Gradient Horizontal"
- * 6: "Gradient Vertical"
- * 7: "Check Board"
- * 8: "Slant Pattern"
- * 9: "Resolution Pattern"
- */
- },
- } },
- { "imx219", {
- .unitCellSize = { 1120, 1120 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeColorBars, 1 },
- { controls::draft::TestPatternModeSolidColor, 2 },
- { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
- { controls::draft::TestPatternModePn9, 4 },
- },
- } },
- { "imx258", {
- .unitCellSize = { 1120, 1120 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeSolidColor, 1 },
- { controls::draft::TestPatternModeColorBars, 2 },
- { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
- { controls::draft::TestPatternModePn9, 4 },
- },
- } },
- { "ov5647", {
- .unitCellSize = { 1400, 1400 },
- .testPatternModes = {},
- } },
- { "ov5670", {
- .unitCellSize = { 1120, 1120 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeColorBars, 1 },
- },
- } },
- { "ov5693", {
- .unitCellSize = { 1400, 1400 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeColorBars, 2 },
- /*
- * No corresponding test pattern mode for
- * 1: "Random data" and 3: "Colour Bars with
- * Rolling Bar".
- */
- },
- } },
- { "ov8865", {
- .unitCellSize = { 1400, 1400 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeColorBars, 2 },
- /*
- * No corresponding test pattern mode for:
- * 1: "Random data"
- * 3: "Color bars with rolling bar"
- * 4: "Color squares"
- * 5: "Color squares with rolling bar"
- */
- },
- } },
- { "ov13858", {
- .unitCellSize = { 1120, 1120 },
- .testPatternModes = {
- { controls::draft::TestPatternModeOff, 0 },
- { controls::draft::TestPatternModeColorBars, 1 },
- },
- } },
- };
-
- const auto it = sensorProps.find(sensor);
- if (it == sensorProps.end()) {
- LOG(CameraSensorProperties, Warning)
- << "No static properties available for '" << sensor << "'";
- LOG(CameraSensorProperties, Warning)
- << "Please consider updating the camera sensor properties database";
- return nullptr;
- }
-
- return &it->second;
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
index 895e5c8e..3d1c456c 100644
--- a/src/libcamera/color_space.cpp
+++ b/src/libcamera/color_space.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.cpp - color spaces.
+ * color spaces.
*/
#include <libcamera/color_space.h>
@@ -12,6 +12,11 @@
#include <map>
#include <sstream>
#include <utility>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
/**
* \file color_space.h
@@ -29,21 +34,33 @@ namespace libcamera {
* (sometimes also referred to as the quantisation) of the color space.
*
* Certain combinations of these fields form well-known standard color
- * spaces such as "JPEG" or "REC709".
+ * spaces such as "sRGB" or "Rec709".
*
* In the strictest sense a "color space" formally only refers to the
* color primaries and white point. Here, however, the ColorSpace class
* adopts the common broader usage that includes the transfer function,
* Y'CbCr encoding method and quantisation.
*
- * For more information on the specific color spaces described here, please
- * see:
+ * More information on color spaces is available in the V4L2 documentation; see
+ * in particular
*
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-srgb">sRGB</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-jpeg">JPEG</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-smpte-170m">SMPTE 170M</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-rec709">Rec.709</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-bt2020">Rec.2020</a>
+ *
+ * Note that there is no guarantee of a 1:1 mapping between color space names
+ * and definitions in libcamera and V4L2. Two notable differences are
+ *
+ * - The sRGB libcamera color space is defined for RGB formats only with no
+ * Y'CbCr encoding and a full quantization range, while the V4L2 SRGB color
+ * space has a Y'CbCr encoding and a limited quantization range.
+ * - The sYCC libcamera color space is called JPEG in V4L2 for historical
+ *   reasons.
+ *
+ * \todo Define the color space fully in the libcamera API to avoid referencing
+ * V4L2
*/
/**
@@ -118,11 +135,130 @@ namespace libcamera {
*/
/**
+ * \brief A constant representing a raw color space (from a sensor)
+ */
+const ColorSpace ColorSpace::Raw = {
+ Primaries::Raw,
+ TransferFunction::Linear,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sRGB color space (RGB formats only)
+ */
+const ColorSpace ColorSpace::Srgb = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sYCC color space, typically used for
+ * encoding JPEG images
+ */
+const ColorSpace ColorSpace::Sycc = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::Rec601,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the SMPTE170M color space
+ */
+const ColorSpace ColorSpace::Smpte170m = {
+ Primaries::Smpte170m,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec601,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.709 color space
+ */
+const ColorSpace ColorSpace::Rec709 = {
+ Primaries::Rec709,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec709,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.2020 color space
+ */
+const ColorSpace ColorSpace::Rec2020 = {
+ Primaries::Rec2020,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec2020,
+ Range::Limited
+};
+
+/**
+ * \var ColorSpace::primaries
+ * \brief The color primaries of this color space
+ */
+
+/**
+ * \var ColorSpace::transferFunction
+ * \brief The transfer function used by this color space
+ */
+
+/**
+ * \var ColorSpace::ycbcrEncoding
+ * \brief The Y'CbCr encoding used by this color space
+ */
+
+/**
+ * \var ColorSpace::range
+ * \brief The pixel range used by this color space
+ */
+
+namespace {
+
+const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
+ { ColorSpace::Raw, "RAW" },
+ { ColorSpace::Srgb, "sRGB" },
+ { ColorSpace::Sycc, "sYCC" },
+ { ColorSpace::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Rec709, "Rec709" },
+ { ColorSpace::Rec2020, "Rec2020" },
+} };
+
+const std::map<ColorSpace::Primaries, std::string> primariesNames = {
+ { ColorSpace::Primaries::Raw, "RAW" },
+ { ColorSpace::Primaries::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Primaries::Rec709, "Rec709" },
+ { ColorSpace::Primaries::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::TransferFunction, std::string> transferNames = {
+ { ColorSpace::TransferFunction::Linear, "Linear" },
+ { ColorSpace::TransferFunction::Srgb, "sRGB" },
+ { ColorSpace::TransferFunction::Rec709, "Rec709" },
+};
+
+const std::map<ColorSpace::YcbcrEncoding, std::string> encodingNames = {
+ { ColorSpace::YcbcrEncoding::None, "None" },
+ { ColorSpace::YcbcrEncoding::Rec601, "Rec601" },
+ { ColorSpace::YcbcrEncoding::Rec709, "Rec709" },
+ { ColorSpace::YcbcrEncoding::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::Range, std::string> rangeNames = {
+ { ColorSpace::Range::Full, "Full" },
+ { ColorSpace::Range::Limited, "Limited" },
+};
+
+} /* namespace */
+
+/**
* \brief Assemble and return a readable string representation of the
* ColorSpace
*
- * If the color space matches a standard ColorSpace (such as ColorSpace::Jpeg)
- * then the short name of the color space ("JPEG") is returned. Otherwise
+ * If the color space matches a standard ColorSpace (such as ColorSpace::Sycc)
+ * then the short name of the color space ("sYCC") is returned. Otherwise
* the four constituent parts of the ColorSpace are assembled into a longer
* string.
*
@@ -132,14 +268,6 @@ std::string ColorSpace::toString() const
{
/* Print out a brief name only for standard color spaces. */
- static const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
- { ColorSpace::Raw, "RAW" },
- { ColorSpace::Jpeg, "JPEG" },
- { ColorSpace::Srgb, "sRGB" },
- { ColorSpace::Smpte170m, "SMPTE170M" },
- { ColorSpace::Rec709, "Rec709" },
- { ColorSpace::Rec2020, "Rec2020" },
- } };
auto it = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
[this](const auto &item) {
return *this == item.first;
@@ -149,28 +277,6 @@ std::string ColorSpace::toString() const
/* Assemble a name made of the constituent fields. */
- static const std::map<Primaries, std::string> primariesNames = {
- { Primaries::Raw, "RAW" },
- { Primaries::Smpte170m, "SMPTE170M" },
- { Primaries::Rec709, "Rec709" },
- { Primaries::Rec2020, "Rec2020" },
- };
- static const std::map<TransferFunction, std::string> transferNames = {
- { TransferFunction::Linear, "Linear" },
- { TransferFunction::Srgb, "sRGB" },
- { TransferFunction::Rec709, "Rec709" },
- };
- static const std::map<YcbcrEncoding, std::string> encodingNames = {
- { YcbcrEncoding::None, "None" },
- { YcbcrEncoding::Rec601, "Rec601" },
- { YcbcrEncoding::Rec709, "Rec709" },
- { YcbcrEncoding::Rec2020, "Rec2020" },
- };
- static const std::map<Range, std::string> rangeNames = {
- { Range::Full, "Full" },
- { Range::Limited, "Limited" },
- };
-
auto itPrimaries = primariesNames.find(primaries);
std::string primariesName =
itPrimaries == primariesNames.end() ? "Invalid" : itPrimaries->second;
@@ -213,88 +319,185 @@ std::string ColorSpace::toString(const std::optional<ColorSpace> &colorSpace)
}
/**
- * \var ColorSpace::primaries
- * \brief The color primaries of this color space
- */
-
-/**
- * \var ColorSpace::transferFunction
- * \brief The transfer function used by this color space
- */
-
-/**
- * \var ColorSpace::ycbcrEncoding
- * \brief The Y'CbCr encoding used by this color space
- */
-
-/**
- * \var ColorSpace::range
- * \brief The pixel range used with by color space
- */
-
-/**
- * \brief A constant representing a raw color space (from a sensor)
- */
-const ColorSpace ColorSpace::Raw = {
- Primaries::Raw,
- TransferFunction::Linear,
- YcbcrEncoding::None,
- Range::Full
-};
-
-/**
- * \brief A constant representing the JPEG color space used for
- * encoding JPEG images
- */
-const ColorSpace ColorSpace::Jpeg = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Full
-};
-
-/**
- * \brief A constant representing the sRGB color space
+ * \brief Construct a color space from a string
+ * \param[in] str The string
*
- * This is identical to the JPEG color space except that the Y'CbCr
- * range is limited rather than full.
- */
-const ColorSpace ColorSpace::Srgb = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the SMPTE170M color space
- */
-const ColorSpace ColorSpace::Smpte170m = {
- Primaries::Smpte170m,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the Rec.709 color space
+ * The string \a str can contain the name of a well-known color space, or be
+ * made of the four color space components separated by a '/' character, ordered
+ * as
+ *
+ * \verbatim primaries '/' transferFunction '/' ycbcrEncoding '/' range \endverbatim
+ *
+ * Any failure to parse the string, either because it doesn't match the expected
+ * format, or because one of the names isn't recognized, will cause this
+ * function to return std::nullopt.
+ *
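+ * Two example strings, shown for illustration:
+ * \code
+ * std::optional<ColorSpace> sycc = ColorSpace::fromString("sYCC");
+ * std::optional<ColorSpace> rec709 =
+ *         ColorSpace::fromString("Rec709/Rec709/Rec709/Limited");
+ * \endcode
+ *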
+ * \return The ColorSpace corresponding to the string, or std::nullopt if the
+ * string doesn't describe a known color space
*/
-const ColorSpace ColorSpace::Rec709 = {
- Primaries::Rec709,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec709,
- Range::Limited
-};
+std::optional<ColorSpace> ColorSpace::fromString(const std::string &str)
+{
+ /* First search for a standard color space name match. */
+ auto itColorSpace = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [&str](const auto &item) {
+ return str == item.second;
+ });
+ if (itColorSpace != colorSpaceNames.end())
+ return itColorSpace->first;
+
+ /*
+ * If not found, the string must contain the four color space
+ * components separated by a '/' character.
+ */
+ const auto &split = utils::split(str, "/");
+ std::vector<std::string> components{ split.begin(), split.end() };
+
+ if (components.size() != 4)
+ return std::nullopt;
+
+ ColorSpace colorSpace = ColorSpace::Raw;
+
+ /* Color primaries */
+ auto itPrimaries = std::find_if(primariesNames.begin(), primariesNames.end(),
+ [&components](const auto &item) {
+ return components[0] == item.second;
+ });
+ if (itPrimaries == primariesNames.end())
+ return std::nullopt;
+
+ colorSpace.primaries = itPrimaries->first;
+
+ /* Transfer function */
+ auto itTransfer = std::find_if(transferNames.begin(), transferNames.end(),
+ [&components](const auto &item) {
+ return components[1] == item.second;
+ });
+ if (itTransfer == transferNames.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->first;
+
+ /* YCbCr encoding */
+ auto itEncoding = std::find_if(encodingNames.begin(), encodingNames.end(),
+ [&components](const auto &item) {
+ return components[2] == item.second;
+ });
+ if (itEncoding == encodingNames.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itEncoding->first;
+
+ /* Quantization range */
+ auto itRange = std::find_if(rangeNames.begin(), rangeNames.end(),
+ [&components](const auto &item) {
+ return components[3] == item.second;
+ });
+ if (itRange == rangeNames.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->first;
+
+ return colorSpace;
+}
/**
- * \brief A constant representing the Rec.2020 color space
+ * \brief Adjust the color space to match a pixel format
+ * \param[in] format The pixel format
+ *
+ * Not all combinations of pixel formats and color spaces make sense. For
+ * instance, nobody uses a limited quantization range with raw Bayer formats,
+ * and the YcbcrEncoding::None encoding isn't valid for YUV formats. This
+ * function adjusts the ColorSpace to make it compatible with the given
+ * \a format, by applying the following rules:
+ *
+ * - The color space for RAW formats must be Raw.
+ * - The Y'CbCr encoding and quantization range for RGB formats must be
+ * YcbcrEncoding::None and Range::Full respectively.
+ * - The Y'CbCr encoding for YUV formats must not be YcbcrEncoding::None. The
+ * best encoding is in that case guessed based on the primaries and transfer
+ * function.
+ *
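+ * For instance, a sketch of adjusting a Y'CbCr color space for an RGB format
+ * (formats::RGB888 is used here for illustration):
+ * \code
+ * ColorSpace colorSpace = ColorSpace::Sycc;
+ * colorSpace.adjust(formats::RGB888);
+ * /* colorSpace.ycbcrEncoding is now YcbcrEncoding::None, range is Full */
+ * \endcode
+ *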
+ * \return True if the color space has been adjusted, or false if it was
+ * already compatible with the format and hasn't been changed
*/
-const ColorSpace ColorSpace::Rec2020 = {
- Primaries::Rec2020,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec2020,
- Range::Limited
-};
+bool ColorSpace::adjust(PixelFormat format)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+ bool adjusted = false;
+
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ /* Raw formats must use the raw color space. */
+ if (*this != ColorSpace::Raw) {
+ *this = ColorSpace::Raw;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * RGB formats can't have a Y'CbCr encoding, and must use full
+ * range quantization.
+ */
+ if (ycbcrEncoding != YcbcrEncoding::None) {
+ ycbcrEncoding = YcbcrEncoding::None;
+ adjusted = true;
+ }
+
+ if (range != Range::Full) {
+ range = Range::Full;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ if (ycbcrEncoding != YcbcrEncoding::None)
+ break;
+
+ /*
+ * YUV formats must have a Y'CbCr encoding. Infer the most
+ * probable option from the transfer function and primaries.
+ */
+ switch (transferFunction) {
+ case TransferFunction::Linear:
+ /*
+ * Linear YUV is not used in any standard color space,
+ * pick the widely supported and used Rec601 as default.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+
+ case TransferFunction::Rec709:
+ switch (primaries) {
+ /* Raw should never happen. */
+ case Primaries::Raw:
+ case Primaries::Smpte170m:
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ case Primaries::Rec709:
+ ycbcrEncoding = YcbcrEncoding::Rec709;
+ break;
+ case Primaries::Rec2020:
+ ycbcrEncoding = YcbcrEncoding::Rec2020;
+ break;
+ }
+ break;
+
+ case TransferFunction::Srgb:
+ /*
+ * Only the sYCC color space uses the sRGB transfer
+ * function, the corresponding encoding is Rec601.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ }
+
+ adjusted = true;
+ break;
+ }
+
+ return adjusted;
+}
/**
* \brief Compare color spaces for equality
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index 5fb1c2c3..65668d48 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -2,61 +2,122 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.cpp : Control ID list
+ * {{mode}} ID list
*
* This file is auto-generated. Do not edit.
*/
-#include <libcamera/control_ids.h>
+#include <libcamera/{{filename}}.h>
#include <libcamera/controls.h>
/**
- * \file control_ids.h
- * \brief Camera control identifiers
+ * \file {{filename}}.h
+ * \brief Camera {{mode}} identifiers
*/
namespace libcamera {
/**
- * \brief Namespace for libcamera controls
+ * \brief Namespace for libcamera {{mode}}
*/
-namespace controls {
+namespace {{mode}} {
-${controls_doc}
+{%- for vendor, ctrls in controls -%}
+{%- if vendor != 'libcamera' %}
/**
- * \brief Namespace for libcamera draft controls
+ * \brief Namespace for {{vendor}} {{mode}}
*/
-namespace draft {
+namespace {{vendor}} {
+{%- endif -%}
-${draft_controls_doc}
+{% for ctrl in ctrls %}
-} /* namespace draft */
+{% if ctrl.is_enum -%}
+/**
+ * \enum {{ctrl.name}}Enum
+ * \brief Supported {{ctrl.name}} values
+{%- for enum in ctrl.enum_values %}
+ *
+ * \var {{enum.name}}
+ * \brief {{enum.description|format_description}}
+{%- endfor %}
+ */
+
+/**
+ * \var {{ctrl.name}}Values
+ * \brief List of all {{ctrl.name}} supported values
+ */
+
+/**
+ * \var {{ctrl.name}}NameValueMap
+ * \brief Map of all {{ctrl.name}} supported value names (in std::string format) to value
+ */
+
+{% endif -%}
+/**
+ * \var {{ctrl.name}}
+ * \brief {{ctrl.description|format_description}}
+ */
+{%- endfor %}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
#ifndef __DOXYGEN__
/*
- * Keep the controls definitions hidden from doxygen as it incorrectly parses
+ * Keep the {{mode}} definitions hidden from doxygen as it incorrectly parses
* them as functions.
*/
-${controls_def}
+{% for vendor, ctrls in controls -%}
+
+{% if vendor != 'libcamera' %}
+namespace {{vendor}} {
+{% endif %}
-namespace draft {
+{%- for ctrl in ctrls %}
+{% if ctrl.is_enum -%}
+extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values = {
+{%- for enum in ctrl.enum_values %}
+ static_cast<{{ctrl.type}}>({{enum.name}}),
+{%- endfor %}
+};
+extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap = {
+{%- for enum in ctrl.enum_values %}
+ { "{{enum.name}}", {{enum.name}} },
+{%- endfor %}
+};
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}, {{ctrl.name}}NameValueMap);
+{% else -%}
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}});
+{% endif -%}
+{%- endfor %}
-${draft_controls_def}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
-} /* namespace draft */
-#endif
+{%- endfor %}
+#endif /* __DOXYGEN__ */
/**
- * \brief List of all supported libcamera controls
+ * \brief List of all supported libcamera {{mode}}
+{%- if mode == 'controls' %}
*
* Unless otherwise stated, all controls are bi-directional, i.e. they can be
* set through Request::controls() and returned out through Request::metadata().
+{%- endif %}
*/
-extern const ControlIdMap controls {
-${controls_map}
+extern const ControlIdMap {{mode}} {
+{%- for vendor, ctrls in controls -%}
+{%- for ctrl in ctrls %}
+ { {{ctrl.namespace}}{{ctrl.name|snake_case|upper}}, &{{ctrl.namespace}}{{ctrl.name}} },
+{%- endfor -%}
+{%- endfor %}
};
-} /* namespace controls */
+} /* namespace {{mode}} */
} /* namespace libcamera */
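
As a sketch of what this template generates, consider a hypothetical vendor "acme" defining a single enum control "WidgetMode" (all names are illustrative; the {{ctrl.direction}} token is left unexpanded here, as its expansion comes from the control's "direction:" YAML field):

    namespace libcamera {
    namespace controls {
    namespace acme {

    extern const std::array<const ControlValue, 2> WidgetModeValues = {
        static_cast<int32_t>(WidgetModeOff),
        static_cast<int32_t>(WidgetModeOn),
    };
    extern const std::map<std::string, int32_t> WidgetModeNameValueMap = {
        { "WidgetModeOff", WidgetModeOff },
        { "WidgetModeOn", WidgetModeOn },
    };
    extern const Control<int32_t> WidgetMode(WIDGET_MODE, "WidgetMode", "acme",
                                             {{ctrl.direction}}, WidgetModeNameValueMap);

    } /* namespace acme */
    } /* namespace controls */
    } /* namespace libcamera */
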
diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids.yaml
deleted file mode 100644
index 9d4638ae..00000000
--- a/src/libcamera/control_ids.yaml
+++ /dev/null
@@ -1,693 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1-or-later
-#
-# Copyright (C) 2019, Google Inc.
-#
-%YAML 1.2
----
-# Unless otherwise stated, all controls are bi-directional, i.e. they can be
-# set through Request::controls() and returned out through Request::metadata().
-controls:
- - AeEnable:
- type: bool
- description: |
- Enable or disable the AE.
-
- \sa ExposureTime AnalogueGain
-
- - AeLocked:
- type: bool
- description: |
- Report the lock status of a running AE algorithm.
-
- If the AE algorithm is locked the value shall be set to true, if it's
- converging it shall be set to false. If the AE algorithm is not
- running the control shall not be present in the metadata control list.
-
- \sa AeEnable
-
- # AeMeteringMode needs further attention:
- # - Auto-generate max enum value.
- # - Better handling of custom types.
- - AeMeteringMode:
- type: int32_t
- description: |
- Specify a metering mode for the AE algorithm to use. The metering
- modes determine which parts of the image are used to determine the
- scene brightness. Metering modes may be platform specific and not
- all metering modes may be supported.
- enum:
- - name: MeteringCentreWeighted
- value: 0
- description: Centre-weighted metering mode.
- - name: MeteringSpot
- value: 1
- description: Spot metering mode.
- - name: MeteringMatrix
- value: 2
- description: Matrix metering mode.
- - name: MeteringCustom
- value: 3
- description: Custom metering mode.
-
- # AeConstraintMode needs further attention:
- # - Auto-generate max enum value.
- # - Better handling of custom types.
- - AeConstraintMode:
- type: int32_t
- description: |
- Specify a constraint mode for the AE algorithm to use. These determine
- how the measured scene brightness is adjusted to reach the desired
- target exposure. Constraint modes may be platform specific, and not
- all constraint modes may be supported.
- enum:
- - name: ConstraintNormal
- value: 0
- description: Default constraint mode.
- This mode aims to balance the exposure of different parts of the
- image so as to reach a reasonable average level. However, highlights
- in the image may appear over-exposed and lowlights may appear
- under-exposed.
- - name: ConstraintHighlight
- value: 1
- description: Highlight constraint mode.
- This mode adjusts the exposure levels in order to try and avoid
- over-exposing the brightest parts (highlights) of an image.
- Other non-highlight parts of the image may appear under-exposed.
- - name: ConstraintShadows
- value: 2
- description: Shadows constraint mode.
- This mode adjusts the exposure levels in order to try and avoid
- under-exposing the dark parts (shadows) of an image. Other normally
- exposed parts of the image may appear over-exposed.
- - name: ConstraintCustom
- value: 3
- description: Custom constraint mode.
-
- # AeExposureMode needs further attention:
- # - Auto-generate max enum value.
- # - Better handling of custom types.
- - AeExposureMode:
- type: int32_t
- description: |
- Specify an exposure mode for the AE algorithm to use. These specify
- how the desired total exposure is divided between the shutter time
- and the sensor's analogue gain. The exposure modes are platform
- specific, and not all exposure modes may be supported.
- enum:
- - name: ExposureNormal
- value: 0
- description: Default exposure mode.
- - name: ExposureShort
- value: 1
- description: Exposure mode allowing only short exposure times.
- - name: ExposureLong
- value: 2
- description: Exposure mode allowing long exposure times.
- - name: ExposureCustom
- value: 3
- description: Custom exposure mode.
-
- - ExposureValue:
- type: float
- description: |
- Specify an Exposure Value (EV) parameter. The EV parameter will only be
- applied if the AE algorithm is currently enabled.
-
- By convention EV adjusts the exposure as log2. For example
- EV = [-2, -1, 0.5, 0, 0.5, 1, 2] results in an exposure adjustment
- of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
-
- \sa AeEnable
-
- - ExposureTime:
- type: int32_t
- description: |
- Exposure time (shutter speed) for the frame applied in the sensor
- device. This value is specified in micro-seconds.
-
- Setting this value means that it is now fixed and the AE algorithm may
- not change it. Setting it back to zero returns it to the control of the
- AE algorithm.
-
- \sa AnalogueGain AeEnable
-
- \todo Document the interactions between AeEnable and setting a fixed
- value for this control. Consider interactions with other AE features,
- such as aperture and aperture/shutter priority mode, and decide if
- control of which features should be automatically adjusted shouldn't
- better be handled through a separate AE mode control.
-
- - AnalogueGain:
- type: float
- description: |
- Analogue gain value applied in the sensor device.
- The value of the control specifies the gain multiplier applied to all
- colour channels. This value cannot be lower than 1.0.
-
- Setting this value means that it is now fixed and the AE algorithm may
- not change it. Setting it back to zero returns it to the control of the
- AE algorithm.
-
- \sa ExposureTime AeEnable
-
- \todo Document the interactions between AeEnable and setting a fixed
- value for this control. Consider interactions with other AE features,
- such as aperture and aperture/shutter priority mode, and decide if
- control of which features should be automatically adjusted shouldn't
- better be handled through a separate AE mode control.
-
- - Brightness:
- type: float
- description: |
- Specify a fixed brightness parameter. Positive values (up to 1.0)
- produce brighter images; negative values (up to -1.0) produce darker
- images and 0.0 leaves pixels unchanged.
-
- - Contrast:
- type: float
- description: |
- Specify a fixed contrast parameter. Normal contrast is given by the
- value 1.0; larger values produce images with more contrast.
-
- - Lux:
- type: float
- description: |
- Report an estimate of the current illuminance level in lux. The Lux
- control can only be returned in metadata.
-
- - AwbEnable:
- type: bool
- description: |
- Enable or disable the AWB.
-
- \sa ColourGains
-
- # AwbMode needs further attention:
- # - Auto-generate max enum value.
- # - Better handling of custom types.
- - AwbMode:
- type: int32_t
- description: |
- Specify the range of illuminants to use for the AWB algorithm. The modes
- supported are platform specific, and not all modes may be supported.
- enum:
- - name: AwbAuto
- value: 0
- description: Search over the whole colour temperature range.
- - name: AwbIncandescent
- value: 1
- description: Incandescent AWB lamp mode.
- - name: AwbTungsten
- value: 2
- description: Tungsten AWB lamp mode.
- - name: AwbFluorescent
- value: 3
- description: Fluorescent AWB lamp mode.
- - name: AwbIndoor
- value: 4
- description: Indoor AWB lighting mode.
- - name: AwbDaylight
- value: 5
- description: Daylight AWB lighting mode.
- - name: AwbCloudy
- value: 6
- description: Cloudy AWB lighting mode.
- - name: AwbCustom
- value: 7
- description: Custom AWB mode.
-
- - AwbLocked:
- type: bool
- description: |
- Report the lock status of a running AWB algorithm.
-
- If the AWB algorithm is locked the value shall be set to true, if it's
- converging it shall be set to false. If the AWB algorithm is not
- running the control shall not be present in the metadata control list.
-
- \sa AwbEnable
-
- - ColourGains:
- type: float
- description: |
- Pair of gain values for the Red and Blue colour channels, in that
- order. ColourGains can only be applied in a Request when the AWB is
- disabled.
-
- \sa AwbEnable
- size: [2]
-
- - ColourTemperature:
- type: int32_t
- description: Report the current estimate of the colour temperature, in
- kelvin, for this frame. The ColourTemperature control can only be
- returned in metadata.
-
- - Saturation:
- type: float
- description: |
- Specify a fixed saturation parameter. Normal saturation is given by
- the value 1.0; larger values produce more saturated colours; 0.0
- produces a greyscale image.
-
- - SensorBlackLevels:
- type: int32_t
- description: |
- Reports the sensor black levels used for processing a frame, in the
- order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit
- pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels
- control can only be returned in metadata.
- size: [4]
-
- - Sharpness:
- type: float
- description: |
- A value of 0.0 means no sharpening. The minimum value means
- minimal sharpening, and shall be 0.0 unless the camera can't
- disable sharpening completely. The default value shall give a
- "reasonable" level of sharpening, suitable for most use cases.
- The maximum value may apply extremely high levels of sharpening,
- higher than anyone could reasonably want. Negative values are
- not allowed. Note also that sharpening is not applied to raw
- streams.
-
- - FocusFoM:
- type: int32_t
- description: |
- Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
- A larger FocusFoM value indicates a more in-focus frame. This control
- depends on the IPA to gather ISP statistics from the defined focus
- region, and combine them in a suitable way to generate a FocusFoM value.
- In this respect, it is not necessarily aimed at providing a way to
- implement a focus algorithm by the application, rather an indication of
- how in-focus a frame is.
-
- - ColourCorrectionMatrix:
- type: float
- description: |
- The 3x3 matrix that converts camera RGB to sRGB within the
- imaging pipeline. This should describe the matrix that is used
- after pixels have been white-balanced, but before any gamma
- transformation. The 3x3 matrix is stored in conventional reading
- order in an array of 9 floating point values.
-
- size: [3x3]
-
- - ScalerCrop:
- type: Rectangle
- description: |
- Sets the image portion that will be scaled to form the whole of
- the final output image. The (x,y) location of this rectangle is
- relative to the PixelArrayActiveAreas that is being used. The units
- remain native sensor pixels, even if the sensor is being used in
- a binning or skipping mode.
-
- This control is only present when the pipeline supports scaling. Its
- maximum valid value is given by the properties::ScalerCropMaximum
- property, and the two can be used to implement digital zoom.
-
- - DigitalGain:
- type: float
- description: |
- Digital gain value applied during the processing steps applied
- to the image as captured from the sensor.
-
- The global digital gain factor is applied to all the colour channels
- of the RAW image. Different pipeline models are free to
- specify how the global gain factor applies to each separate
- channel.
-
- If an imaging pipeline applies digital gain in distinct
- processing steps, this value indicates their total sum.
- Pipelines are free to decide how to adjust each processing
- step to respect the received gain factor and shall report
- their total value in the request metadata.
-
- - FrameDuration:
- type: int64_t
- description: |
- The instantaneous frame duration from start of frame exposure to start
- of next exposure, expressed in microseconds. This control is meant to
- be returned in metadata.
-
- - FrameDurationLimits:
- type: int64_t
- description: |
- The minimum and maximum (in that order) frame duration,
- expressed in microseconds.
-
- When provided by applications, the control specifies the sensor frame
- duration interval the pipeline has to use. This limits the largest
- exposure time the sensor can use. For example, if a maximum frame
- duration of 33ms is requested (corresponding to 30 frames per second),
- the sensor will not be able to raise the exposure time above 33ms.
- A fixed frame duration is achieved by setting the minimum and maximum
- values to be the same. Setting both values to 0 reverts to using the
- IPA provided defaults.
-
- The maximum frame duration provides the absolute limit to the shutter
- speed computed by the AE algorithm and it overrides any exposure mode
- setting specified with controls::AeExposureMode. Similarly, when a
- manual exposure time is set through controls::ExposureTime, it also
- gets clipped to the limits set by this control. When reported in
- metadata, the control expresses the minimum and maximum frame
- durations used after being clipped to the sensor provided frame
- duration limits.
-
- \sa AeExposureMode
- \sa ExposureTime
-
- \todo Define how to calculate the capture frame rate by
- defining controls to report additional delays introduced by
- the capture pipeline or post-processing stages (ie JPEG
- conversion, frame scaling).
-
- \todo Provide an explicit definition of default control values, for
- this and all other controls.
-
- size: [2]
-
- - SensorTimestamp:
- type: int64_t
- description: |
- The time when the first row of the image sensor active array is exposed.
-
- The timestamp, expressed in nanoseconds, represents a monotonically
- increasing counter since the system boot time, as defined by the
- Linux-specific CLOCK_BOOTTIME clock id.
-
- The SensorTimestamp control can only be returned in metadata.
-
- \todo Define how the sensor timestamp has to be used in the reprocessing
- use case.
-
- # ----------------------------------------------------------------------------
- # Draft controls section
-
- - AePrecaptureTrigger:
- type: int32_t
- draft: true
- description: |
- Control for AE metering trigger. Currently identical to
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
-
- Whether the camera device will trigger a precapture metering sequence
- when it processes this request.
- enum:
- - name: AePrecaptureTriggerIdle
- value: 0
- description: The trigger is idle.
- - name: AePrecaptureTriggerStart
- value: 1
- description: The pre-capture AE metering is started by the camera.
- - name: AePrecaptureTriggerCancel
- value: 2
- description: |
- The camera will cancel any active or completed metering sequence.
- The AE algorithm is reset to its initial state.
-
- - AfTrigger:
- type: int32_t
- draft: true
- description: |
- Control for AF trigger. Currently identical to
- ANDROID_CONTROL_AF_TRIGGER.
-
- Whether the camera device will trigger autofocus for this request.
- enum:
- - name: AfTriggerIdle
- value: 0
- description: The trigger is idle.
- - name: AfTriggerStart
- value: 1
- description: The AF routine is started by the camera.
- - name: AfTriggerCancel
- value: 2
- description: |
- The camera will cancel any active trigger and the AF routine is
- reset to its initial state.
-
- - NoiseReductionMode:
- type: int32_t
- draft: true
- description: |
- Control to select the noise reduction algorithm mode. Currently
- identical to ANDROID_NOISE_REDUCTION_MODE.
-
- Mode of operation for the noise reduction algorithm.
- enum:
- - name: NoiseReductionModeOff
- value: 0
- description: No noise reduction is applied
- - name: NoiseReductionModeFast
- value: 1
- description: |
- Noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeHighQuality
- value: 2
- description: |
- High quality noise reduction at the expense of frame rate.
- - name: NoiseReductionModeMinimal
- value: 3
- description: |
- Minimal noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeZSL
- value: 4
- description: |
- Noise reduction is applied at different levels to different streams.
-
- - ColorCorrectionAberrationMode:
- type: int32_t
- draft: true
- description: |
- Control to select the color correction aberration mode. Currently
- identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
-
- Mode of operation for the chromatic aberration correction algorithm.
- enum:
- - name: ColorCorrectionAberrationOff
- value: 0
- description: No aberration correction is applied.
- - name: ColorCorrectionAberrationFast
- value: 1
- description: Aberration correction will not slow down the frame rate.
- - name: ColorCorrectionAberrationHighQuality
- value: 2
- description: |
- High quality aberration correction which might reduce the frame
- rate.
-
- - AeState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AE algorithm state. Currently identical to
- ANDROID_CONTROL_AE_STATE.
-
- Current state of the AE algorithm.
- enum:
- - name: AeStateInactive
- value: 0
- description: The AE algorithm is inactive.
- - name: AeStateSearching
- value: 1
- description: The AE algorithm has not converged yet.
- - name: AeStateConverged
- value: 2
- description: The AE algorithm has converged.
- - name: AeStateLocked
- value: 3
- description: The AE algorithm is locked.
- - name: AeStateFlashRequired
- value: 4
- description: The AE algorithm would need a flash for good results
- - name: AeStatePrecapture
- value: 5
- description: |
- The AE algorithm has started a pre-capture metering session.
- \sa AePrecaptureTrigger
-
- - AfState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AF algorithm state. Currently identical to
- ANDROID_CONTROL_AF_STATE.
-
- Current state of the AF algorithm.
- enum:
- - name: AfStateInactive
- value: 0
- description: The AF algorithm is inactive.
- - name: AfStatePassiveScan
- value: 1
- description: |
- AF is performing a passive scan of the scene in continuous
- auto-focus mode.
- - name: AfStatePassiveFocused
- value: 2
- description: |
- AF believes the scene is in focus, but might restart scanning.
- - name: AfStateActiveScan
- value: 3
- description: |
- AF is performing a scan triggered by an AF trigger request.
- \sa AfTrigger
- - name: AfStateFocusedLock
- value: 4
- description: |
- AF believes has focused correctly and has locked focus.
- - name: AfStateNotFocusedLock
- value: 5
- description: |
- AF has not been able to focus and has locked.
- - name: AfStatePassiveUnfocused
- value: 6
- description: |
- AF has completed a passive scan without finding focus.
-
- - AwbState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AWB algorithm state. Currently identical
- to ANDROID_CONTROL_AWB_STATE.
-
- Current state of the AWB algorithm.
- enum:
- - name: AwbStateInactive
- value: 0
- description: The AWB algorithm is inactive.
- - name: AwbStateSearching
- value: 1
- description: The AWB algorithm has not converged yet.
- - name: AwbConverged
- value: 2
- description: The AWB algorithm has converged.
- - name: AwbLocked
- value: 3
- description: The AWB algorithm is locked.
-
- - SensorRollingShutterSkew:
- type: int64_t
- draft: true
- description: |
- Control to report the time between the start of exposure of the first
- row and the start of exposure of the last row. Currently identical to
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
-
- - LensShadingMapMode:
- type: int32_t
- draft: true
- description: |
- Control to report if the lens shading map is available. Currently
- identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
- enum:
- - name: LensShadingMapModeOff
- value: 0
- description: No lens shading map mode is available.
- - name: LensShadingMapModeOn
- value: 1
- description: The lens shading map mode is available.
-
- - SceneFlicker:
- type: int32_t
- draft: true
- description: |
- Control to report the detected scene light frequency. Currently
- identical to ANDROID_STATISTICS_SCENE_FLICKER.
- enum:
- - name: SceneFickerOff
- value: 0
- description: No flickering detected.
- - name: SceneFicker50Hz
- value: 1
- description: 50Hz flickering detected.
- - name: SceneFicker60Hz
- value: 2
- description: 60Hz flickering detected.
-
- - PipelineDepth:
- type: int32_t
- draft: true
- description: |
- Specifies the number of pipeline stages the frame went through from when
- it was exposed to when the final completed result was available to the
- framework. Always less than or equal to PipelineMaxDepth. Currently
- identical to ANDROID_REQUEST_PIPELINE_DEPTH.
-
- The typical value for this control is 3 as a frame is first exposed,
- captured and then processed in a single pass through the ISP. Any
- additional processing step performed after the ISP pass (in example face
- detection, additional format conversions etc) count as an additional
- pipeline stage.
-
- - MaxLatency:
- type: int32_t
- draft: true
- description: |
- The maximum number of frames that can occur after a request (different
- than the previous) has been submitted, and before the result's state
- becomes synchronized. A value of -1 indicates unknown latency, and 0
- indicates per-frame control. Currently identical to
- ANDROID_SYNC_MAX_LATENCY.
-
- - TestPatternMode:
- type: int32_t
- draft: true
- description: |
- Control to select the test pattern mode. Currently identical to
- ANDROID_SENSOR_TEST_PATTERN_MODE.
- enum:
- - name: TestPatternModeOff
- value: 0
- description: |
- No test pattern mode is used. The camera device returns frames from
- the image sensor.
- - name: TestPatternModeSolidColor
- value: 1
- description: |
- Each pixel in [R, G_even, G_odd, B] is replaced by its respective
- color channel provided in test pattern data.
- \todo Add control for test pattern data.
- - name: TestPatternModeColorBars
- value: 2
- description: |
- All pixel data is replaced with an 8-bar color pattern. The vertical
- bars (left-to-right) are as follows; white, yellow, cyan, green,
- magenta, red, blue and black. Each bar should take up 1/8 of the
- sensor pixel array width. When this is not possible, the bar size
- should be rounded down to the nearest integer and the pattern can
- repeat on the right side. Each bar's height must always take up the
- full sensor pixel array height.
- - name: TestPatternModeColorBarsFadeToGray
- value: 3
- description: |
- The test pattern is similar to TestPatternModeColorBars,
- except that each bar should start at its specified color at the top
- and fade to gray at the bottom. Furthermore each bar is further
- subdevided into a left and right half. The left half should have a
- smooth gradient, and the right half should have a quantized
- gradient. In particular, the right half's should consist of blocks
- of the same color for 1/16th active sensor pixel array width. The
- least significant bits in the quantized gradient should be copied
- from the most significant bits of the smooth gradient. The height of
- each bar should always be a multiple of 128. When this is not the
- case, the pattern should repeat at the bottom of the image.
- - name: TestPatternModePn9
- value: 4
- description: |
- All pixel data is replaced by a pseudo-random sequence generated
- from a PN9 512-bit sequence (typically implemented in hardware with
- a linear feedback shift register). The generator should be reset at
- the beginning of each frame, and thus each subsequent raw frame with
- this test pattern should be exactly the same as the last.
- - name: TestPatternModeCustom1
- value: 256
- description: |
- The first custom test pattern. All custom patterns that are
- available only on this camera device are at least this numeric
- value. All of the custom test patterns will be static (that is the
- raw image must not vary from frame to frame).
-
-...
diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml
new file mode 100644
index 00000000..1dfaee0c
--- /dev/null
+++ b/src/libcamera/control_ids_core.yaml
@@ -0,0 +1,1052 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned out through Request::metadata().
+vendor: libcamera
+controls:
+ - AeEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AE.
+
+ \sa ExposureTime AnalogueGain
+
+ - AeLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AE algorithm.
+
+ If the AE algorithm is locked the value shall be set to true, if it's
+ converging it shall be set to false. If the AE algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AeEnable
+
+ # AeMeteringMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeMeteringMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a metering mode for the AE algorithm to use.
+
+ The metering modes determine which parts of the image are used to
+ determine the scene brightness. Metering modes may be platform specific
+ and not all metering modes may be supported.
+ enum:
+ - name: MeteringCentreWeighted
+ value: 0
+ description: Centre-weighted metering mode.
+ - name: MeteringSpot
+ value: 1
+ description: Spot metering mode.
+ - name: MeteringMatrix
+ value: 2
+ description: Matrix metering mode.
+ - name: MeteringCustom
+ value: 3
+ description: Custom metering mode.
+
+ # AeConstraintMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeConstraintMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a constraint mode for the AE algorithm to use.
+
+ The constraint modes determine how the measured scene brightness is
+ adjusted to reach the desired target exposure. Constraint modes may be
+ platform specific, and not all constraint modes may be supported.
+ enum:
+ - name: ConstraintNormal
+ value: 0
+ description: |
+ Default constraint mode.
+
+ This mode aims to balance the exposure of different parts of the
+ image so as to reach a reasonable average level. However, highlights
+ in the image may appear over-exposed and lowlights may appear
+ under-exposed.
+ - name: ConstraintHighlight
+ value: 1
+ description: |
+ Highlight constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ over-exposing the brightest parts (highlights) of an image.
+ Other non-highlight parts of the image may appear under-exposed.
+ - name: ConstraintShadows
+ value: 2
+ description: |
+ Shadows constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ under-exposing the dark parts (shadows) of an image. Other normally
+ exposed parts of the image may appear over-exposed.
+ - name: ConstraintCustom
+ value: 3
+ description: |
+ Custom constraint mode.
+
+ # AeExposureMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeExposureMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify an exposure mode for the AE algorithm to use.
+
+ The exposure modes specify how the desired total exposure is divided
+ between the exposure time and the sensor's analogue gain. They are
+ platform specific, and not all exposure modes may be supported.
+ enum:
+ - name: ExposureNormal
+ value: 0
+ description: Default exposure mode.
+ - name: ExposureShort
+ value: 1
+ description: Exposure mode allowing only short exposure times.
+ - name: ExposureLong
+ value: 2
+ description: Exposure mode allowing long exposure times.
+ - name: ExposureCustom
+ value: 3
+ description: Custom exposure mode.
+
+ - ExposureValue:
+ type: float
+ direction: inout
+ description: |
+ Specify an Exposure Value (EV) parameter.
+
+ The EV parameter will only be applied if the AE algorithm is currently
+ enabled.
+
+ By convention EV adjusts the exposure as log2. For example
+ EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment
+ of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
+
+ \sa AeEnable
+
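
The log2 convention above means the total exposure is scaled by 2^EV; a one-line sketch (the helper name is illustrative):

    #include <cmath>

    /* Exposure multiplier corresponding to an EV adjustment. */
    double exposureFactor(double ev)
    {
        return std::pow(2.0, ev);
    }

    /* exposureFactor(-1.0) == 0.5, exposureFactor(0.5) ~= 1.414,
       exposureFactor(2.0) == 4.0 */
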
+ - ExposureTime:
+ type: int32_t
+ direction: inout
+ description: |
+ Exposure time for the frame applied in the sensor device.
+
+ This value is specified in micro-seconds.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa AnalogueGain AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide
+ whether selecting which features are adjusted automatically would be
+ better handled through a separate AE mode control.
+
+ - AnalogueGain:
+ type: float
+ direction: inout
+ description: |
+ Analogue gain value applied in the sensor device.
+
+ The value of the control specifies the gain multiplier applied to all
+ colour channels. This value cannot be lower than 1.0.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa ExposureTime AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide
+ whether selecting which features are adjusted automatically would be
+ better handled through a separate AE mode control.
+
+ - AeFlickerMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the flicker avoidance mode for AGC/AEC.
+
+ The flicker mode determines whether, and how, the AGC/AEC algorithm
+ attempts to hide flicker effects caused by the duty cycle of artificial
+ lighting.
+
+ Although implementation dependent, many algorithms for "flicker
+ avoidance" work by restricting this exposure time to integer multiples
+ of the cycle period, wherever possible.
+
+ Implementations may not support all of the flicker modes listed below.
+
+ By default the system will start in FlickerAuto mode if this is
+ supported, otherwise the flicker mode will be set to FlickerOff.
+
+ enum:
+ - name: FlickerOff
+ value: 0
+ description: |
+ No flicker avoidance is performed.
+ - name: FlickerManual
+ value: 1
+ description: |
+ Manual flicker avoidance.
+
+ Suppress flicker effects caused by lighting running with a period
+ specified by the AeFlickerPeriod control.
+ \sa AeFlickerPeriod
+ - name: FlickerAuto
+ value: 2
+ description: |
+ Automatic flicker period detection and avoidance.
+
+ The system will automatically determine the most likely value of
+ flicker period, and avoid flicker of this frequency. Once flicker
+ is being corrected, it is implementation dependent whether the
+ system is still able to detect a change in the flicker period.
+ \sa AeFlickerDetected
+
+ - AeFlickerPeriod:
+ type: int32_t
+ direction: inout
+ description: |
+ Manual flicker period in microseconds.
+
+ This value sets the current flicker period to avoid. It is used when
+ AeFlickerMode is set to FlickerManual.
+
+ To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
+ to 100Hz), or 8333 (120Hz) for 60Hz mains.
+
+ Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
+ set means that no flicker cancellation occurs (until the value of this
+ control is updated).
+
+ Switching to modes other than FlickerManual has no effect on the
+ value of the AeFlickerPeriod control.
+
+ \sa AeFlickerMode
+
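
Artificial lights flicker at twice the mains frequency, so the value to program can be derived as in this sketch (the helper name is illustrative):

    #include <cstdint>

    /* Flicker period in microseconds for a given mains frequency in Hz. */
    constexpr std::int32_t flickerPeriodUs(std::int32_t mainsHz)
    {
        return 1000000 / (2 * mainsHz);
    }

    /* flickerPeriodUs(50) == 10000, flickerPeriodUs(60) == 8333 */
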
+ - AeFlickerDetected:
+ type: int32_t
+ direction: out
+ description: |
+ Flicker period detected in microseconds.
+
+ The value reported here indicates the currently detected flicker
+ period, or zero if no flicker at all is detected.
+
+ When AeFlickerMode is set to FlickerAuto, there may be a period during
+ which the value reported here remains zero. Once a non-zero value is
+ reported, then this is the flicker period that has been detected and is
+ now being cancelled.
+
+ In the case of 50Hz mains flicker, the value would be 10000
+ (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
+
+ It is implementation dependent whether the system can continue to detect
+ flicker of different periods when another frequency is already being
+ cancelled.
+
+ \sa AeFlickerMode
+
+ - Brightness:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed brightness parameter.
+
+ Positive values (up to 1.0) produce brighter images; negative values
+ (up to -1.0) produce darker images and 0.0 leaves pixels unchanged.
+
+ - Contrast:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed contrast parameter.
+
+ Normal contrast is given by the value 1.0; larger values produce images
+ with more contrast.
+
+ - Lux:
+ type: float
+ direction: out
+ description: |
+ Report an estimate of the current illuminance level in lux.
+
+ The Lux control can only be returned in metadata.
+
+ - AwbEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AWB.
+
+ When AWB is enabled, the algorithm estimates the colour temperature of
+ the scene and computes colour gains and the colour correction matrix
+ automatically. The computed colour temperature, gains and correction
+ matrix are reported in metadata. The corresponding controls are ignored
+ if set in a request.
+
+ When AWB is disabled, the colour temperature, gains and correction
+ matrix are not updated automatically and can be set manually in
+ requests.
+
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+ \sa ColourTemperature
+
+ # AwbMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AwbMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify the range of illuminants to use for the AWB algorithm.
+
+ The modes supported are platform specific, and not all modes may be
+ supported.
+ enum:
+ - name: AwbAuto
+ value: 0
+ description: Search over the whole colour temperature range.
+ - name: AwbIncandescent
+ value: 1
+ description: Incandescent AWB lamp mode.
+ - name: AwbTungsten
+ value: 2
+ description: Tungsten AWB lamp mode.
+ - name: AwbFluorescent
+ value: 3
+ description: Fluorescent AWB lamp mode.
+ - name: AwbIndoor
+ value: 4
+ description: Indoor AWB lighting mode.
+ - name: AwbDaylight
+ value: 5
+ description: Daylight AWB lighting mode.
+ - name: AwbCloudy
+ value: 6
+ description: Cloudy AWB lighting mode.
+ - name: AwbCustom
+ value: 7
+ description: Custom AWB mode.
+
+ - AwbLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AWB algorithm.
+
+ If the AWB algorithm is locked the value shall be set to true, if it's
+ converging it shall be set to false. If the AWB algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AwbEnable
+
+ - ColourGains:
+ type: float
+ direction: inout
+ description: |
+ Pair of gain values for the Red and Blue colour channels, in that
+ order.
+
+ ColourGains can only be applied in a Request when the AWB is disabled.
+ If ColourGains is set in a request but ColourTemperature is not, the
+ implementation shall calculate and set the ColourTemperature based on
+ the ColourGains.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [2]
+
+ - ColourTemperature:
+ type: int32_t
+ direction: out
+ description: |
+ ColourTemperature of the frame, in kelvin.
+
+ ColourTemperature can only be applied in a Request when the AWB is
+ disabled.
+
+ If ColourTemperature is set in a request but ColourGains is not, the
+ implementation shall calculate and set the ColourGains based on the
+ given ColourTemperature. If ColourTemperature is set (either directly,
+ or indirectly by setting ColourGains) but ColourCorrectionMatrix is not,
+ the ColourCorrectionMatrix is updated based on the ColourTemperature.
+
+ The ColourTemperature used to process the frame is reported in metadata.
+
+ \sa AwbEnable
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+
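
A sketch of manual white balance built from these controls (the gain values are illustrative; request submission is elided):

    #include <libcamera/base/span.h>
    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>

    void setManualWhiteBalance(libcamera::ControlList &ctrls)
    {
        using namespace libcamera;

        /* ColourGains is only honoured while AWB is disabled. */
        ctrls.set(controls::AwbEnable, false);

        const float gains[2] = { 1.8f, 1.4f }; /* red, blue */
        ctrls.set(controls::ColourGains, Span<const float, 2>(gains));
    }
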
+ - Saturation:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed saturation parameter.
+
+ Normal saturation is given by the value 1.0; larger values produce more
+ saturated colours; 0.0 produces a greyscale image.
+
+ - SensorBlackLevels:
+ type: int32_t
+ direction: out
+ description: |
+ Reports the sensor black levels used for processing a frame.
+
+ The values are in the order R, Gr, Gb, B. They are returned as numbers
+ out of a 16-bit pixel range (as if pixels ranged from 0 to 65535). The
+ SensorBlackLevels control can only be returned in metadata.
+ size: [4]
+
+ - Sharpness:
+ type: float
+ direction: inout
+ description: |
+ Intensity of the sharpening applied to the image.
+
+ A value of 0.0 means no sharpening. The minimum value means
+ minimal sharpening, and shall be 0.0 unless the camera can't
+ disable sharpening completely. The default value shall give a
+ "reasonable" level of sharpening, suitable for most use cases.
+ The maximum value may apply extremely high levels of sharpening,
+ higher than anyone could reasonably want. Negative values are
+ not allowed. Note also that sharpening is not applied to raw
+ streams.
+
+ - FocusFoM:
+ type: int32_t
+ direction: out
+ description: |
+ Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
+
+ A larger FocusFoM value indicates a more in-focus frame. This singular
+ value may be based on a combination of statistics gathered from
+ multiple focus regions within an image. The number of focus regions and
+ method of combination is platform dependent. In this respect, it is not
+ necessarily aimed at providing a way for the application to implement a
+ focus algorithm, but rather an indication of how in-focus a frame is.
+
+ - ColourCorrectionMatrix:
+ type: float
+ direction: inout
+ description: |
+ The 3x3 matrix that converts camera RGB to sRGB within the imaging
+ pipeline.
+
+ This should describe the matrix that is used after pixels have been
+ white-balanced, but before any gamma transformation. The 3x3 matrix is
+ stored in conventional reading order in an array of 9 floating point
+ values.
+
+ ColourCorrectionMatrix can only be applied in a Request when the AWB is
+ disabled.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [3,3]
+
+ - ScalerCrop:
+ type: Rectangle
+ direction: inout
+ description: |
+ Sets the image portion that will be scaled to form the whole of
+ the final output image.
+
+ The (x,y) location of this rectangle is relative to the
+ PixelArrayActiveAreas that is being used. The units remain native
+ sensor pixels, even if the sensor is being used in a binning or
+ skipping mode.
+
+ This control is only present when the pipeline supports scaling. Its
+ maximum valid value is given by the properties::ScalerCropMaximum
+ property, and the two can be used to implement digital zoom.
+
+ - DigitalGain:
+ type: float
+ direction: inout
+ description: |
+ Digital gain value applied during the processing steps applied
+ to the image as captured from the sensor.
+
+ The global digital gain factor is applied to all the colour channels
+ of the RAW image. Different pipeline models are free to
+ specify how the global gain factor applies to each separate
+ channel.
+
+ If an imaging pipeline applies digital gain in distinct
+ processing steps, this value indicates their total sum.
+ Pipelines are free to decide how to adjust each processing
+ step to respect the received gain factor and shall report
+ their total value in the request metadata.
+
+ - FrameDuration:
+ type: int64_t
+ direction: out
+ description: |
+ The instantaneous frame duration from start of frame exposure to start
+ of next exposure, expressed in microseconds.
+
+ This control is meant to be returned in metadata.
+
+ - FrameDurationLimits:
+ type: int64_t
+ direction: inout
+ description: |
+ The minimum and maximum (in that order) frame duration, expressed in
+ microseconds.
+
+ When provided by applications, the control specifies the sensor frame
+ duration interval the pipeline has to use. This limits the largest
+ exposure time the sensor can use. For example, if a maximum frame
+ duration of 33ms is requested (corresponding to 30 frames per second),
+ the sensor will not be able to raise the exposure time above 33ms.
+ A fixed frame duration is achieved by setting the minimum and maximum
+ values to be the same. Setting both values to 0 reverts to using the
+ camera defaults.
+
+ The maximum frame duration provides the absolute limit to the exposure
+ time computed by the AE algorithm and it overrides any exposure mode
+ setting specified with controls::AeExposureMode. Similarly, when a
+ manual exposure time is set through controls::ExposureTime, it also
+ gets clipped to the limits set by this control. When reported in
+ metadata, the control expresses the minimum and maximum frame durations
+ used after being clipped to the sensor provided frame duration limits.
+
+ \sa AeExposureMode
+ \sa ExposureTime
+
+ \todo Define how to calculate the capture frame rate by
+ defining controls to report additional delays introduced by
+ the capture pipeline or post-processing stages (i.e. JPEG
+ conversion, frame scaling).
+
+ \todo Provide an explicit definition of default control values, for
+ this and all other controls.
+
+ size: [2]
+
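
For example, a fixed 30 frames per second could be requested by setting equal minimum and maximum durations, as in this sketch (durations in microseconds; submission elided):

    #include <cstdint>

    #include <libcamera/base/span.h>
    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>

    void fixFrameDuration30Fps(libcamera::ControlList &ctrls)
    {
        using namespace libcamera;

        /* min == max pins the frame duration: 33333 us ~= 30 fps. */
        const std::int64_t limits[2] = { 33333, 33333 };
        ctrls.set(controls::FrameDurationLimits,
                  Span<const std::int64_t, 2>(limits));
    }
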
+ - SensorTemperature:
+ type: float
+ direction: out
+ description: |
+ Temperature measure from the camera sensor in Celsius.
+
+ This value is typically obtained by a thermal sensor present on-die or
+ in the camera module. The range of reported temperatures is device
+ dependent.
+
+ The SensorTemperature control will only be returned in metadata if a
+ thermal sensor is present.
+
+ - SensorTimestamp:
+ type: int64_t
+ direction: out
+ description: |
+ The time when the first row of the image sensor active array is exposed.
+
+ The timestamp, expressed in nanoseconds, represents a monotonically
+ increasing counter since the system boot time, as defined by the
+ Linux-specific CLOCK_BOOTTIME clock id.
+
+ The SensorTimestamp control can only be returned in metadata.
+
+ \todo Define how the sensor timestamp has to be used in the reprocessing
+ use case.
+
+ - AfMode:
+ type: int32_t
+ direction: inout
+ description: |
+ The mode of the AF (autofocus) algorithm.
+
+ An implementation may choose not to implement all the modes.
+
+ enum:
+ - name: AfModeManual
+ value: 0
+ description: |
+ The AF algorithm is in manual mode.
+
+ In this mode it will never perform any action nor move the lens of
+ its own accord, but an application can specify the desired lens
+ position using the LensPosition control. The AfState will always
+ report AfStateIdle.
+
+ If the camera is started in AfModeManual, it will move the focus
+ lens to the position specified by the LensPosition control.
+
+ This mode is the recommended default value for the AfMode control.
+ External cameras (as reported by the Location property set to
+ CameraLocationExternal) may use a different default value.
+ - name: AfModeAuto
+ value: 1
+ description: |
+ The AF algorithm is in auto mode.
+
+ In this mode the algorithm will never move the lens or change state
+ unless the AfTrigger control is used. The AfTrigger control can be
+ used to initiate a focus scan, the results of which will be
+ reported by AfState.
+
+ If the autofocus algorithm is moved from AfModeAuto to another mode
+ while a scan is in progress, the scan is cancelled immediately,
+ without waiting for the scan to finish.
+
+ When first entering this mode the AfState will report AfStateIdle.
+ When a trigger control is sent, AfState will report AfStateScanning
+ for a period before spontaneously changing to AfStateFocused or
+ AfStateFailed, depending on the outcome of the scan. It will remain
+ in this state until another scan is initiated by the AfTrigger
+ control. If a scan is cancelled (without changing to another mode),
+ AfState will return to AfStateIdle.
+ - name: AfModeContinuous
+ value: 2
+ description: |
+ The AF algorithm is in continuous mode.
+
+ In this mode the lens can re-start a scan spontaneously at any
+ moment, without any user intervention. The AfState still reports
+ whether the algorithm is currently scanning or not, though the
+ application has no ability to initiate or cancel scans, nor to move
+ the lens for itself.
+
+ However, applications can pause the AF algorithm from continuously
+ scanning by using the AfPause control. This allows video or still
+ images to be captured whilst guaranteeing that the focus is fixed.
+
+ When set to AfModeContinuous, the system will immediately initiate a
+ scan so AfState will report AfStateScanning, and will settle on one
+ of AfStateFocused or AfStateFailed, depending on the scan result.
+
+ - AfRange:
+ type: int32_t
+ direction: inout
+ description: |
+ The range of focus distances that is scanned.
+
+ An implementation may choose not to implement all the options here.
+ enum:
+ - name: AfRangeNormal
+ value: 0
+ description: |
+ A wide range of focus distances is scanned.
+
+ Scanned distances cover all the way from infinity down to close
+ distances, though depending on the implementation, possibly not
+ including the very closest macro positions.
+ - name: AfRangeMacro
+ value: 1
+ description: |
+ Only close distances are scanned.
+ - name: AfRangeFull
+ value: 2
+ description: |
+ The full range of focus distances is scanned.
+
+ This range is similar to AfRangeNormal but includes the very
+ closest macro positions.
+
+ - AfSpeed:
+ type: int32_t
+ direction: inout
+ description: |
+ Determine whether the AF is to move the lens as quickly as possible or
+ more steadily.
+
+ For example, during video recording it may be desirable not to move the
+ lens too abruptly, but when in a preview mode (waiting for a still
+ capture) it may be helpful to move the lens as quickly as is reasonably
+ possible.
+ enum:
+ - name: AfSpeedNormal
+ value: 0
+ description: Move the lens at its usual speed.
+ - name: AfSpeedFast
+ value: 1
+ description: Move the lens more quickly.
+
+ - AfMetering:
+ type: int32_t
+ direction: inout
+ description: |
+ The parts of the image used by the AF algorithm to measure focus.
+ enum:
+ - name: AfMeteringAuto
+ value: 0
+ description: |
+ Let the AF algorithm decide for itself where it will measure focus.
+ - name: AfMeteringWindows
+ value: 1
+ description: |
+ Use the rectangles defined by the AfWindows control to measure focus.
+
+ If no windows are specified the behaviour is platform dependent.
+
+ - AfWindows:
+ type: Rectangle
+ direction: inout
+ description: |
+ The focus windows used by the AF algorithm when AfMetering is set to
+ AfMeteringWindows.
+
+ The units used are pixels within the rectangle returned by the
+ ScalerCropMaximum property.
+
+ In order to be activated, a rectangle must be programmed with non-zero
+ width and height. Internally, these rectangles are intersected with the
+ ScalerCropMaximum rectangle. If the window becomes empty after this
+ operation, then the window is ignored. If all the windows end up being
+ ignored, then the behaviour is platform dependent.
+
+ On platforms that support the ScalerCrop control (for implementing
+ digital zoom, for example), no automatic recalculation or adjustment of
+ AF windows is performed internally if the ScalerCrop is changed. If any
+ window lies outside the output image after the scaler crop has been
+ applied, it is up to the application to recalculate them.
+
+ The details of how the windows are used are platform dependent. We note
+ that when there is more than one AF window, a typical implementation
+ might find the optimal focus position for each one and finally select
+ the window where the focal distance for the objects shown in that part
+ of the image is closest to the camera.
+
+ size: [n]
+
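
A sketch programming a single focus window (the coordinates are illustrative and expressed in ScalerCropMaximum pixel units):

    #include <libcamera/base/span.h>
    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>
    #include <libcamera/geometry.h>

    void setFocusWindow(libcamera::ControlList &ctrls)
    {
        using namespace libcamera;

        /* A single non-empty rectangle; empty windows are ignored. */
        const Rectangle window(480, 270, 960, 540);

        ctrls.set(controls::AfMetering, controls::AfMeteringWindows);
        ctrls.set(controls::AfWindows, Span<const Rectangle>(&window, 1));
    }
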
+ - AfTrigger:
+ type: int32_t
+ direction: in
+ description: |
+ Start an autofocus scan.
+
+ This control starts an autofocus scan when AfMode is set to AfModeAuto,
+ and is ignored if AfMode is set to AfModeManual or AfModeContinuous. It
+ can also be used to terminate a scan early.
+
+ enum:
+ - name: AfTriggerStart
+ value: 0
+ description: |
+ Start an AF scan.
+
+ Setting the control to AfTriggerStart is ignored if a scan is in
+ progress.
+ - name: AfTriggerCancel
+ value: 1
+ description: |
+ Cancel an AF scan.
+
+ This does not cause the lens to move anywhere else. Ignored if no
+ scan is in progress.
+
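
In auto mode a scan is started explicitly, as in this sketch; the application then watches AfState in the returned metadata:

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>

    void startAfScan(libcamera::ControlList &ctrls)
    {
        using namespace libcamera;

        /* AfTrigger is acted upon only in AfModeAuto. */
        ctrls.set(controls::AfMode, controls::AfModeAuto);
        ctrls.set(controls::AfTrigger, controls::AfTriggerStart);
    }
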
+ - AfPause:
+ type: int32_t
+ direction: in
+ description: |
+ Pause lens movements when in continuous autofocus mode.
+
+ This control has no effect except when in continuous autofocus mode
+ (AfModeContinuous). It can be used to pause any lens movements while
+ (for example) images are captured. The algorithm remains inactive
+ until it is instructed to resume.
+
+ enum:
+ - name: AfPauseImmediate
+ value: 0
+ description: |
+ Pause the continuous autofocus algorithm immediately.
+
+ The autofocus algorithm is paused whether or not any kind of scan
+ is underway. AfPauseState will subsequently report
+ AfPauseStatePaused. AfState may report any of AfStateScanning,
+ AfStateFocused or AfStateFailed, depending on the algorithm's state
+ when it received this control.
+ - name: AfPauseDeferred
+ value: 1
+ description: |
+ Pause the continuous autofocus algorithm at the end of the scan.
+
+ This is similar to AfPauseImmediate, and if the AfState is
+ currently reporting AfStateFocused or AfStateFailed it will remain
+ in that state and AfPauseState will report AfPauseStatePaused.
+
+ However, if the algorithm is scanning (AfStateScanning),
+ AfPauseState will report AfPauseStatePausing until the scan is
+ finished, at which point AfState will report one of AfStateFocused
+ or AfStateFailed, and AfPauseState will change to
+ AfPauseStatePaused.
+
+ - name: AfPauseResume
+ value: 2
+ description: |
+ Resume continuous autofocus operation.
+
+ The algorithm starts again from exactly where it left off, and
+ AfPauseState will report AfPauseStateRunning.
+
+ - LensPosition:
+ type: float
+ direction: inout
+ description: |
+ Set and report the focus lens position.
+
+ This control instructs the lens to move to a particular position and
+ also reports back the position of the lens for each frame.
+
+ The LensPosition control is ignored unless the AfMode is set to
+ AfModeManual, though the value is reported back unconditionally in all
+ modes.
+
+ This value, which is generally a non-integer, is the reciprocal of the
+ focal distance in metres, also known as dioptres. That is, to set a
+ focal distance D, the lens position LP is given by
+
+ \f$LP = \frac{1\mathrm{m}}{D}\f$
+
+ For example:
+
+ - 0 moves the lens to infinity.
+ - 0.5 moves the lens to focus on objects 2m away.
+ - 2 moves the lens to focus on objects 50cm away.
+ - And larger values will focus the lens closer.
+
+ The default value of the control should indicate a good general
+ position for the lens, often corresponding to the hyperfocal distance
+ (the closest position for which objects at infinity are still
+ acceptably sharp). The minimum will often be zero (meaning infinity),
+ and the maximum value defines the closest focus position.
+
+ \todo Define a property to report the Hyperfocal distance of calibrated
+ lenses.
+
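
The dioptre relation above makes manual focus a one-line conversion (a sketch; the helper name is illustrative):

    #include <libcamera/control_ids.h>
    #include <libcamera/controls.h>

    /* Focus on an object at the given distance, in metres. */
    void focusAt(libcamera::ControlList &ctrls, float distanceMetres)
    {
        using namespace libcamera;

        ctrls.set(controls::AfMode, controls::AfModeManual);
        ctrls.set(controls::LensPosition, 1.0f / distanceMetres); /* dioptres */
    }

    /* focusAt(ctrls, 2.0f) programs 0.5 dioptres; infinity is 0.0. */
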
+ - AfState:
+ type: int32_t
+ direction: out
+ description: |
+ The current state of the AF algorithm.
+
+ This control reports the current state of the AF algorithm in
+ conjunction with the reported AfMode value and (in continuous AF mode)
+ the AfPauseState value. The possible state changes are described below,
+ though we note the following state transitions that occur when the
+ AfMode is changed.
+
+ If the AfMode is set to AfModeManual, then the AfState will always
+ report AfStateIdle (even if the lens is subsequently moved). Changing
+ to the AfModeManual state does not initiate any lens movement.
+
+ If the AfMode is set to AfModeAuto then the AfState will report
+ AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent
+ together then AfState will omit AfStateIdle and move straight to
+ AfStateScanning (and start a scan).
+
+ If the AfMode is set to AfModeContinuous then the AfState will
+ initially report AfStateScanning.
+
+ enum:
+ - name: AfStateIdle
+ value: 0
+ description: |
+ The AF algorithm is in manual mode (AfModeManual) or in auto mode
+ (AfModeAuto) and a scan has not yet been triggered, or an
+ in-progress scan was cancelled.
+ - name: AfStateScanning
+ value: 1
+ description: |
+ The AF algorithm is in auto mode (AfModeAuto), and a scan has been
+ started using the AfTrigger control.
+
+ The scan can be cancelled by sending AfTriggerCancel at which point
+ the algorithm will either move back to AfStateIdle or, if the scan
+ actually completes before the cancel request is processed, to one
+ of AfStateFocused or AfStateFailed.
+
+ Alternatively the AF algorithm could be in continuous mode
+ (AfModeContinuous) at which point it may enter this state
+ spontaneously whenever it determines that a rescan is needed.
+ - name: AfStateFocused
+ value: 2
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm believes the image is now in focus.
+ - name: AfStateFailed
+ value: 3
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm did not find a good focus position.
+
+ - AfPauseState:
+ type: int32_t
+ direction: out
+ description: |
+ Report whether the autofocus is currently running, paused or pausing.
+
+ This control is only applicable in continuous (AfModeContinuous) mode,
+ and reports whether the algorithm is currently running, paused or
+ pausing (that is, will pause as soon as any in-progress scan
+ completes).
+
+ Any change to AfMode will cause AfPauseStateRunning to be reported.
+
+ enum:
+ - name: AfPauseStateRunning
+ value: 0
+ description: |
+ Continuous AF is running and the algorithm may restart a scan
+ spontaneously.
+ - name: AfPauseStatePausing
+ value: 1
+ description: |
+ Continuous AF has been sent an AfPauseDeferred control, and will
+ pause as soon as any in-progress scan completes.
+
+ When the scan completes, the AfPauseState control will report
+ AfPauseStatePaused. No new scans will start spontaneously until
+ the AfPauseResume control is sent.
+ - name: AfPauseStatePaused
+ value: 2
+ description: |
+ Continuous AF is paused.
+
+ No further state changes or lens movements will occur until the
+ AfPauseResume control is sent.
+
+ - HdrMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the mode to be used for High Dynamic Range (HDR) imaging.
+
+ HDR techniques typically include multiple exposure, image fusion and
+ tone mapping techniques to improve the dynamic range of the resulting
+ images.
+
+ When using an HDR mode, images are captured with different sets of AGC
+ settings called HDR channels. Channels indicate in particular the type
+ of exposure (short, medium or long) used to capture the raw image,
+ before fusion. Each HDR image is tagged with the corresponding channel
+ using the HdrChannel control.
+
+ \sa HdrChannel
+
+ enum:
+ - name: HdrModeOff
+ value: 0
+ description: |
+ HDR is disabled.
+
+ Metadata for this frame will not include the HdrChannel control.
+ - name: HdrModeMultiExposureUnmerged
+ value: 1
+ description: |
+ Multiple exposures will be generated in an alternating fashion.
+
+ The multiple exposures will not be merged together and will be
+ returned to the application as they are. Each image will be tagged
+ with the correct HDR channel, indicating what kind of exposure it
+ is. The tag should be the same as in the HdrModeMultiExposure case.
+
+ The expectation is that an application using this mode would merge
+ the frames to create HDR images for itself if it requires them.
+ - name: HdrModeMultiExposure
+ value: 2
+ description: |
+ Multiple exposures will be generated and merged to create HDR
+ images.
+
+ Each image will be tagged with the HDR channel (long, medium or
+ short) of the incoming frame that caused this image to be output.
+
+ Systems that use two channels for HDR will return images tagged
+ alternately as the short and long channel. Systems that use three
+ channels for HDR will cycle through the short, medium and long
+ channel before repeating.
+ - name: HdrModeSingleExposure
+ value: 3
+ description: |
+ Multiple frames all at a single exposure will be used to create HDR
+ images.
+
+ These images should be reported as all corresponding to the HDR
+ short channel.
+ - name: HdrModeNight
+ value: 4
+ description: |
+ Multiple frames will be combined to produce "night mode" images.
+
+ It is up to the implementation exactly which HDR channels it uses,
+ and the images will all be tagged accordingly with the correct HDR
+ channel information.
+
+ - HdrChannel:
+ type: int32_t
+ direction: out
+ description: |
+ The HDR channel used to capture the frame.
+
+ This value is reported back to the application so that it can discover
+ whether this capture corresponds to the short or long exposure image
+ (or any other image used by the HDR procedure). An application can
+ monitor the HDR channel to discover when the differently exposed images
+ have arrived.
+
+ This metadata is only available when an HDR mode has been enabled.
+
+ \sa HdrMode
+
+ enum:
+ - name: HdrChannelNone
+ value: 0
+ description: |
+ This image does not correspond to any of the captures used to create
+ an HDR image.
+ - name: HdrChannelShort
+ value: 1
+ description: |
+ This is a short exposure image.
+ - name: HdrChannelMedium
+ value: 2
+ description: |
+ This is a medium exposure image.
+ - name: HdrChannelLong
+ value: 3
+ description: |
+ This is a long exposure image.
+
+ - Gamma:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed gamma value.
+
+ The default gamma value must be 2.2, which closely mimics sRGB gamma.
+ Note that this is camera gamma, so it is applied as 1.0/gamma.
+
+ - DebugMetadataEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the debug metadata.
+
+...
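For orientation, here is a minimal sketch of how an application might exercise the controls defined above through the public API (the request setup and the camera pointer are assumed; the 2 m focal distance is the worked example from the LensPosition description, giving LP = 1 / 2 m = 0.5 dioptres):

#include <libcamera/libcamera.h>

using namespace libcamera;

/* Sketch: manual focus at D = 2 m, i.e. LensPosition = 1 / D = 0.5. */
void queueManualFocus(Camera *camera, Request *request)
{
	ControlList &ctrls = request->controls();
	ctrls.set(controls::AfMode, controls::AfModeManual);
	ctrls.set(controls::LensPosition, 0.5f);
	camera->queueRequest(request);
}

/* Sketch: on request completion, inspect the AF and HDR metadata. */
void requestComplete(Request *request)
{
	const ControlList &meta = request->metadata();

	if (auto state = meta.get(controls::AfState)) {
		/* Current AF state, as described above. */
	}

	if (auto channel = meta.get(controls::HdrChannel)) {
		/* Short/medium/long tag of this HDR capture. */
	}
}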
diff --git a/src/libcamera/control_ids_debug.yaml b/src/libcamera/control_ids_debug.yaml
new file mode 100644
index 00000000..79753271
--- /dev/null
+++ b/src/libcamera/control_ids_debug.yaml
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+%YAML 1.1
+---
+vendor: debug
+controls: []
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
new file mode 100644
index 00000000..87e4e02d
--- /dev/null
+++ b/src/libcamera/control_ids_draft.yaml
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned out through Request::metadata().
+vendor: draft
+controls:
+ - AePrecaptureTrigger:
+ type: int32_t
+ direction: inout
+ description: |
+ Control for AE metering trigger. Currently identical to
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+
+ Whether the camera device will trigger a precapture metering sequence
+ when it processes this request.
+ enum:
+ - name: AePrecaptureTriggerIdle
+ value: 0
+ description: The trigger is idle.
+ - name: AePrecaptureTriggerStart
+ value: 1
+ description: The pre-capture AE metering is started by the camera.
+ - name: AePrecaptureTriggerCancel
+ value: 2
+ description: |
+ The camera will cancel any active or completed metering sequence.
+ The AE algorithm is reset to its initial state.
+
+ - NoiseReductionMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the noise reduction algorithm mode. Currently
+ identical to ANDROID_NOISE_REDUCTION_MODE.
+
+ Mode of operation for the noise reduction algorithm.
+ enum:
+ - name: NoiseReductionModeOff
+ value: 0
+ description: No noise reduction is applied.
+ - name: NoiseReductionModeFast
+ value: 1
+ description: |
+ Noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeHighQuality
+ value: 2
+ description: |
+ High quality noise reduction at the expense of frame rate.
+ - name: NoiseReductionModeMinimal
+ value: 3
+ description: |
+ Minimal noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeZSL
+ value: 4
+ description: |
+ Noise reduction is applied at different levels to different streams.
+
+ - ColorCorrectionAberrationMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the color correction aberration mode. Currently
+ identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+ Mode of operation for the chromatic aberration correction algorithm.
+ enum:
+ - name: ColorCorrectionAberrationOff
+ value: 0
+ description: No aberration correction is applied.
+ - name: ColorCorrectionAberrationFast
+ value: 1
+ description: Aberration correction will not slow down the frame rate.
+ - name: ColorCorrectionAberrationHighQuality
+ value: 2
+ description: |
+ High quality aberration correction which might reduce the frame
+ rate.
+
+ - AeState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AE algorithm state. Currently identical to
+ ANDROID_CONTROL_AE_STATE.
+
+ Current state of the AE algorithm.
+ enum:
+ - name: AeStateInactive
+ value: 0
+ description: The AE algorithm is inactive.
+ - name: AeStateSearching
+ value: 1
+ description: The AE algorithm has not converged yet.
+ - name: AeStateConverged
+ value: 2
+ description: The AE algorithm has converged.
+ - name: AeStateLocked
+ value: 3
+ description: The AE algorithm is locked.
+ - name: AeStateFlashRequired
+ value: 4
+ description: The AE algorithm would need a flash for good results.
+ - name: AeStatePrecapture
+ value: 5
+ description: |
+ The AE algorithm has started a pre-capture metering session.
+ \sa AePrecaptureTrigger
+
+ - AwbState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AWB algorithm state. Currently identical
+ to ANDROID_CONTROL_AWB_STATE.
+
+ Current state of the AWB algorithm.
+ enum:
+ - name: AwbStateInactive
+ value: 0
+ description: The AWB algorithm is inactive.
+ - name: AwbStateSearching
+ value: 1
+ description: The AWB algorithm has not converged yet.
+ - name: AwbConverged
+ value: 2
+ description: The AWB algorithm has converged.
+ - name: AwbLocked
+ value: 3
+ description: The AWB algorithm is locked.
+
+ - SensorRollingShutterSkew:
+ type: int64_t
+ direction: out
+ description: |
+ Control to report the time between the start of exposure of the first
+ row and the start of exposure of the last row. Currently identical to
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW.
+
+ - LensShadingMapMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to report if the lens shading map is available. Currently
+ identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+ enum:
+ - name: LensShadingMapModeOff
+ value: 0
+ description: No lens shading map mode is available.
+ - name: LensShadingMapModeOn
+ value: 1
+ description: The lens shading map mode is available.
+
+ - PipelineDepth:
+ type: int32_t
+ direction: out
+ description: |
+ Specifies the number of pipeline stages the frame went through from when
+ it was exposed to when the final completed result was available to the
+ framework. Always less than or equal to PipelineMaxDepth. Currently
+ identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+ The typical value for this control is 3 as a frame is first exposed,
+ captured and then processed in a single pass through the ISP. Any
+ additional processing step performed after the ISP pass (for example
+ face detection, additional format conversions, etc.) counts as an
+ additional pipeline stage.
+
+ - MaxLatency:
+ type: int32_t
+ direction: out
+ description: |
+ The maximum number of frames that can occur after a request (different
+ than the previous) has been submitted, and before the result's state
+ becomes synchronized. A value of -1 indicates unknown latency, and 0
+ indicates per-frame control. Currently identical to
+ ANDROID_SYNC_MAX_LATENCY.
+
+ - TestPatternMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the test pattern mode. Currently identical to
+ ANDROID_SENSOR_TEST_PATTERN_MODE.
+ enum:
+ - name: TestPatternModeOff
+ value: 0
+ description: |
+ No test pattern mode is used. The camera device returns frames from
+ the image sensor.
+ - name: TestPatternModeSolidColor
+ value: 1
+ description: |
+ Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+ color channel provided in test pattern data.
+ \todo Add control for test pattern data.
+ - name: TestPatternModeColorBars
+ value: 2
+ description: |
+ All pixel data is replaced with an 8-bar color pattern. The vertical
+ bars (left-to-right) are as follows: white, yellow, cyan, green,
+ magenta, red, blue and black. Each bar should take up 1/8 of the
+ sensor pixel array width. When this is not possible, the bar size
+ should be rounded down to the nearest integer and the pattern can
+ repeat on the right side. Each bar's height must always take up the
+ full sensor pixel array height.
+ - name: TestPatternModeColorBarsFadeToGray
+ value: 3
+ description: |
+ The test pattern is similar to TestPatternModeColorBars,
+ except that each bar should start at its specified color at the top
+ and fade to gray at the bottom. Furthermore each bar is further
+ subdivided into a left and right half. The left half should have a
+ smooth gradient, and the right half should have a quantized
+ gradient. In particular, the right half should consist of blocks of
+ the same color for 1/16th of the active sensor pixel array width. The
+ least significant bits in the quantized gradient should be copied
+ from the most significant bits of the smooth gradient. The height of
+ each bar should always be a multiple of 128. When this is not the
+ case, the pattern should repeat at the bottom of the image.
+ - name: TestPatternModePn9
+ value: 4
+ description: |
+ All pixel data is replaced by a pseudo-random sequence generated
+ from a PN9 512-bit sequence (typically implemented in hardware with
+ a linear feedback shift register). The generator should be reset at
+ the beginning of each frame, and thus each subsequent raw frame with
+ this test pattern should be exactly the same as the last.
+ - name: TestPatternModeCustom1
+ value: 256
+ description: |
+ The first custom test pattern. All custom patterns that are
+ available only on this camera device are at least this numeric
+ value. All of the custom test patterns will be static (that is the
+ raw image must not vary from frame to frame).
+
+ - FaceDetectMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the face detection mode used by the pipeline.
+
+ Currently identical to ANDROID_STATISTICS_FACE_DETECT_MODE.
+
+ \sa FaceDetectFaceRectangles
+ \sa FaceDetectFaceScores
+ \sa FaceDetectFaceLandmarks
+ \sa FaceDetectFaceIds
+
+ enum:
+ - name: FaceDetectModeOff
+ value: 0
+ description: |
+ Pipeline doesn't perform face detection and doesn't report any
+ control related to face detection.
+ - name: FaceDetectModeSimple
+ value: 1
+ description: |
+ Pipeline performs face detection and reports the
+ FaceDetectFaceRectangles and FaceDetectFaceScores controls for each
+ detected face. FaceDetectFaceLandmarks and FaceDetectFaceIds are
+ optional.
+ - name: FaceDetectModeFull
+ value: 2
+ description: |
+ Pipeline performs face detection and reports all the controls
+ related to face detection including FaceDetectFaceRectangles,
+ FaceDetectFaceScores, FaceDetectFaceLandmarks, and
+ FaceDetectFaceIds for each detected face.
+
+ - FaceDetectFaceRectangles:
+ type: Rectangle
+ direction: out
+ description: |
+ Boundary rectangles of the detected faces. The number of values is
+ the number of detected faces.
+
+ The FaceDetectFaceRectangles control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_RECTANGLES.
+ size: [n]
+
+ - FaceDetectFaceScores:
+ type: uint8_t
+ direction: out
+ description: |
+ Confidence score of each of the detected faces. The range of score is
+ [0, 100]. The number of values should be the number of faces reported
+ in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceScores control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_SCORES.
+ size: [n]
+
+ - FaceDetectFaceLandmarks:
+ type: Point
+ direction: out
+ description: |
+ Array of human face landmark coordinates in format [..., left_eye_i,
+ right_eye_i, mouth_i, left_eye_i+1, ...], with i = index of face. The
+ number of values should be 3 * the number of faces reported in
+ FaceDetectFaceRectangles.
+
+ The FaceDetectFaceLandmarks control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_LANDMARKS.
+ size: [n]
+
+ - FaceDetectFaceIds:
+ type: int32_t
+ direction: out
+ description: |
+ Each detected face is given a unique ID that is valid for as long as the
+ face is visible to the camera device. A face that leaves the field of
+ view and later returns may be assigned a new ID. The number of values
+ should be the number of faces reported in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceIds control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_IDS.
+ size: [n]
+
+...
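A hedged sketch of consuming the face-detection metadata defined above (assumes face detection was enabled via FaceDetectMode and that request is a completed Request):

#include <iostream>
#include <libcamera/libcamera.h>

using namespace libcamera;

void printFaces(const Request *request)
{
	const ControlList &meta = request->metadata();

	auto rects = meta.get(controls::draft::FaceDetectFaceRectangles);
	auto scores = meta.get(controls::draft::FaceDetectFaceScores);
	if (!rects || !scores)
		return; /* No face detection results in this request. */

	/* Both arrays report one entry per detected face. */
	for (std::size_t i = 0; i < rects->size(); ++i)
		std::cout << "face " << i << ": " << (*rects)[i].toString()
			  << ", score " << unsigned((*scores)[i]) << std::endl;
}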
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
new file mode 100644
index 00000000..7524c5d2
--- /dev/null
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Raspberry Pi (VC4 and PiSP) specific vendor controls
+vendor: rpi
+controls:
+ - StatsOutputEnable:
+ type: bool
+ direction: inout
+ description: |
+ Toggles the Raspberry Pi IPA to output the hardware-generated statistics.
+
+ When this control is set to true, the IPA outputs a binary dump of the
+ hardware-generated statistics through the Request metadata in the
+ Bcm2835StatsOutput control.
+
+ \sa Bcm2835StatsOutput
+
+ - Bcm2835StatsOutput:
+ type: uint8_t
+ size: [n]
+ direction: out
+ description: |
+ Span of the BCM2835 ISP generated statistics for the current frame.
+
+ This is sent in the Request metadata if the StatsOutputEnable is set to
+ true. The statistics struct definition can be found in
+ include/linux/bcm2835-isp.h.
+
+ \sa StatsOutputEnable
+
+ - ScalerCrops:
+ type: Rectangle
+ size: [n]
+ direction: out
+ description: |
+ An array of rectangles, where each element has identical
+ functionality to the ScalerCrop control. This control allows the
+ Raspberry Pi pipeline handler to control individual scaler crops per
+ output stream.
+
+ The order of rectangles passed into the control must match the order of
+ streams configured by the application. The pipeline handler will only
+ configure crop rectangles up to the number of output streams configured.
+ All subsequent rectangles passed into this control are ignored by the
+ pipeline handler.
+
+ If both rpi::ScalerCrops and ScalerCrop controls are present in a
+ ControlList, the latter is discarded, and crops are obtained from this
+ control.
+
+ Note that using different crop rectangles for each output stream with
+ this control is only applicable on the Pi5/PiSP platform. This control
+ should also be considered temporary/draft and will be replaced with
+ official libcamera API support for per-stream controls in the future.
+
+ \sa ScalerCrop
+...
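As an illustration, per-stream crops might be requested as follows (a sketch; the crop rectangles and the two-stream configuration are made up, and the order must match the configured streams as noted above):

std::vector<Rectangle> crops = {
	Rectangle(0, 0, 1920, 1080),   /* Crop for the first stream. */
	Rectangle(480, 270, 960, 540), /* Crop for the second stream. */
};

request->controls().set(controls::rpi::ScalerCrops,
			Span<const Rectangle>(crops));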
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
new file mode 100644
index 00000000..6752eb98
--- /dev/null
+++ b/src/libcamera/control_ranges.yaml
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Specifies the control id ranges/offsets for core/draft libcamera and vendor
+# controls and properties.
+ranges:
+ # Core libcamera controls
+ libcamera: 0
+ # Draft designated libcamera controls
+ draft: 10000
+ # Raspberry Pi vendor controls
+ rpi: 20000
+ # Controls for debug metadata
+ debug: 30000
+ # Next range starts at 40000
+
+...
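In effect, each vendor's generated control IDs are offset by the range declared here. A hedged illustration (the upper-case enum names follow the generated control_ids.h convention, which is an assumption of this sketch):

/*
 * Core controls occupy [0, 10000), draft [10000, 20000),
 * rpi [20000, 30000) and debug [30000, 40000).
 */
static_assert(controls::draft::AE_PRECAPTURE_TRIGGER >= 10000);
static_assert(controls::draft::AE_PRECAPTURE_TRIGGER < 20000);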
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index e87d2362..17834648 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
#include "libcamera/internal/control_serializer.h"
@@ -144,7 +144,7 @@ void ControlSerializer::reset()
size_t ControlSerializer::binarySize(const ControlValue &value)
{
- return value.data().size_bytes();
+ return sizeof(ControlType) + value.data().size_bytes();
}
size_t ControlSerializer::binarySize(const ControlInfo &info)
@@ -195,6 +195,8 @@ size_t ControlSerializer::binarySize(const ControlList &list)
void ControlSerializer::store(const ControlValue &value,
ByteStreamBuffer &buffer)
{
+ const ControlType type = value.type();
+ buffer.write(&type);
buffer.write(value.data());
}
@@ -279,6 +281,7 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
entry.id = id->id();
entry.type = id->type();
entry.offset = values.offset();
+ entry.direction = static_cast<ControlId::DirectionFlags::Type>(id->direction());
entries.write(&entry);
store(info, values);
@@ -379,11 +382,13 @@ int ControlSerializer::serialize(const ControlList &list,
return 0;
}
-ControlValue ControlSerializer::loadControlValue(ControlType type,
- ByteStreamBuffer &buffer,
+ControlValue ControlSerializer::loadControlValue(ByteStreamBuffer &buffer,
bool isArray,
unsigned int count)
{
+ ControlType type;
+ buffer.read(&type);
+
ControlValue value;
value.reserve(type, isArray, count);
@@ -392,15 +397,11 @@ ControlValue ControlSerializer::loadControlValue(ControlType type,
return value;
}
-ControlInfo ControlSerializer::loadControlInfo(ControlType type,
- ByteStreamBuffer &b)
+ControlInfo ControlSerializer::loadControlInfo(ByteStreamBuffer &b)
{
- if (type == ControlTypeString)
- type = ControlTypeInteger32;
-
- ControlValue min = loadControlValue(type, b);
- ControlValue max = loadControlValue(type, b);
- ControlValue def = loadControlValue(type, b);
+ ControlValue min = loadControlValue(b);
+ ControlValue max = loadControlValue(b);
+ ControlValue def = loadControlValue(b);
return ControlInfo(min, max, def);
}
@@ -493,12 +494,17 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
/* If we're using a local id map, populate it. */
if (localIdMap) {
+ ControlId::DirectionFlags flags{
+ static_cast<ControlId::Direction>(entry->direction)
+ };
+
/**
* \todo Find a way to preserve the control name for
* debugging purpose.
*/
controlIds_.emplace_back(std::make_unique<ControlId>(entry->id,
- "", type));
+ "", "local", type,
+ flags));
(*localIdMap)[entry->id] = controlIds_.back().get();
}
@@ -513,7 +519,7 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
/* Create and store the ControlInfo. */
- ctrls.emplace(controlId, loadControlInfo(type, values));
+ ctrls.emplace(controlId, loadControlInfo(values));
}
/*
@@ -624,10 +630,8 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- ControlType type = static_cast<ControlType>(entry->type);
ctrls.set(entry->id,
- loadControlValue(type, values, entry->is_array,
- entry->count));
+ loadControlValue(values, entry->is_array, entry->count));
}
return ctrls;
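The net effect of this series of serializer changes is that every ControlValue now carries its own type tag on the wire. A sketch of the resulting layout (illustrative, not the authoritative struct definitions):

/*
 * Serialized ControlValue (sketch):
 *
 *   +----------------------+------------------------+
 *   | ControlType type tag | raw value data         |
 *   +----------------------+------------------------+
 *
 * binarySize() therefore adds sizeof(ControlType) to the payload size,
 * store() writes the tag before the data, and loadControlValue() reads
 * the tag back instead of taking the type as a parameter.
 */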
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index cf08b34a..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
#include "libcamera/internal/control_validator.h"
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 03ac6345..70f6f609 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
-#include <iomanip>
#include <sstream>
-#include <string>
#include <string.h>
+#include <string>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -55,12 +54,15 @@ static constexpr size_t ControlValueSize[] = {
[ControlTypeNone] = 0,
[ControlTypeBool] = sizeof(bool),
[ControlTypeByte] = sizeof(uint8_t),
+ [ControlTypeUnsigned16] = sizeof(uint16_t),
+ [ControlTypeUnsigned32] = sizeof(uint32_t),
[ControlTypeInteger32] = sizeof(int32_t),
[ControlTypeInteger64] = sizeof(int64_t),
[ControlTypeFloat] = sizeof(float),
[ControlTypeString] = sizeof(char),
[ControlTypeRectangle] = sizeof(Rectangle),
[ControlTypeSize] = sizeof(Size),
+ [ControlTypePoint] = sizeof(Point),
};
} /* namespace */
@@ -74,10 +76,14 @@ static constexpr size_t ControlValueSize[] = {
* The control stores a boolean value
* \var ControlTypeByte
* The control stores a byte value as an unsigned 8-bit integer
+ * \var ControlTypeUnsigned16
+ * The control stores an unsigned 16-bit integer value
+ * \var ControlTypeUnsigned32
+ * The control stores an unsigned 32-bit integer value
* \var ControlTypeInteger32
- * The control stores a 32-bit integer value
+ * The control stores a signed 32-bit integer value
* \var ControlTypeInteger64
- * The control stores a 64-bit integer value
+ * The control stores a signed 64-bit integer value
* \var ControlTypeFloat
* The control stores a 32-bit floating point value
* \var ControlTypeString
@@ -230,6 +236,16 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeUnsigned16: {
+ const uint16_t *value = reinterpret_cast<const uint16_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
+ case ControlTypeUnsigned32: {
+ const uint32_t *value = reinterpret_cast<const uint32_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
case ControlTypeInteger32: {
const int32_t *value = reinterpret_cast<const int32_t *>(data);
str += std::to_string(*value);
@@ -255,6 +271,11 @@ std::string ControlValue::toString() const
str += value->toString();
break;
}
+ case ControlTypePoint: {
+ const Point *value = reinterpret_cast<const Point *>(data);
+ str += value->toString();
+ break;
+ }
case ControlTypeNone:
case ControlTypeString:
break;
@@ -389,8 +410,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
* \brief Construct a ControlId instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
* \param[in] type The control data type
- */
+ * \param[in] direction The direction of the control, i.e. whether it can be used in Controls or Metadata
+ * \param[in] size The size of the array control, or 0 if scalar control
+ * \param[in] enumStrMap The map from enum names to values (optional)
+ */
+ControlId::ControlId(unsigned int id, const std::string &name,
+ const std::string &vendor, ControlType type,
+ DirectionFlags direction, std::size_t size,
+ const std::map<std::string, int32_t> &enumStrMap)
+ : id_(id), name_(name), vendor_(vendor), type_(type),
+ direction_(direction), size_(size), enumStrMap_(enumStrMap)
+{
+ for (const auto &pair : enumStrMap_)
+ reverseMap_[pair.second] = pair.first;
+}
/**
* \fn unsigned int ControlId::id() const
@@ -405,12 +440,68 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \fn const std::string &ControlId::vendor() const
+ * \brief Retrieve the vendor name
+ * \return The vendor name, as a string
+ */
+
+/**
* \fn ControlType ControlId::type() const
* \brief Retrieve the control data type
* \return The control data type
*/
/**
+ * \fn DirectionFlags ControlId::direction() const
+ * \brief Return the direction that the control can be used in
+ *
+ * This is similar to isInput() and isOutput(), but returns the direction
+ * flags instead of a boolean for each direction.
+ *
+ * \return The direction flags corresponding to if the control can be used as
+ * an input control or as output metadata
+ */
+
+/**
+ * \fn bool ControlId::isInput() const
+ * \brief Determine if the control is available to be used as an input control
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the former.
+ *
+ * \return True if the control can be used as an input control, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isOutput() const
+ * \brief Determine if the control is available to be used in output metadata
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the latter.
+ *
+ * \return True if the control can be returned in output metadata, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isArray() const
+ * \brief Determine if the control is an array control
+ * \return True if the control is an array control, false otherwise
+ */
+
+/**
+ * \fn std::size_t ControlId::size() const
+ * \brief Retrieve the size of the control if it is an array control
+ * \return The size of the array control, size_t::max for dynamic extent, or 0
+ * for non-array
+ */
+
+/**
+ * \fn const std::map<int32_t, std::string> &ControlId::enumerators() const
+ * \brief Retrieve the map of enum values to enum names
+ * \return The map of enum values to enum names
+ */
+
+/**
* \fn bool operator==(unsigned int lhs, const ControlId &rhs)
* \brief Compare a ControlId with a control numerical ID
* \param[in] lhs Left-hand side numerical ID
@@ -429,6 +520,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \enum ControlId::Direction
+ * \brief The direction the control is capable of being passed from/to
+ *
+ * \var ControlId::Direction::In
+ * \brief The control can be passed as input in controls
+ *
+ * \var ControlId::Direction::Out
+ * \brief The control can be returned as output in metadata
+ */
+
+/**
+ * \typedef ControlId::DirectionFlags
+ * \brief A wrapper for ControlId::Direction so that it can be used as flags
+ */
+
+/**
* \class Control
* \brief Describe a control and its intrinsic properties
*
@@ -456,10 +563,14 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
- * \fn Control::Control(unsigned int id, const char *name)
+ * \fn Control::Control(unsigned int id, const char *name, const char *vendor)
* \brief Construct a Control instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
+ * \param[in] direction The direction of the control, i.e. whether it can be
+ * used in Controls or Metadata
+ * \param[in] enumStrMap The map from enum names to values (optional)
*
* The control data type is automatically deduced from the template type T.
*/
@@ -677,6 +788,9 @@ ControlInfoMap::ControlInfoMap(Map &&info, const ControlIdMap &idmap)
bool ControlInfoMap::validate()
{
+ if (!idmap_)
+ return false;
+
for (const auto &ctrl : *this) {
const ControlId *id = ctrl.first;
auto it = idmap_->find(id->id());
@@ -719,6 +833,8 @@ bool ControlInfoMap::validate()
*/
ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -729,6 +845,8 @@ ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
*/
const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -739,6 +857,9 @@ const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
*/
ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
{
+ if (!idmap_)
+ return 0;
+
/*
* The ControlInfoMap and its idmap have a 1:1 mapping between their
* entries, we can thus just count the matching entries in idmap to
@@ -755,6 +876,9 @@ ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
*/
ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -770,6 +894,9 @@ ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
*/
ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -892,12 +1019,26 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
*/
/**
+ * \enum ControlList::MergePolicy
+ * \brief The policy used by the merge function
+ *
+ * \var ControlList::MergePolicy::KeepExisting
+ * \brief Existing controls in the target list are kept
+ *
+ * \var ControlList::MergePolicy::OverwriteExisting
+ * \brief Existing controls in the target list are updated
+ */
+
+/**
* \brief Merge the \a source into the ControlList
* \param[in] source The ControlList to merge into this object
+ * \param[in] policy Whether existing elements in *this shall be
+ * overwritten
*
* Merging two control lists copies elements from the \a source and inserts
* them in *this. If the \a source contains elements whose key is already
- * present in *this, then those elements are not overwritten.
+ * present in *this, then those elements are only overwritten if
+ * \a policy is MergePolicy::OverwriteExisting.
*
* Only control lists created from the same ControlIdMap or ControlInfoMap may
* be merged. Attempting to do otherwise results in undefined behaviour.
@@ -905,10 +1046,10 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
* \todo Reimplement or implement an overloaded version which internally uses
* std::unordered_map::merge() and accepts a non-const argument.
*/
-void ControlList::merge(const ControlList &source)
+void ControlList::merge(const ControlList &source, MergePolicy policy)
{
/**
- * \todo: ASSERT that the current and source ControlList are derived
+ * \todo ASSERT that the current and source ControlList are derived
* from a compatible ControlIdMap, to prevent undefined behaviour due to
* id collisions.
*
@@ -920,7 +1061,7 @@ void ControlList::merge(const ControlList &source)
*/
for (const auto &ctrl : source) {
- if (contains(ctrl.first)) {
+ if (policy == MergePolicy::KeepExisting && contains(ctrl.first)) {
const ControlId *id = idmap_->at(ctrl.first);
LOG(Controls, Warning)
<< "Control " << id->name() << " not overwritten";
@@ -933,17 +1074,6 @@ void ControlList::merge(const ControlList &source)
/**
* \brief Check if the list contains a control with the specified \a id
- * \param[in] id The control ID
- *
- * \return True if the list contains a matching control, false otherwise
- */
-bool ControlList::contains(const ControlId &id) const
-{
- return controls_.find(id.id()) != controls_.end();
-}
-
-/**
- * \brief Check if the list contains a control with the specified \a id
* \param[in] id The control numerical ID
*
* \return True if the list contains a matching control, false otherwise
@@ -954,22 +1084,20 @@ bool ControlList::contains(unsigned int id) const
}
/**
- * \fn template<typename T> T ControlList::get(const Control<T> &ctrl) const
+ * \fn ControlList::get(const Control<T> &ctrl) const
* \brief Get the value of control \a ctrl
* \param[in] ctrl The control
*
- * The behaviour is undefined if the control \a ctrl is not present in the
- * list. Use ControlList::contains() to test for the presence of a control in
- * the list before retrieving its value.
+ * Beside getting the value of a control, this function can also be used to
+ * check if a control is present in the ControlList by converting the returned
+ * std::optional<T> to bool (or calling its has_value() function).
*
- * The control value type shall match the type T, otherwise the behaviour is
- * undefined.
- *
- * \return The control value
+ * \return A std::optional<T> containing the control value, or std::nullopt if
+ * the control \a ctrl is not present in the list
*/
/**
- * \fn template<typename T, typename V> void ControlList::set(const Control<T> &ctrl, const V &value)
+ * \fn ControlList::set(const Control<T> &ctrl, const V &value)
* \brief Set the control \a ctrl value to \a value
* \param[in] ctrl The control
* \param[in] value The control value
@@ -983,8 +1111,7 @@ bool ControlList::contains(unsigned int id) const
*/
/**
- * \fn template<typename T, typename V> \
- * void ControlList::set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ * \fn ControlList::set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
* \copydoc ControlList::set(const Control<T> &ctrl, const V &value)
*/
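Taken together, the ControlList changes above read as follows in application code (a sketch; useBrightness() is a hypothetical consumer):

ControlList a(controls::controls);
ControlList b(controls::controls);

a.set(controls::Brightness, 0.5f);
b.set(controls::Brightness, -0.5f);

/* get() now doubles as a presence check via std::optional. */
if (std::optional<float> brightness = a.get(controls::Brightness))
	useBrightness(*brightness);

/* By default existing entries are kept... */
a.merge(b);	/* a still has Brightness == 0.5 */

/* ...unless overwriting is requested explicitly. */
a.merge(b, ControlList::MergePolicy::OverwriteExisting);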
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
new file mode 100644
index 00000000..d551b908
--- /dev/null
+++ b/src/libcamera/converter.cpp
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#include "libcamera/internal/converter.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+
+/**
+ * \file converter.h
+ * \brief Abstract converter
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Converter)
+
+/**
+ * \class Converter
+ * \brief Abstract Base Class for converter
+ *
+ * The Converter class is an Abstract Base Class defining the interfaces of
+ * converter implementations.
+ *
+ * Converters offer scaling and pixel format conversion services on an input
+ * stream. The converter can output multiple streams with individual conversion
+ * parameters from the same input stream.
+ */
+
+/**
+ * \enum Converter::Feature
+ * \brief Specify the features supported by the converter
+ * \var Converter::Feature::None
+ * \brief No extra features supported by the converter
+ * \var Converter::Feature::InputCrop
+ * \brief Cropping capability at input is supported by the converter
+ */
+
+/**
+ * \typedef Converter::Features
+ * \brief A bitwise combination of features supported by the converter
+ */
+
+/**
+ * \enum Converter::Alignment
+ * \brief The alignment mode specified when adjusting the converter input or
+ * output sizes
+ * \var Converter::Alignment::Down
+ * \brief Adjust the Converter sizes to a smaller valid size
+ * \var Converter::Alignment::Up
+ * \brief Adjust the Converter sizes to a larger valid size
+ */
+
+/**
+ * \brief Construct a Converter instance
+ * \param[in] media The media device implementing the converter
+ * \param[in] features Features flags representing supported features
+ *
+ * This searches for the entity implementing the data streaming function in the
+ * media graph entities and uses its device node as the converter device node.
+ */
+Converter::Converter(MediaDevice *media, Features features)
+{
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto it = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *entity) {
+ return entity->function() == MEDIA_ENT_F_IO_V4L;
+ });
+ if (it == entities.end()) {
+ LOG(Converter, Error)
+ << "No entity suitable for implementing a converter in "
+ << media->driver() << " entities list.";
+ return;
+ }
+
+ deviceNode_ = (*it)->deviceNode();
+ features_ = features;
+}
+
+Converter::~Converter()
+{
+}
+
+/**
+ * \fn Converter::loadConfiguration()
+ * \brief Load converter configuration from file
+ * \param[in] filename The file name path
+ *
+ * Load converter dependent configuration parameters to apply on the hardware.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isValid()
+ * \brief Check if the converter configuration is valid
+ * \return True if the converter is valid, false otherwise
+ */
+
+/**
+ * \fn Converter::formats()
+ * \brief Retrieve the list of supported pixel formats for an input pixel format
+ * \param[in] input Input pixel format to retrieve output pixel format list for
+ * \return The list of supported output pixel formats
+ */
+
+/**
+ * \fn Converter::sizes()
+ * \brief Retrieve the range of minimum and maximum output sizes for an input size
+ * \param[in] input Input stream size to retrieve range for
+ * \return A range of output image sizes
+ */
+
+/**
+ * \fn Converter::adjustInputSize()
+ * \brief Adjust the converter input \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter input stream
+ * \param[in] size The converter input size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter input size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::adjustOutputSize()
+ * \brief Adjust the converter output \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter output stream
+ * \param[in] size The converter output size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter output size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::strideAndFrameSize()
+ * \brief Retrieve the output stride and frame size for an input configuration
+ * \param[in] pixelFormat Input stream pixel format
+ * \param[in] size Input stream size
+ * \return A tuple indicating the stride and frame size or an empty tuple on error
+ */
+
+/**
+ * \fn Converter::validateOutput()
+ * \brief Validate and possibly adjust \a cfg to a valid converter output
+ * \param[inout] cfg The StreamConfiguration to validate and adjust
+ * \param[out] adjusted Set to true if \a cfg has been adjusted
+ * \param[in] align The desired alignment
+ * \return 0 if \a cfg is valid or has been adjusted, a negative error code
+ * otherwise if \a cfg cannot be adjusted
+ */
+
+/**
+ * \fn Converter::configure()
+ * \brief Configure a set of output stream conversion from an input stream
+ * \param[in] inputCfg Input stream configuration
+ * \param[out] outputCfgs A list of output stream configurations
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isConfigured()
+ * \brief Check if a given stream is configured
+ * \param[in] stream The output stream
+ * \return True if the \a stream is configured or false otherwise
+ */
+
+/**
+ * \fn Converter::exportBuffers()
+ * \brief Export buffers from the converter device
+ * \param[in] stream Output stream pointer exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store allocated buffers
+ *
+ * This function operates similarly to V4L2VideoDevice::exportBuffers() on the
+ * output stream indicated by \a stream.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+
+/**
+ * \fn Converter::start()
+ * \brief Start the converter streaming operation
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::stop()
+ * \brief Stop the converter streaming operation
+ */
+
+/**
+ * \fn Converter::queueBuffers()
+ * \brief Queue buffers to converter device
+ * \param[in] input The frame buffer to apply the conversion
+ * \param[out] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs.
+ *
+ * This function queues the \a input frame buffer on the output streams given
+ * by the \a outputs map keys, and retrieves the output frame buffers indicated
+ * by the map values.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::setInputCrop()
+ * \brief Set the crop rectangle \a rect for \a stream
+ * \param[in] stream The output stream
+ * \param[inout] rect The crop rectangle to apply and return the rectangle
+ * that is actually applied
+ *
+ * Set the crop rectangle \a rect for \a stream provided the converter supports
+ * cropping. The converter has the Feature::InputCrop flag in this case.
+ *
+ * The underlying hardware can adjust the rectangle supplied by the user
+ * due to hardware constraints. The caller can inspect \a rect to determine the
+ * actual rectangle that has been applied by the converter, after this function
+ * returns.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::inputCropBounds()
+ * \brief Retrieve the crop bounds of the converter
+ *
+ * Retrieve the minimum and maximum crop bounds of the converter. This can be
+ * used to query the crop bounds before configuring a stream.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \fn Converter::inputCropBounds(const Stream *stream)
+ * \brief Retrieve the crop bounds for \a stream
+ * \param[in] stream The output stream
+ *
+ * Retrieve the minimum and maximum crop bounds for \a stream. The converter
+ * should support cropping (Feature::InputCrop).
+ *
+ * The crop bounds depend on the configuration of the output stream and hence
+ * this function should be called after the \a stream has been configured using
+ * configure().
+ *
+ * When called with an unconfigured \a stream, this function returns a pair of
+ * null rectangles.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \var Converter::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var Converter::outputBufferReady
+ * \brief A signal emitted on each frame buffer completion of the output queue
+ */
+
+/**
+ * \var Converter::features_
+ * \brief Stores the features supported by the converter
+ */
+
+/**
+ * \fn Converter::deviceNode()
+ * \brief The converter device node attribute accessor
+ * \return The converter device node string
+ */
+
+/**
+ * \fn Converter::features()
+ * \brief Retrieve the features supported by the converter
+ * \return The converter Features flags
+ */
+
+/**
+ * \class ConverterFactoryBase
+ * \brief Base class for converter factories
+ *
+ * The ConverterFactoryBase class is the base of all specializations of the
+ * ConverterFactory class template. It implements the factory registration,
+ * maintains a registry of factories, and provides access to the registered
+ * factories.
+ */
+
+/**
+ * \brief Construct a converter factory base
+ * \param[in] name Name of the converter class
+ * \param[in] compatibles Name aliases of the converter class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used as unique identifier. If the converter
+ * implementation fully relies on a generic framework, the name should be the
+ * same as the framework. Otherwise, if the implementation is specialized, the
+ * factory name should match the driver name implementing the function.
+ *
+ * The factory \a compatibles holds a list of driver names implementing a
+ * generic subsystem without any device-specific customisations.
+ */
+ConverterFactoryBase::ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles)
+ : name_(name), compatibles_(compatibles)
+{
+ registerType(this);
+}
+
+/**
+ * \fn ConverterFactoryBase::compatibles()
+ * \return The list of compatible name aliases of the converter
+ */
+
+/**
+ * \brief Create an instance of the converter corresponding to the media device
+ * \param[in] media The media device to create the converter for
+ *
+ * The converter is created by matching the factory name or any of its
+ * compatible aliases with the media device driver name.
+ *
+ * \return A new instance of the converter subclass corresponding to the media
+ * device, or null if the media device driver name doesn't match anything
+ */
+std::unique_ptr<Converter> ConverterFactoryBase::create(MediaDevice *media)
+{
+ const std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (const ConverterFactoryBase *factory : factories) {
+ const std::vector<std::string> &compatibles = factory->compatibles();
+ auto it = std::find(compatibles.begin(), compatibles.end(), media->driver());
+
+ if (it == compatibles.end() && media->driver() != factory->name_)
+ continue;
+
+ LOG(Converter, Debug)
+ << "Creating converter from "
+ << factory->name_ << " factory with "
+ << (it == compatibles.end() ? "no" : media->driver()) << " alias.";
+
+ std::unique_ptr<Converter> converter = factory->createInstance(media);
+ if (converter->isValid())
+ return converter;
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a converter factory to the registry
+ * \param[in] factory Factory to use to construct the converter class
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the converter
+ * factory name.
+ */
+void ConverterFactoryBase::registerType(ConverterFactoryBase *factory)
+{
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all converter factory names
+ * \return The list of all converter factory names
+ */
+std::vector<std::string> ConverterFactoryBase::names()
+{
+ std::vector<std::string> list;
+
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (ConverterFactoryBase *factory : factories) {
+ list.push_back(factory->name_);
+
+ const auto &compatibles = factory->compatibles();
+ list.insert(list.end(), compatibles.begin(), compatibles.end());
+ }
+
+ return list;
+}
+
+/**
+ * \brief Retrieve the list of all converter factories
+ * \return The list of converter factories
+ */
+std::vector<ConverterFactoryBase *> &ConverterFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<ConverterFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \var ConverterFactoryBase::name_
+ * \brief The name of the factory
+ */
+
+/**
+ * \var ConverterFactoryBase::compatibles_
+ * \brief The list holding the factory compatibles
+ */
+
+/**
+ * \class ConverterFactory
+ * \brief Registration of ConverterFactory classes and creation of instances
+ * \param _Converter The converter class type for this factory
+ *
+ * To facilitate discovery and instantiation of Converter classes, the
+ * ConverterFactory class implements auto-registration of converter helpers.
+ * Each Converter subclass shall register itself using the REGISTER_CONVERTER()
+ * macro, which will create a corresponding instance of a ConverterFactory
+ * subclass and register it with the static list of factories.
+ */
+
+/**
+ * \fn ConverterFactory::ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ * \brief Construct a converter factory
+ * \details \copydetails ConverterFactoryBase::ConverterFactoryBase
+ */
+
+/**
+ * \fn ConverterFactory::createInstance() const
+ * \brief Create an instance of the Converter corresponding to the factory
+ * \param[in] media Media device pointer
+ * \return A unique pointer to a newly constructed instance of the Converter
+ * subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CONVERTER
+ * \brief Register a converter with the Converter factory
+ * \param[in] name Converter name used to register the class
+ * \param[in] converter Class name of Converter derived class to register
+ * \param[in] compatibles List of compatible names
+ *
+ * Register a Converter subclass with the factory and make it available to try
+ * and match converters.
+ */
+
+} /* namespace libcamera */
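To make the registration mechanism concrete, a hedged sketch of a converter implementation hooking into this factory (MyConverter and the driver names are hypothetical, and the pure virtual interface is elided):

class MyConverter : public Converter
{
public:
	MyConverter(MediaDevice *media)
		: Converter(media)
	{
	}

	/* ... implementations of the pure virtual Converter interface ... */
};

/*
 * Registers a factory named "my-converter" that also matches media
 * devices whose driver is named "my-m2m-driver".
 */
REGISTER_CONVERTER("my-converter", MyConverter, { "my-m2m-driver" })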
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
new file mode 100644
index 00000000..566f18ce
--- /dev/null
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
+ *
+ * V4L2 M2M Format converter
+ */
+
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
+
+#include <algorithm>
+#include <limits.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/**
+ * \file converter/converter_v4l2_m2m.h
+ * \brief V4L2 M2M based converter
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Converter)
+
+namespace {
+
+int getCropBounds(V4L2VideoDevice *device, Rectangle &minCrop,
+ Rectangle &maxCrop)
+{
+ Rectangle minC;
+ Rectangle maxC;
+
+ /* Find crop bounds */
+ minC.width = 1;
+ minC.height = 1;
+ maxC.width = UINT_MAX;
+ maxC.height = UINT_MAX;
+
+ int ret = device->setSelection(V4L2_SEL_TGT_CROP, &minC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query minimum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ ret = device->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query maximum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ /* Reset the crop to its maximum */
+ ret = device->setSelection(V4L2_SEL_TGT_CROP, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not reset selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ minCrop = minC;
+ maxCrop = maxC;
+ return 0;
+}
+
+} /* namespace */
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter::V4L2M2MStream
+ */
+
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
+{
+ m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
+
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
+
+ int ret = m2m_->open();
+ if (ret < 0)
+ m2m_.reset();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
+{
+ V4L2PixelFormat videoFormat =
+ m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
+
+ V4L2DeviceFormat format;
+ format.fourcc = videoFormat;
+ format.size = inputCfg.size;
+ format.planesCount = 1;
+ format.planes[0].bpl = inputCfg.stride;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set input format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != inputCfg.size ||
+ format.planes[0].bpl != inputCfg.stride) {
+ LOG(Converter, Error)
+ << "Input format not supported (requested "
+ << inputCfg.size << "-" << videoFormat
+ << ", got " << format << ")";
+ return -EINVAL;
+ }
+
+ /* Set the pixel format and size on the output. */
+ videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
+ format = {};
+ format.fourcc = videoFormat;
+ format.size = outputCfg.size;
+
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set output format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != outputCfg.size) {
+ LOG(Converter, Error)
+ << "Output format not supported";
+ return -EINVAL;
+ }
+
+ inputBufferCount_ = inputCfg.bufferCount;
+ outputBufferCount_ = outputCfg.bufferCount;
+
+ if (converter_->features() & Feature::InputCrop) {
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ return m2m_->capture()->exportBuffers(count, buffers);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::start()
+{
+ int ret = m2m_->output()->importBuffers(inputBufferCount_);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->importBuffers(outputBufferCount_);
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->output()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->capture()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ return 0;
+}
+
+void V4L2M2MConverter::V4L2M2MStream::stop()
+{
+ m2m_->capture()->streamOff();
+ m2m_->output()->streamOff();
+ m2m_->capture()->releaseBuffers();
+ m2m_->output()->releaseBuffers();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+{
+ int ret = m2m_->output()->queueBuffer(input);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->queueBuffer(output);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::getInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->getSelection(target, rect);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::setInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->setSelection(target, rect);
+}
+
+std::pair<Rectangle, Rectangle> V4L2M2MConverter::V4L2M2MStream::inputCropBounds()
+{
+ return inputCropBounds_;
+}
+
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
+{
+ return stream_->configuration().toString();
+}
+
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
+{
+ auto it = converter_->queue_.find(buffer);
+ if (it == converter_->queue_.end())
+ return;
+
+ if (!--it->second) {
+ converter_->inputBufferReady.emit(buffer);
+ converter_->queue_.erase(it);
+ }
+}
+
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
+{
+ converter_->outputBufferReady.emit(buffer);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter
+ */
+
+/**
+ * \class libcamera::V4L2M2MConverter
+ * \brief The V4L2 M2M converter implements the converter interface based on
+ * a V4L2 M2M device.
+ */
+
+/**
+ * \fn V4L2M2MConverter::V4L2M2MConverter
+ * \brief Construct a V4L2M2MConverter instance
+ * \param[in] media The media device implementing the converter
+ */
+
+V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
+ : Converter(media)
+{
+ if (deviceNode().empty())
+ return;
+
+ m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
+ int ret = m2m_->open();
+ if (ret < 0) {
+ m2m_.reset();
+ return;
+ }
+
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (!ret && inputCropBounds_.first != inputCropBounds_.second) {
+ features_ |= Feature::InputCrop;
+
+ LOG(Converter, Info)
+ << "Converter supports cropping on its input";
+ }
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::loadConfiguration
+ * \details \copydetails libcamera::Converter::loadConfiguration
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::isValid
+ * \details \copydetails libcamera::Converter::isValid
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::formats
+ * \details \copydetails libcamera::Converter::formats
+ */
+std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the format on the input side (V4L2 output) of the converter to
+ * enumerate the conversion capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat v4l2Format;
+ v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
+ v4l2Format.size = { 1, 1 };
+
+ int ret = m2m_->output()->setFormat(&v4l2Format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
+ LOG(Converter, Debug)
+ << "Input format " << input << " not supported.";
+ return {};
+ }
+
+ std::vector<PixelFormat> pixelFormats;
+
+ for (const auto &format : m2m_->capture()->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (pixelFormat)
+ pixelFormats.push_back(pixelFormat);
+ }
+
+ return pixelFormats;
+}
+
+/**
+ * \copydoc libcamera::Converter::sizes
+ */
+SizeRange V4L2M2MConverter::sizes(const Size &input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the size on the input side (V4L2 output) of the converter to
+ * enumerate the scaling capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat format;
+ format.fourcc = V4L2PixelFormat();
+ format.size = input;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ SizeRange sizes;
+
+ format.size = { 1, 1 };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.min = format.size;
+
+ format.size = { UINT_MAX, UINT_MAX };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.max = format.size;
+
+ return sizes;
+}
+
+/**
+ * \copydoc libcamera::Converter::strideAndFrameSize
+ */
+std::tuple<unsigned int, unsigned int>
+V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
+ const Size &size)
+{
+ V4L2DeviceFormat format;
+ format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
+ format.size = size;
+
+ int ret = m2m_->capture()->tryFormat(&format);
+ if (ret < 0)
+ return std::make_tuple(0, 0);
+
+ return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustInputSize
+ */
+Size V4L2M2MConverter::adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->output()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->output()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustOutputSize
+ */
+Size V4L2M2MConverter::adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->capture()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->capture()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+Size V4L2M2MConverter::adjustSizes(const Size &cfgSize,
+ const std::vector<SizeRange> &ranges,
+ Alignment align)
+{
+ Size size = cfgSize;
+
+ if (ranges.size() == 1) {
+ /*
+ * The device supports either V4L2_FRMSIZE_TYPE_CONTINUOUS or
+ * V4L2_FRMSIZE_TYPE_STEPWISE.
+ */
+ const SizeRange &range = *ranges.begin();
+
+ size.width = std::clamp(size.width, range.min.width,
+ range.max.width);
+ size.height = std::clamp(size.height, range.min.height,
+ range.max.height);
+
+ /*
+ * Check if any alignment is needed. If the sizes are already
+ * aligned, or the device supports V4L2_FRMSIZE_TYPE_CONTINUOUS
+ * with hStep and vStep equal to 1, we're done here.
+ */
+ int widthR = size.width % range.hStep;
+ int heightR = size.height % range.vStep;
+
+ /* Align up or down according to the caller request. */
+
+ if (widthR != 0)
+ size.width = size.width - widthR
+ + ((align == Alignment::Up) ? range.hStep : 0);
+
+ if (heightR != 0)
+ size.height = size.height - heightR
+ + ((align == Alignment::Up) ? range.vStep : 0);
+ } else {
+ /*
+ * The device supports V4L2_FRMSIZE_TYPE_DISCRETE, find the
+ * size closest to the requested output configuration.
+ *
+ * The size ranges vector is not ordered, so we sort it first.
+ * If we align up, start from the largest element.
+ */
+ std::vector<Size> sizes;
+ sizes.reserve(ranges.size());
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+ std::sort(sizes.begin(), sizes.end());
+
+ if (align == Alignment::Up)
+ std::reverse(sizes.begin(), sizes.end());
+
+ /*
+ * Return true if s2 is valid according to the desired
+ * alignment: smaller than s1 if we align down, larger than s1
+ * if we align up.
+ */
+ auto nextSizeValid = [](const Size &s1, const Size &s2, Alignment a) {
+ return a == Alignment::Down
+ ? (s1.width > s2.width && s1.height > s2.height)
+ : (s1.width < s2.width && s1.height < s2.height);
+ };
+
+ Size newSize;
+ for (const Size &sz : sizes) {
+ if (!nextSizeValid(size, sz, align))
+ break;
+
+ newSize = sz;
+ }
+
+ if (newSize.isNull()) {
+ LOG(Converter, Error)
+ << "Cannot adjust " << cfgSize
+ << " to a supported converter size";
+ return {};
+ }
+
+ size = newSize;
+ }
+
+ return size;
+}
+
+/**
+ * \copydoc libcamera::Converter::configure
+ */
+int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ int ret = 0;
+
+ streams_.clear();
+
+ for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
+
+ if (!stream->isValid()) {
+ LOG(Converter, Error)
+ << "Failed to create stream " << i;
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = stream->configure(inputCfg, cfg);
+ if (ret < 0)
+ break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
+ }
+
+ if (ret < 0) {
+ streams_.clear();
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::isConfigured
+ */
+bool V4L2M2MConverter::isConfigured(const Stream *stream) const
+{
+ return streams_.find(stream) != streams_.end();
+}
+
+/**
+ * \copydoc libcamera::Converter::exportBuffers
+ */
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
+ return -EINVAL;
+
+ return iter->second->exportBuffers(count, buffers);
+}
+
+/**
+ * \copydoc libcamera::Converter::setInputCrop
+ */
+int V4L2M2MConverter::setInputCrop(const Stream *stream, Rectangle *rect)
+{
+ if (!(features_ & Feature::InputCrop))
+ return -ENOTSUP;
+
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return -EINVAL;
+ }
+
+ return iter->second->setInputSelection(V4L2_SEL_TGT_CROP, rect);
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::inputCropBounds()
+ * \copydoc libcamera::Converter::inputCropBounds()
+ */
+
+/**
+ * \copydoc libcamera::Converter::inputCropBounds(const Stream *stream)
+ */
+std::pair<Rectangle, Rectangle>
+V4L2M2MConverter::inputCropBounds(const Stream *stream)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return {};
+ }
+
+ return iter->second->inputCropBounds();
+}
+
+/**
+ * \copydoc libcamera::Converter::start
+ */
+int V4L2M2MConverter::start()
+{
+ int ret;
+
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::stop
+ */
+void V4L2M2MConverter::stop()
+{
+ for (auto &iter : streams_)
+ iter.second->stop();
+}
+
+/**
+ * \copydoc libcamera::Converter::validateOutput
+ */
+int V4L2M2MConverter::validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align)
+{
+ V4L2VideoDevice *capture = m2m_->capture();
+ V4L2VideoDevice::Formats fmts = capture->formats();
+
+ if (adjusted)
+ *adjusted = false;
+
+ PixelFormat fmt = cfg->pixelFormat;
+ V4L2PixelFormat v4l2PixFmt = capture->toV4L2PixelFormat(fmt);
+
+ auto it = fmts.find(v4l2PixFmt);
+ if (it == fmts.end()) {
+ it = fmts.begin();
+ v4l2PixFmt = it->first;
+ cfg->pixelFormat = v4l2PixFmt.toPixelFormat();
+
+ if (adjusted)
+ *adjusted = true;
+
+ LOG(Converter, Info)
+ << "Converter output pixel format adjusted to "
+ << cfg->pixelFormat;
+ }
+
+ const Size cfgSize = cfg->size;
+ cfg->size = adjustSizes(cfgSize, it->second, align);
+
+ if (cfg->size.isNull())
+ return -EINVAL;
+
+ if (cfg->size.width != cfgSize.width ||
+ cfg->size.height != cfgSize.height) {
+ LOG(Converter, Info)
+ << "Converter size adjusted to "
+ << cfg->size;
+ if (adjusted)
+ *adjusted = true;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::queueBuffers
+ */
+int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ std::set<FrameBuffer *> outputBufs;
+ int ret;
+
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream and no two
+ * streams can reference same output framebuffers.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+
+ outputBufs.insert(buffer);
+ }
+
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
+ /* Queue the input and output buffers to all the streams. */
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Add the input buffer to the queue, with the number of streams as a
+ * reference count. Completion of the input buffer will be signalled by
+ * the stream that releases the last reference.
+ */
+ queue_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(input),
+ std::forward_as_tuple(outputs.size()));
+
+ return 0;
+}
+
+/*
+ * \todo This should be extended to include Feature::Flag to denote
+ * the features that each converter supports.
+ */
+static std::initializer_list<std::string> compatibles = {
+ "mtk-mdp",
+ "pxp",
+};
+
+REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
new file mode 100644
index 00000000..af1a80fe
--- /dev/null
+++ b/src/libcamera/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'converter_v4l2_m2m.cpp'
+])
diff --git a/src/libcamera/debug_controls.cpp b/src/libcamera/debug_controls.cpp
new file mode 100644
index 00000000..33960231
--- /dev/null
+++ b/src/libcamera/debug_controls.cpp
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Helper to easily record debug metadata inside libcamera.
+ */
+
+#include "libcamera/internal/debug_controls.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DebugControls)
+
+/**
+ * \file debug_controls.h
+ * \brief Helper to easily record debug metadata inside libcamera
+ */
+
+/**
+ * \class DebugMetadata
+ * \brief Helper to record metadata for later use
+ *
+ * Metadata is a useful tool for debugging the internal state of libcamera. It
+ * has the benefit that it is easy to use and related tooling is readily
+ * available. The difficulty is that the metadata control list is often not
+ * directly available (either because the variable to debug lives inside
+ * process() of an IPA or inside a closed algorithm class with no direct access
+ * to the IPA and therefore the metadata list).
+ *
+ * This class helps in both cases. It allows forwarding the data to a parent,
+ * or alternatively recording the data and copying it to the metadata list at
+ * a later point in time, when that list becomes available. Both mechanisms
+ * allow easy reuse and loose coupling.
+ *
+ * Typical usage is to instantiate a DebugMetadata object in every
+ * class/algorithm where debug metadata shall be recorded (the inner object). If
+ * the IPA doesn't support debug metadata, the object is still usable, but the
+ * debug data gets dropped. If the IPA supports debug metadata it will either
+ * register a parent DebugMetadata object on the inner object or manually
+ * retrieve the data using enable()/moveEntries().
+ *
+ * The concepts of forwarding to a parent and recording for later retrieval are
+ * mutually exclusive and the parent takes precedence. E.g. it is not allowed to
+ * enable a DebugMetadata object, log entries to it and later set the parent.
+ *
+ * This is done to keep the path open for using other means of data transport
+ * (like tracing). For every tracing event a corresponding context needs to be
+ * available at set() time. The parent can be treated as such, and the top
+ * level object (the one where enable() gets called) also lives in a place
+ * where that information is available.
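+ *
+ * A minimal usage sketch of the inner-object pattern (the algorithm class
+ * and the control id are illustrative, not part of libcamera):
+ *
+ * \code
+ * class MyAlgorithm
+ * {
+ * public:
+ * 	void process()
+ * 	{
+ * 		// Cached or forwarded depending on enable()/setParent().
+ * 		debugMetadata_.set(controls::DebugMetadataExample, 42);
+ * 	}
+ *
+ * 	DebugMetadata debugMetadata_;
+ * };
+ *
+ * // In the IPA, once the metadata ControlList is available:
+ * algo.debugMetadata_.enable(true);
+ * algo.process();
+ * algo.debugMetadata_.moveEntries(metadata);
+ * \endcode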
+ */
+
+/**
+ * \fn DebugMetadata::enableByControl()
+ * \brief Enable based on controls::DebugMetadataEnable in the supplied
+ * ControlList
+ * \param[in] controls The supplied ControlList
+ *
+ * This function looks for controls::DebugMetadataEnable and enables or disables
+ * debug metadata handling accordingly.
+ */
+void DebugMetadata::enableByControl(const ControlList &controls)
+{
+ const auto &ctrl = controls.get(controls::DebugMetadataEnable);
+ if (ctrl)
+ enable(*ctrl);
+}
+
+/**
+ * \fn DebugMetadata::enable()
+ * \brief Enable or disable metadata handling
+ * \param[in] enable The enable state
+ *
+ * When \a enable is true, all calls to set() get cached and can later be
+ * retrieved using moveEntries(). When \a enable is false, the cache gets
+ * cleared and no further metadata is recorded.
+ *
+ * Forwarding to a parent is independent of the enabled state.
+ */
+void DebugMetadata::enable(bool enable)
+{
+ enabled_ = enable;
+ if (!enabled_)
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::setParent()
+ * \brief Set the parent metadata handler to \a parent
+ * \param[in] parent The parent handler
+ *
+ * When a \a parent is set, all further calls to set() are unconditionally
+ * forwarded to that instance.
+ *
+ * The parent can be reset by passing a nullptr.
+ */
+void DebugMetadata::setParent(DebugMetadata *parent)
+{
+ parent_ = parent;
+
+ if (!parent_)
+ return;
+
+ if (!cache_.empty())
+ LOG(DebugControls, Error)
+ << "Controls were recorded before setting a parent."
+ << " These are dropped.";
+
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::moveEntries()
+ * \brief Move all cached entries into control list \a list
+ * \param[in] list The control list
+ *
+ * This function moves all entries into the list specified by \a list. Duplicate
+ * entries in \a list get overwritten.
+ */
+void DebugMetadata::moveEntries(ControlList &list)
+{
+ list.merge(std::move(cache_), ControlList::MergePolicy::OverwriteExisting);
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::set(const Control<T> &ctrl, const V &value)
+ * \brief Set the value of \a ctrl to \a value
+ * \param[in] ctrl The control to set
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+
+/**
+ * \fn DebugMetadata::set(unsigned int id, const ControlValue &value)
+ * \brief Set the value of control \a id to \a value
+ * \param[in] id The id of the control
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+void DebugMetadata::set(unsigned int id, const ControlValue &value)
+{
+ if (parent_) {
+ parent_->set(id, value);
+ return;
+ }
+
+ if (!enabled_)
+ return;
+
+ cache_.set(id, value);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
index 9667187e..94d0a575 100644
--- a/src/libcamera/delayed_controls.cpp
+++ b/src/libcamera/delayed_controls.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#include "libcamera/internal/delayed_controls.h"
@@ -115,8 +115,6 @@ DelayedControls::DelayedControls(V4L2Device *device,
*/
void DelayedControls::reset()
{
- running_ = false;
- firstSequence_ = 0;
queueCount_ = 1;
writeCount_ = 0;
@@ -204,8 +202,7 @@ bool DelayedControls::push(const ControlList &controls)
*/
ControlList DelayedControls::get(uint32_t sequence)
{
- uint32_t adjustedSeq = sequence - firstSequence_;
- unsigned int index = std::max<int>(0, adjustedSeq - maxDelay_);
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
ControlList out(device_->controls());
for (const auto &ctrl : values_) {
@@ -236,11 +233,6 @@ void DelayedControls::applyControls(uint32_t sequence)
{
LOG(DelayedControls, Debug) << "frame " << sequence << " started";
- if (!running_) {
- firstSequence_ = sequence;
- running_ = true;
- }
-
/*
* Create control list peeking ahead in the value queue to ensure
* values are set in time to satisfy the sensor delay.
@@ -279,7 +271,7 @@ void DelayedControls::applyControls(uint32_t sequence)
}
}
- writeCount_ = sequence - firstSequence_ + 1;
+ writeCount_ = sequence + 1;
while (writeCount_ > queueCount_) {
LOG(DelayedControls, Debug)
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index d1258050..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
#include "libcamera/internal/device_enumerator.h"
@@ -56,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
@@ -101,8 +101,14 @@ bool DeviceMatch::match(const MediaDevice *device) const
for (const MediaEntity *entity : device->entities()) {
if (name == entity->name()) {
- found = true;
- break;
+ if (!entity->deviceNode().empty()) {
+ found = true;
+ break;
+ } else {
+ LOG(DeviceEnumerator, Debug)
+ << "Skip " << entity->name()
+ << ": no device node";
+ }
}
}
@@ -161,7 +167,7 @@ std::unique_ptr<DeviceEnumerator> DeviceEnumerator::create()
DeviceEnumerator::~DeviceEnumerator()
{
- for (std::shared_ptr<MediaDevice> media : devices_) {
+ for (const std::shared_ptr<MediaDevice> &media : devices_) {
if (media->busy())
LOG(DeviceEnumerator, Error)
<< "Removing media device " << media->deviceNode()
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 686bb809..7866885c 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#include "libcamera/internal/device_enumerator_sysfs.h"
@@ -33,7 +33,7 @@ int DeviceEnumeratorSysfs::init()
int DeviceEnumeratorSysfs::enumerate()
{
struct dirent *ent;
- DIR *dir;
+ DIR *dir = nullptr;
static const char * const sysfs_dirs[] = {
"/sys/subsystem/media/devices",
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 5317afbd..4e20a3cc 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
#include "libcamera/internal/device_enumerator_udev.h"
@@ -13,6 +13,7 @@
#include <list>
#include <map>
#include <string.h>
+#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
@@ -315,6 +316,7 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
* enumerator.
*/
deps->deps_.erase(devnum);
+ devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
@@ -330,18 +332,26 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
- std::string action(udev_device_get_action(dev));
- std::string deviceNode(udev_device_get_devnode(dev));
+ if (!dev) {
+ int err = errno;
+ LOG(DeviceEnumerator, Warning)
+ << "Ignoring notfication received without a device: "
+ << strerror(err);
+ return;
+ }
+
+ std::string_view action(udev_device_get_action(dev));
+ std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
- << action << " device " << udev_device_get_devnode(dev);
+ << action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
- removeDevice(deviceNode);
+ removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp
new file mode 100644
index 00000000..a014c3b4
--- /dev/null
+++ b/src/libcamera/dma_buf_allocator.cpp
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-buf allocations.
+ */
+
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/udmabuf.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/memfd.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/framebuffer.h>
+
+/**
+ * \file dma_buf_allocator.cpp
+ * \brief dma-buf allocator
+ */
+
+namespace libcamera {
+
+#ifndef __DOXYGEN__
+struct DmaBufAllocatorInfo {
+ DmaBufAllocator::DmaBufAllocatorFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaBufAllocatorInfo, 4> providerInfos = { {
+ /*
+ * /dev/dma_heap/linux,cma is the CMA dma-heap. When the cma heap size is
+ * specified on the kernel command line, this gets renamed to "reserved".
+ */
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/linux,cma" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/reserved" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap, "/dev/dma_heap/system" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf, "/dev/udmabuf" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaBufAllocator)
+
+/**
+ * \class DmaBufAllocator
+ * \brief Helper class for dma-buf allocations
+ *
+ * This class wraps a userspace dma-buf provider selected at construction time,
+ * and exposes functions to allocate dma-buffers from this provider.
+ *
+ * Different providers may provide dma-buffers with different properties for
+ * the underlying memory. Which providers are acceptable is specified through
+ * the type argument passed to the DmaBufAllocator() constructor.
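+ *
+ * A minimal usage sketch (the buffer name and size are arbitrary examples):
+ *
+ * \code
+ * DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ * 			     DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf);
+ * if (!allocator.isValid())
+ * 	return;
+ *
+ * UniqueFD fd = allocator.alloc("scratch", 4096);
+ * if (!fd.isValid())
+ * 	return;
+ * \endcode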
+ */
+
+/**
+ * \enum DmaBufAllocator::DmaBufAllocatorFlag
+ * \brief Type of the dma-buf provider
+ * \var DmaBufAllocator::CmaHeap
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaBufAllocator::SystemHeap
+ * \brief Allocate from the system dma-heap, using the page allocator
+ * \var DmaBufAllocator::UDmaBuf
+ * \brief Allocate using a memfd + /dev/udmabuf
+ */
+
+/**
+ * \typedef DmaBufAllocator::DmaBufAllocatorFlags
+ * \brief A bitwise combination of DmaBufAllocator::DmaBufAllocatorFlag values
+ */
+
+/**
+ * \brief Construct a DmaBufAllocator of a given type
+ * \param[in] type The type(s) of the dma-buf providers to allocate from
+ *
+ * The dma-buf provider type is selected with the \a type parameter, which
+ * defaults to the CMA heap. If no provider of the given type can be accessed,
+ * the constructed DmaBufAllocator instance is invalid as indicated by
+ * the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case
+ * the constructed DmaBufAllocator will match one of the types. If multiple
+ * requested types can work on the system, which provider is used is undefined.
+ */
+DmaBufAllocator::DmaBufAllocator(DmaBufAllocatorFlags type)
+{
+ for (const auto &info : providerInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaBufAllocator, Debug) << "Using " << info.deviceNodeName;
+ providerHandle_ = UniqueFD(ret);
+ type_ = info.type;
+ break;
+ }
+
+ if (!providerHandle_.isValid())
+ LOG(DmaBufAllocator, Error) << "Could not open any dma-buf provider";
+}
+
+/**
+ * \brief Destroy the DmaBufAllocator instance
+ */
+DmaBufAllocator::~DmaBufAllocator() = default;
+
+/**
+ * \fn DmaBufAllocator::isValid()
+ * \brief Check if the DmaBufAllocator instance is valid
+ * \return True if the DmaBufAllocator is valid, false otherwise
+ */
+UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size)
+{
+ /* Size must be a multiple of the page size. Round it up. */
+ std::size_t pageMask = sysconf(_SC_PAGESIZE) - 1;
+ size = (size + pageMask) & ~pageMask;
+
+ /* udmabuf dma-buffers *must* have the F_SEAL_SHRINK seal. */
+ UniqueFD memfd = MemFd::create(name, size, MemFd::Seal::Shrink);
+ if (!memfd.isValid())
+ return {};
+
+ struct udmabuf_create create;
+
+ create.memfd = memfd.get();
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = size;
+
+ int ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Failed to create dma buf for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ /* The underlying memfd is kept as a reference in the kernel. */
+ return UniqueFD(ret);
+}
+
+UniqueFD DmaBufAllocator::allocFromHeap(const char *name, std::size_t size)
+{
+ struct dma_heap_allocation_data alloc = {};
+ int ret;
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(providerHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+/**
+ * \brief Allocate a dma-buf from the DmaBufAllocator
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, an invalid UniqueFD is returned.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaBufAllocator::alloc(const char *name, std::size_t size)
+{
+ if (!name)
+ return {};
+
+ if (type_ == DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+ return allocFromUDmaBuf(name, size);
+ else
+ return allocFromHeap(name, size);
+}
+
+/**
+ * \brief Allocate and export buffers from the DmaBufAllocator
+ * \param[in] count The number of requested FrameBuffers
+ * \param[in] planeSizes The sizes of planes in each FrameBuffer
+ * \param[out] buffers Array of buffers successfully allocated
+ *
+ * All planes in a FrameBuffer are allocated from a single dma-buf.
+ * \todo Add the option to allocate each plane from a separate dma-buf.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int DmaBufAllocator::exportBuffers(unsigned int count,
+ const std::vector<unsigned int> &planeSizes,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (unsigned int i = 0; i < count; ++i) {
+ std::unique_ptr<FrameBuffer> buffer =
+ createBuffer("frame-" + std::to_string(i), planeSizes);
+ if (!buffer) {
+ LOG(DmaBufAllocator, Error) << "Unable to create buffer";
+
+ buffers->clear();
+ return -EINVAL;
+ }
+
+ buffers->push_back(std::move(buffer));
+ }
+
+ return count;
+}
+
+std::unique_ptr<FrameBuffer>
+DmaBufAllocator::createBuffer(std::string name,
+ const std::vector<unsigned int> &planeSizes)
+{
+ std::vector<FrameBuffer::Plane> planes;
+
+ unsigned int frameSize = 0, offset = 0;
+ for (auto planeSize : planeSizes)
+ frameSize += planeSize;
+
+ SharedFD fd(alloc(name.c_str(), frameSize));
+ if (!fd.isValid())
+ return nullptr;
+
+ for (auto planeSize : planeSizes) {
+ planes.emplace_back(FrameBuffer::Plane{ fd, offset, planeSize });
+ offset += planeSize;
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
+}
+
+/**
+ * \class DmaSyncer
+ * \brief Helper class for dma-buf synchronization
+ *
+ * This class ties a userspace dma-buf synchronization operation to an
+ * object's lifetime.
+ *
+ * It is used when the user needs to access a dma-buf with the CPU, typically
+ * one mapped with MappedFrameBuffer, so that the buffer is synchronized
+ * between the CPU and the ISP.
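+ *
+ * A minimal usage sketch, assuming \a fd is the SharedFD of a mapped plane:
+ *
+ * \code
+ * {
+ * 	DmaSyncer sync(fd, DmaSyncer::SyncType::Read);
+ * 	// CPU reads of the mapped buffer are coherent within this scope.
+ * }
+ * \endcode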
+ */
+
+/**
+ * \enum DmaSyncer::SyncType
+ * \brief Read and/or write access via the CPU map
+ * \var DmaSyncer::Read
+ * \brief Indicates that the mapped dma-buf will be read by the client via the
+ * CPU map
+ * \var DmaSyncer::Write
+ * \brief Indicates that the mapped dma-buf will be written by the client via the
+ * CPU map
+ * \var DmaSyncer::ReadWrite
+ * \brief Indicates that the mapped dma-buf will be read and written by the
+ * client via the CPU map
+ */
+
+/**
+ * \brief Construct a DmaSyncer with a dma-buf's fd and the access type
+ * \param[in] fd The dma-buf's file descriptor to synchronize
+ * \param[in] type Read and/or write access via the CPU map
+ */
+DmaSyncer::DmaSyncer(SharedFD fd, SyncType type)
+ : fd_(fd)
+{
+ switch (type) {
+ case SyncType::Read:
+ flags_ = DMA_BUF_SYNC_READ;
+ break;
+ case SyncType::Write:
+ flags_ = DMA_BUF_SYNC_WRITE;
+ break;
+ case SyncType::ReadWrite:
+ flags_ = DMA_BUF_SYNC_RW;
+ break;
+ }
+
+ sync(DMA_BUF_SYNC_START);
+}
+
+/**
+ * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+/**
+ * \fn DmaSyncer::operator=(DmaSyncer &&other);
+ * \brief Enable move on class DmaSyncer
+ * \param[in] other The other instance
+ */
+
+DmaSyncer::~DmaSyncer()
+{
+ sync(DMA_BUF_SYNC_END);
+}
+
+void DmaSyncer::sync(uint64_t step)
+{
+ struct dma_buf_sync sync = {
+ .flags = flags_ | step
+ };
+
+ int ret;
+ do {
+ ret = ioctl(fd_.get(), DMA_BUF_IOCTL_SYNC, &sync);
+ } while (ret && (errno == EINTR || errno == EAGAIN));
+
+ if (ret) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Unable to sync dma fd: " << fd_.get()
+ << ", err: " << strerror(ret)
+ << ", flags: " << sync.flags;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
index 7b784778..73299b40 100644
--- a/src/libcamera/fence.cpp
+++ b/src/libcamera/fence.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * fence.cpp - Synchronization fence
+ * Synchronization fence
*/
#include "libcamera/fence.h"
@@ -11,7 +11,7 @@ namespace libcamera {
/**
*
- * \file libcamera/fence.h
+ * \file fence.h
* \brief Definition of the Fence class
*/
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index afcaabc5..bfcdfc08 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
#include "libcamera/internal/formats.h"
-#include <algorithm>
-#include <errno.h>
+#include <map>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -16,7 +15,7 @@
#include <libcamera/formats.h>
/**
- * \file internal/formats.h
+ * \file libcamera/internal/formats.h
* \brief Types and helper functions to handle libcamera image formats
*/
@@ -33,7 +32,7 @@ LOG_DEFINE_CATEGORY(Formats)
* used in pipeline handlers.
*
* \var PixelFormatInfo::name
- * \brief The format name as a human-readable string, used as the test
+ * \brief The format name as a human-readable string, used as the text
* representation of the PixelFormat
*
* \var PixelFormatInfo::format
@@ -42,19 +41,16 @@ LOG_DEFINE_CATEGORY(Formats)
* \var PixelFormatInfo::v4l2Formats
* \brief The V4L2 pixel formats corresponding to the PixelFormat
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. The two entries in the array store the contiguous and
- * non-contiguous V4L2 formats respectively. If the PixelFormat isn't a
- * multiplanar format, or if no corresponding non-contiguous V4L2 format
- * exists, the second entry is invalid.
+ * Multiple V4L2 formats may exist for one PixelFormat, as V4L2 defines
+ * separate 4CCs for contiguous and non-contiguous versions of the same image
+ * format.
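+ * For instance, NV12 maps to both the contiguous V4L2_PIX_FMT_NV12 and the
+ * non-contiguous V4L2_PIX_FMT_NV12M variants.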
*
* \var PixelFormatInfo::bitsPerPixel
* \brief The average number of bits per pixel
*
- * The number per pixel averages the total number of bits for all colour
- * components over the whole image, excluding any padding bits or padding
- * pixels.
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
*
* For formats that store pixels with bit padding within words, only the
* effective bits are taken into account. For instance, 12-bit Bayer data
@@ -156,36 +152,27 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB565, {
.name = "RGB565",
.format = formats::RGB565,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
- .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
{ formats::RGB565_BE, {
.name = "RGB565_BE",
.format = formats::RGB565_BE,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565X),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
- .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
{ formats::BGR888, {
.name = "BGR888",
.format = formats::BGR888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -195,10 +182,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB888, {
.name = "RGB888",
.format = formats::RGB888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGR24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -208,10 +192,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XRGB8888, {
.name = "XRGB8888",
.format = formats::XRGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XBGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -221,10 +202,17 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XBGR8888, {
.name = "XBGR8888",
.format = formats::XBGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGBX8888, {
+ .name = "RGBX8888",
+ .format = formats::RGBX8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -234,10 +222,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRX8888, {
.name = "BGRX8888",
.format = formats::BGRX8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XRGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -247,10 +232,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ABGR8888, {
.name = "ABGR8888",
.format = formats::ABGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -260,10 +242,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ARGB8888, {
.name = "ARGB8888",
.format = formats::ARGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -273,10 +252,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRA8888, {
.name = "BGRA8888",
.format = formats::BGRA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ARGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -286,25 +262,39 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGBA8888, {
.name = "RGBA8888",
.format = formats::RGBA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV packed formats. */
{ formats::YUYV, {
.name = "YUYV",
.format = formats::YUYV,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUYV), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -314,10 +304,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVYU, {
.name = "YVYU",
.format = formats::YVYU,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVYU),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVYU), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -327,10 +314,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::UYVY, {
.name = "UYVY",
.format = formats::UYVY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_UYVY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_UYVY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -340,24 +324,41 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::VYUY, {
.name = "VYUY",
.format = formats::VYUY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_VYUY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 2,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::AVUY8888, {
+ .name = "AVUY8888",
+ .format = formats::AVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XVUY8888, {
+ .name = "XVUY8888",
+ .format = formats::XVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV planar formats. */
{ formats::NV12, {
.name = "NV12",
.format = formats::NV12,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV12),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -369,8 +370,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV21",
.format = formats::NV21,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV21),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -382,8 +383,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV16",
.format = formats::NV16,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV16),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -395,8 +396,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV61",
.format = formats::NV61,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV61),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -407,10 +408,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV24, {
.name = "NV24",
.format = formats::NV24,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -420,10 +418,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV42, {
.name = "NV42",
.format = formats::NV42,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV42),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV42), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -434,8 +429,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV420",
.format = formats::YUV420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -447,8 +442,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YVU420",
.format = formats::YVU420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -460,8 +455,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV422",
.format = formats::YUV422,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -469,15 +464,42 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.pixelsPerGroup = 2,
.planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
} },
+ { formats::YVU422, {
+ .name = "YVU422",
+ .format = formats::YVU422,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YUV444, {
+ .name = "YUV444",
+ .format = formats::YUV444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YVU444, {
+ .name = "YVU444",
+ .format = formats::YVU444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
/* Greyscale formats. */
{ formats::R8, {
.name = "R8",
.format = formats::R8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_GREY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_GREY), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -487,51 +509,69 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::R10, {
.name = "R10",
.format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::R10_CSI2P, {
+ .name = "R10_CSI2P",
+ .format = formats::R10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R12_CSI2P, {
+ .name = "R12_CSI2P",
+ .format = formats::R12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::R12, {
.name = "R12",
.format = formats::R12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
- { formats::R10_CSI2P, {
- .name = "R10_CSI2P",
- .format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
- .multi = V4L2PixelFormat(),
- },
- .bitsPerPixel = 10,
+ { formats::R16, {
+ .name = "R16",
+ .format = formats::R16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = true,
- .pixelsPerGroup = 4,
- .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
} },
/* Bayer formats. */
{ formats::SBGGR8, {
.name = "SBGGR8",
.format = formats::SBGGR8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -541,10 +581,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG8, {
.name = "SGBRG8",
.format = formats::SGBRG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -554,10 +591,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG8, {
.name = "SGRBG8",
.format = formats::SGRBG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -567,10 +601,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB8, {
.name = "SRGGB8",
.format = formats::SRGGB8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -580,10 +611,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10, {
.name = "SBGGR10",
.format = formats::SBGGR10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -593,10 +621,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10, {
.name = "SGBRG10",
.format = formats::SGBRG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -606,10 +631,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10, {
.name = "SGRBG10",
.format = formats::SGRBG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -619,10 +641,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10, {
.name = "SRGGB10",
.format = formats::SRGGB10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -632,10 +651,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_CSI2P, {
.name = "SBGGR10_CSI2P",
.format = formats::SBGGR10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -645,10 +661,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_CSI2P, {
.name = "SGBRG10_CSI2P",
.format = formats::SGBRG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -658,10 +671,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_CSI2P, {
.name = "SGRBG10_CSI2P",
.format = formats::SGRBG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -671,10 +681,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_CSI2P, {
.name = "SRGGB10_CSI2P",
.format = formats::SRGGB10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -684,10 +691,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12, {
.name = "SBGGR12",
.format = formats::SBGGR12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -697,10 +701,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12, {
.name = "SGBRG12",
.format = formats::SGBRG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -710,10 +711,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12, {
.name = "SGRBG12",
.format = formats::SGRBG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -723,10 +721,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12, {
.name = "SRGGB12",
.format = formats::SRGGB12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -736,10 +731,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12_CSI2P, {
.name = "SBGGR12_CSI2P",
.format = formats::SBGGR12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -749,10 +741,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12_CSI2P, {
.name = "SGBRG12_CSI2P",
.format = formats::SGBRG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -762,10 +751,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12_CSI2P, {
.name = "SGRBG12_CSI2P",
.format = formats::SGRBG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -775,23 +761,97 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12_CSI2P, {
.name = "SRGGB12_CSI2P",
.format = formats::SRGGB12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 2,
.planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::SBGGR14, {
+ .name = "SBGGR14",
+ .format = formats::SBGGR14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14, {
+ .name = "SGBRG14",
+ .format = formats::SGBRG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14, {
+ .name = "SGRBG14",
+ .format = formats::SGRBG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14, {
+ .name = "SRGGB14",
+ .format = formats::SRGGB14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14_CSI2P, {
+ .name = "SBGGR14_CSI2P",
+ .format = formats::SBGGR14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14_CSI2P, {
+ .name = "SGBRG14_CSI2P",
+ .format = formats::SGBRG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14_CSI2P, {
+ .name = "SGRBG14_CSI2P",
+ .format = formats::SGRBG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14_CSI2P, {
+ .name = "SRGGB14_CSI2P",
+ .format = formats::SRGGB14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::SBGGR16, {
.name = "SBGGR16",
.format = formats::SBGGR16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -801,10 +861,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG16, {
.name = "SGBRG16",
.format = formats::SGBRG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -814,10 +871,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG16, {
.name = "SGRBG16",
.format = formats::SGRBG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -827,10 +881,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB16, {
.name = "SRGGB16",
.format = formats::SRGGB16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -840,10 +891,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_IPU3, {
.name = "SBGGR10_IPU3",
.format = formats::SBGGR10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -854,10 +902,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_IPU3, {
.name = "SGBRG10_IPU3",
.format = formats::SGBRG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -867,10 +912,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_IPU3, {
.name = "SGRBG10_IPU3",
.format = formats::SGRBG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -880,24 +922,60 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_IPU3, {
.name = "SRGGB10_IPU3",
.format = formats::SRGGB10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 25,
.planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
} },
-
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* Compressed formats. */
{ formats::MJPEG, {
.name = "MJPEG",
.format = formats::MJPEG,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
- .multi = V4L2PixelFormat(),
+ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
},
.bitsPerPixel = 0,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -926,7 +1004,7 @@ const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
const auto iter = pixelFormatInfo.find(format);
if (iter == pixelFormatInfo.end()) {
LOG(Formats, Warning)
- << "Unsupported pixel format 0x"
+ << "Unsupported pixel format "
<< utils::hex(format.fourcc());
return pixelFormatInfoInvalid;
}
@@ -935,22 +1013,22 @@ const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
}
/**
- * \brief Retrieve information about a pixel format
+ * \brief Retrieve information about a V4L2 pixel format
* \param[in] format The V4L2 pixel format
* \return The PixelFormatInfo describing the V4L2 \a format if known, or an
* invalid PixelFormatInfo otherwise
*/
const PixelFormatInfo &PixelFormatInfo::info(const V4L2PixelFormat &format)
{
- const auto &info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](auto pair) {
- return pair.second.v4l2Formats.single == format ||
- pair.second.v4l2Formats.multi == format;
- });
- if (info == pixelFormatInfo.end())
+ PixelFormat pixelFormat = format.toPixelFormat(false);
+ if (!pixelFormat.isValid())
return pixelFormatInfoInvalid;
- return info->second;
+ const auto iter = pixelFormatInfo.find(pixelFormat);
+ if (iter == pixelFormatInfo.end())
+ return pixelFormatInfoInvalid;
+
+ return iter->second;
}
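With this change the V4L2 overload no longer scans the whole table with find_if(); it converts the V4L2 format to a PixelFormat and reuses the map lookup. A minimal sketch of how a caller might exercise both overloads, assuming access to libcamera's internal formats.h and v4l2_pixelformat.h headers:

#include <cassert>

#include <libcamera/formats.h>

#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_pixelformat.h"

using namespace libcamera;

void lookupExample()
{
	/* Lookup by libcamera PixelFormat. */
	const PixelFormatInfo &a = PixelFormatInfo::info(formats::SRGGB10);

	/* Lookup by V4L2 format, now routed through toPixelFormat(). */
	const PixelFormatInfo &b =
		PixelFormatInfo::info(V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10));

	/* Both lookups resolve to the same table entry when the format is known. */
	assert(&a == &b);
}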
/**
@@ -997,7 +1075,7 @@ unsigned int PixelFormatInfo::stride(unsigned int width, unsigned int plane,
return 0;
}
- if (plane > planes.size() || !planes[plane].bytesPerGroup) {
+ if (plane >= planes.size() || !planes[plane].bytesPerGroup) {
LOG(Formats, Warning) << "Invalid plane index, stride is zero";
return 0;
}
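The bounds check above is tightened from plane > planes.size() to plane >= planes.size(), closing an off-by-one read past the planes array. For context, a sketch of the arithmetic the pixelsPerGroup/bytesPerGroup pairs in the table feed into; strideFor() is a hypothetical stand-in for PixelFormatInfo::stride(), under the assumption that widths round up to whole pixel groups:

#include <cstdio>

static unsigned int strideFor(unsigned int width, unsigned int pixelsPerGroup,
			      unsigned int bytesPerGroup, unsigned int align)
{
	/* Round the width up to a whole number of pixel groups. */
	unsigned int groups = (width + pixelsPerGroup - 1) / pixelsPerGroup;
	unsigned int stride = groups * bytesPerGroup;

	/* Align the line length, e.g. to a bus or DMA requirement. */
	return (stride + align - 1) / align * align;
}

int main()
{
	/* SBGGR14_CSI2P packs 4 pixels into 7 bytes (see the entry above). */
	printf("%u\n", strideFor(1920, 4, 7, 1)); /* prints 3360 */
	return 0;
}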
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
index 1f3f0433..2d54d391 100644
--- a/src/libcamera/formats.yaml
+++ b/src/libcamera/formats.yaml
@@ -2,7 +2,7 @@
#
# Copyright (C) 2020, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
formats:
- R8:
@@ -11,6 +11,8 @@ formats:
fourcc: DRM_FORMAT_R10
- R12:
fourcc: DRM_FORMAT_R12
+ - R16:
+ fourcc: DRM_FORMAT_R16
- RGB565:
fourcc: DRM_FORMAT_RGB565
@@ -41,6 +43,11 @@ formats:
- BGRA8888:
fourcc: DRM_FORMAT_BGRA8888
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
- YUYV:
fourcc: DRM_FORMAT_YUYV
- YVYU:
@@ -49,6 +56,10 @@ formats:
fourcc: DRM_FORMAT_UYVY
- VYUY:
fourcc: DRM_FORMAT_VYUY
+ - AVUY8888:
+ fourcc: DRM_FORMAT_AVUY8888
+ - XVUY8888:
+ fourcc: DRM_FORMAT_XVUY8888
- NV12:
fourcc: DRM_FORMAT_NV12
@@ -69,6 +80,12 @@ formats:
fourcc: DRM_FORMAT_YVU420
- YUV422:
fourcc: DRM_FORMAT_YUV422
+ - YVU422:
+ fourcc: DRM_FORMAT_YVU422
+ - YUV444:
+ fourcc: DRM_FORMAT_YUV444
+ - YVU444:
+ fourcc: DRM_FORMAT_YVU444
- MJPEG:
fourcc: DRM_FORMAT_MJPEG
@@ -100,6 +117,15 @@ formats:
- SBGGR12:
fourcc: DRM_FORMAT_SBGGR12
+ - SRGGB14:
+ fourcc: DRM_FORMAT_SRGGB14
+ - SGRBG14:
+ fourcc: DRM_FORMAT_SGRBG14
+ - SGBRG14:
+ fourcc: DRM_FORMAT_SGBRG14
+ - SBGGR14:
+ fourcc: DRM_FORMAT_SBGGR14
+
- SRGGB16:
fourcc: DRM_FORMAT_SRGGB16
- SGRBG16:
@@ -112,6 +138,9 @@ formats:
- R10_CSI2P:
fourcc: DRM_FORMAT_R10
mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - R12_CSI2P:
+ fourcc: DRM_FORMAT_R12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
- SRGGB10_CSI2P:
fourcc: DRM_FORMAT_SRGGB10
@@ -139,6 +168,19 @@ formats:
fourcc: DRM_FORMAT_SBGGR12
mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SRGGB14_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG14_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG14_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR14_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
- SRGGB10_IPU3:
fourcc: DRM_FORMAT_SRGGB10
mod: IPU3_FORMAT_MOD_PACKED
@@ -151,4 +193,20 @@ formats:
- SBGGR10_IPU3:
fourcc: DRM_FORMAT_SBGGR10
mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
...
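Each entry above expands to a generated formats::* constant combining the DRM fourcc with the optional modifier. As a hedged illustration, the new PISP entries amount to the following C++, with macro names taken from libcamera's bundled drm_fourcc.h:

#include <linux/drm_fourcc.h> /* libcamera ships its own copy of this header */

#include <libcamera/pixel_format.h>

using namespace libcamera;

/* Conceptually what the generator emits for RGGB_PISP_COMP1. */
const PixelFormat rggbPispComp1{ DRM_FORMAT_SRGGB16,
				 PISP_FORMAT_MOD_COMPRESS_MODE1 };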
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
index 049b1c7e..826848f7 100644
--- a/src/libcamera/framebuffer.cpp
+++ b/src/libcamera/framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.cpp - Frame buffer handling
+ * Frame buffer handling
*/
#include <libcamera/framebuffer.h>
@@ -16,7 +16,10 @@
/**
* \file libcamera/framebuffer.h
* \brief Frame buffer handling
- *
+ */
+
+/**
+ * \internal
* \file libcamera/internal/framebuffer.h
* \brief Internal frame buffer handling support
*/
@@ -30,7 +33,7 @@ LOG_DEFINE_CATEGORY(Buffer)
* \brief Metadata related to a captured frame
*
* The FrameMetadata structure stores all metadata related to a captured frame,
- * as stored in a FrameBuffer, such as capture status, timestamp and bytesused.
+ * as stored in a FrameBuffer, such as capture status, timestamp and bytes used.
*/
/**
@@ -104,6 +107,7 @@ LOG_DEFINE_CATEGORY(Buffer)
* \return The array of per-plane metadata
*/
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class FrameBuffer::Private
* \brief Base class for FrameBuffer private data
@@ -114,9 +118,16 @@ LOG_DEFINE_CATEGORY(Buffer)
* pipeline handlers.
*/
-FrameBuffer::Private::Private()
- : request_(nullptr), isContiguous_(true)
+/**
+ * \brief Construct a FrameBuffer::Private instance
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::Private::Private(const std::vector<Plane> &planes, uint64_t cookie)
+ : planes_(planes), cookie_(cookie), request_(nullptr),
+ isContiguous_(true)
{
+ metadata_.planes_.resize(planes_.size());
}
/**
@@ -187,6 +198,21 @@ FrameBuffer::Private::~Private()
*/
/**
+ * \fn FrameBuffer::Private::cancel()
+ * \brief Marks the buffer as cancelled
+ *
+ * If a buffer is not used by a request, it shall be marked as cancelled to
+ * indicate that the metadata is invalid.
+ */
+
+/**
+ * \fn FrameBuffer::Private::metadata()
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
* \class FrameBuffer
* \brief Frame buffer data and its associated dynamic metadata
*
@@ -283,29 +309,22 @@ ino_t fileDescriptorInode(const SharedFD &fd)
* \param[in] cookie Cookie
*/
FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
- : FrameBuffer(std::make_unique<Private>(), planes, cookie)
+ : FrameBuffer(std::make_unique<Private>(planes, cookie))
{
}
/**
- * \brief Construct a FrameBuffer with an extensible private class and an array
- * of planes
+ * \brief Construct a FrameBuffer with an extensible private class
* \param[in] d The extensible private class
- * \param[in] planes The frame memory planes
- * \param[in] cookie Cookie
*/
-FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
- const std::vector<Plane> &planes,
- unsigned int cookie)
- : Extensible(std::move(d)), planes_(planes), cookie_(cookie)
+FrameBuffer::FrameBuffer(std::unique_ptr<Private> d)
+ : Extensible(std::move(d))
{
- metadata_.planes_.resize(planes_.size());
-
unsigned int offset = 0;
bool isContiguous = true;
ino_t inode = 0;
- for (const auto &plane : planes_) {
+ for (const auto &plane : _d()->planes_) {
ASSERT(plane.offset != Plane::kInvalidOffset);
if (plane.offset != offset) {
@@ -317,9 +336,9 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
* Two different dmabuf file descriptors may still refer to the
* same dmabuf instance. Check this using inodes.
*/
- if (plane.fd != planes_[0].fd) {
+ if (plane.fd != _d()->planes_[0].fd) {
if (!inode)
- inode = fileDescriptorInode(planes_[0].fd);
+ inode = fileDescriptorInode(_d()->planes_[0].fd);
if (fileDescriptorInode(plane.fd) != inode) {
isContiguous = false;
break;
@@ -336,10 +355,13 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
}
/**
- * \fn FrameBuffer::planes()
* \brief Retrieve the static plane descriptors
* \return Array of plane descriptors
*/
+const std::vector<FrameBuffer::Plane> &FrameBuffer::planes() const
+{
+ return _d()->planes_;
+}
/**
* \brief Retrieve the request this buffer belongs to
@@ -360,13 +382,15 @@ Request *FrameBuffer::request() const
}
/**
- * \fn FrameBuffer::metadata()
* \brief Retrieve the dynamic metadata
* \return Dynamic metadata for the frame contained in the buffer
*/
+const FrameMetadata &FrameBuffer::metadata() const
+{
+ return _d()->metadata_;
+}
/**
- * \fn FrameBuffer::cookie()
* \brief Retrieve the cookie
*
* The cookie belongs to the creator of the FrameBuffer, which controls its
@@ -376,9 +400,12 @@ Request *FrameBuffer::request() const
*
* \return The cookie
*/
+uint64_t FrameBuffer::cookie() const
+{
+ return _d()->cookie_;
+}
/**
- * \fn FrameBuffer::setCookie()
* \brief Set the cookie
* \param[in] cookie Cookie to set
*
@@ -387,6 +414,10 @@ Request *FrameBuffer::request() const
* modify the cookie value of buffers they haven't created themselves. The
* libcamera core never modifies the buffer cookie.
*/
+void FrameBuffer::setCookie(uint64_t cookie)
+{
+ _d()->cookie_ = cookie;
+}
/**
* \brief Extract the Fence associated with this Framebuffer
@@ -407,12 +438,4 @@ std::unique_ptr<Fence> FrameBuffer::releaseFence()
return std::move(_d()->fence_);
}
-/**
- * \fn FrameBuffer::cancel()
- * \brief Marks the buffer as cancelled
- *
- * If a buffer is not used by a request, it shall be marked as cancelled to
- * indicate that the metadata is invalid.
- */
-
} /* namespace libcamera */
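The cookie accessors moved out of line above keep their documented semantics: the value belongs entirely to the buffer's creator. A small usage sketch; the index-as-cookie scheme is illustrative, not mandated by the API:

#include <cstdint>
#include <memory>
#include <vector>

#include <libcamera/framebuffer.h>

using namespace libcamera;

/* Tag each buffer with its pool index so completions map back trivially. */
void tagBuffers(std::vector<std::unique_ptr<FrameBuffer>> &buffers)
{
	for (uint64_t i = 0; i < buffers.size(); ++i)
		buffers[i]->setCookie(i);
}

uint64_t indexOf(const FrameBuffer *buffer)
{
	return buffer->cookie();
}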
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index 4df27cac..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
@@ -59,14 +59,11 @@ LOG_DEFINE_CATEGORY(Allocator)
* \param[in] camera The camera
*/
FrameBufferAllocator::FrameBufferAllocator(std::shared_ptr<Camera> camera)
- : camera_(camera)
+ : camera_(std::move(camera))
{
}
-FrameBufferAllocator::~FrameBufferAllocator()
-{
- buffers_.clear();
-}
+FrameBufferAllocator::~FrameBufferAllocator() = default;
/**
* \brief Allocate buffers for a configured stream
@@ -88,16 +85,22 @@ FrameBufferAllocator::~FrameBufferAllocator()
*/
int FrameBufferAllocator::allocate(Stream *stream)
{
- if (buffers_.count(stream)) {
+ const auto &[it, inserted] = buffers_.try_emplace(stream);
+
+ if (!inserted) {
LOG(Allocator, Error) << "Buffers already allocated for stream";
return -EBUSY;
}
- int ret = camera_->exportFrameBuffers(stream, &buffers_[stream]);
+ int ret = camera_->exportFrameBuffers(stream, &it->second);
if (ret == -EINVAL)
LOG(Allocator, Error)
<< "Stream is not part of " << camera_->id()
<< " active configuration";
+
+ if (ret < 0)
+ buffers_.erase(it);
+
return ret;
}
@@ -119,8 +122,6 @@ int FrameBufferAllocator::free(Stream *stream)
if (iter == buffers_.end())
return -EINVAL;
- std::vector<std::unique_ptr<FrameBuffer>> &buffers = iter->second;
- buffers.clear();
buffers_.erase(iter);
return 0;
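The try_emplace() rewrite also guarantees that a failed allocation no longer leaves an empty vector behind in the map. The typical caller flow, sketched with error handling abbreviated:

#include <memory>
#include <utility>

#include <libcamera/camera.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/stream.h>

using namespace libcamera;

int allocateFor(std::shared_ptr<Camera> camera, Stream *stream)
{
	FrameBufferAllocator allocator(std::move(camera));

	int ret = allocator.allocate(stream);
	if (ret < 0)
		return ret; /* -EBUSY when called twice for the same stream */

	/* ... build and queue requests from allocator.buffers(stream) ... */

	return allocator.free(stream);
}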
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index cb3c2de1..81cc8cd5 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -56,8 +56,7 @@ namespace libcamera {
const std::string Point::toString() const
{
std::stringstream ss;
-
- ss << "(" << x << "," << y << ")";
+ ss << *this;
return ss.str();
}
@@ -84,10 +83,22 @@ bool operator==(const Point &lhs, const Point &rhs)
*/
/**
- * \struct Size
+ * \brief Insert a text representation of a Point into an output stream
+ * \param[in] out The output stream
+ * \param[in] p The point
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Point &p)
+{
+ out << "(" << p.x << ", " << p.y << ")";
+ return out;
+}
+
+/**
+ * \class Size
* \brief Describe a two-dimensional size
*
- * The Size structure defines a two-dimensional size with integer precision.
+ * The Size class defines a two-dimensional size with integer precision.
*/
/**
@@ -124,7 +135,10 @@ bool operator==(const Point &lhs, const Point &rhs)
*/
const std::string Size::toString() const
{
- return std::to_string(width) + "x" + std::to_string(height);
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
}
/**
@@ -429,7 +443,19 @@ bool operator<(const Size &lhs, const Size &rhs)
*/
/**
- * \struct SizeRange
+ * \brief Insert a text representation of a Size into an output stream
+ * \param[in] out The output stream
+ * \param[in] s The size
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Size &s)
+{
+ out << s.width << "x" << s.height;
+ return out;
+}
+
+/**
+ * \class SizeRange
* \brief Describe a range of sizes
*
* A SizeRange describes a range of sizes included in the [min, max] interval
@@ -528,9 +554,7 @@ bool SizeRange::contains(const Size &size) const
std::string SizeRange::toString() const
{
std::stringstream ss;
-
- ss << "(" << min.toString() << ")-(" << max.toString() << ")/(+"
- << hStep << ",+" << vStep << ")";
+ ss << *this;
return ss.str();
}
@@ -551,11 +575,27 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
*/
/**
- * \struct Rectangle
+ * \brief Insert a text representation of a SizeRange into an output stream
+ * \param[in] out The output stream
+ * \param[in] sr The size range
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
+{
+ out << "(" << sr.min << ")-(" << sr.max << ")/(+"
+ << sr.hStep << ",+" << sr.vStep << ")";
+
+ return out;
+}
+
+/**
+ * \class Rectangle
* \brief Describe a rectangle's position and dimensions
*
* Rectangles are used to identify an area of an image. They are specified by
* the coordinates of top-left corner and their horizontal and vertical size.
+ * By convention, the top-left corner is defined as the corner with the lowest
+ * x and y coordinates, regardless of the origin and direction of the axes.
*
* The measure unit of the rectangle coordinates and size, as well as the
* reference point from which the Rectangle::x and Rectangle::y displacements
@@ -573,6 +613,8 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
* \param[in] x The horizontal coordinate of the top-left corner
* \param[in] y The vertical coordinate of the top-left corner
* \param[in] size The size
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -582,6 +624,8 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
* \param[in] y The vertical coordinate of the top-left corner
* \param[in] width The width
* \param[in] height The height
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -592,13 +636,24 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
*/
/**
+ * \fn Rectangle::Rectangle(const Point &point1, const Point &point2)
+ * \brief Construct a Rectangle from two opposite corners
+ * \param[in] point1 One corner of the rectangle
+ * \param[in] point2 The opposite corner of \a point1
+ */
+
+/**
* \var Rectangle::x
* \brief The horizontal coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
* \var Rectangle::y
* \brief The vertical coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -624,8 +679,7 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
const std::string Rectangle::toString() const
{
std::stringstream ss;
-
- ss << "(" << x << "x" << y << ")/" << width << "x" << height;
+ ss << *this;
return ss.str();
}
@@ -648,6 +702,9 @@ Point Rectangle::center() const
/**
* \fn Point Rectangle::topLeft() const
* \brief Retrieve the coordinates of the top left corner of this Rectangle
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ *
* \return The Rectangle's top left corner
*/
@@ -781,6 +838,55 @@ Rectangle Rectangle::translatedBy(const Point &point) const
}
/**
+ * \brief Transform a Rectangle from one reference rectangle to another
+ * \param[in] source The \a source reference rectangle
+ * \param[in] destination The \a destination reference rectangle
+ *
+ * The \a source and \a destination parameters describe two rectangles defined
+ * in different reference systems. The Rectangle is scaled and translated from
+ * the source reference system into the destination reference system.
+ *
+ * The typical use case for this function is to translate a selection rectangle
+ * specified in one reference system, for example the sensor's pixel array,
+ * into the same rectangle re-scaled and translated into a different reference
+ * system, for example the output frame on which the selection rectangle is
+ * applied.
+ *
+ * For example, consider a sensor with a resolution of 4040x2360 pixels and
+ * assume a rectangle of (100, 100)/3840x2160 (sensorReference) in sensor
+ * coordinates is mapped to a rectangle (0, 0)/1920x1080 (displayReference) in
+ * display coordinates. This function can be used to transform an arbitrary
+ * rectangle from display coordinates to sensor coordinates or vice versa:
+ *
+ * \code{.cpp}
+ * Rectangle sensorReference(100, 100, 3840, 2160);
+ * Rectangle displayReference(0, 0, 1920, 1080);
+ *
+ * // Bottom right quarter in sensor coordinates
+ * Rectangle sensorRect(2020, 1180, 1920, 1080);
+ * Rectangle displayRect = sensorRect.transformedBetween(sensorReference, displayReference);
+ * // displayRect is now (960, 540)/960x540
+ *
+ * // Transformation back to sensor coordinates
+ * sensorRect = displayRect.transformedBetween(displayReference, sensorReference);
+ * \endcode
+ *
+ * \return The rectangle, transformed into the \a destination reference system
+ */
+Rectangle Rectangle::transformedBetween(const Rectangle &source,
+ const Rectangle &destination) const
+{
+ Rectangle r;
+ double sx = static_cast<double>(destination.width) / source.width;
+ double sy = static_cast<double>(destination.height) / source.height;
+
+ r.x = static_cast<int>((x - source.x) * sx) + destination.x;
+ r.y = static_cast<int>((y - source.y) * sy) + destination.y;
+ r.width = static_cast<int>(width * sx);
+ r.height = static_cast<int>(height * sy);
+
+ return r;
+}
+
+/**
* \brief Compare rectangles for equality
* \return True if the two rectangles are equal, false otherwise
*/
@@ -796,4 +902,16 @@ bool operator==(const Rectangle &lhs, const Rectangle &rhs)
* \return True if the two rectangles are not equal, false otherwise
*/
+/**
+ * \brief Insert a text representation of a Rectangle into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The rectangle
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Rectangle &r)
+{
+ out << "(" << r.x << ", " << r.y << ")/" << r.width << "x" << r.height;
+ return out;
+}
+
} /* namespace libcamera */
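Taken together, the new insertion operators and transformedBetween() compose naturally. A small self-contained sketch reusing the values from the documentation above:

#include <iostream>

#include <libcamera/geometry.h>

using namespace libcamera;

int main()
{
	Rectangle sensorReference(100, 100, 3840, 2160);
	Rectangle displayReference(0, 0, 1920, 1080);

	/* Bottom-right quarter of the sensor reference. */
	Rectangle sensorRect(2020, 1180, 1920, 1080);

	Rectangle displayRect =
		sensorRect.transformedBetween(sensorReference, displayReference);

	std::cout << displayRect << std::endl; /* prints "(960, 540)/960x540" */

	return 0;
}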
diff --git a/src/libcamera/ipa/meson.build b/src/libcamera/ipa/meson.build
index 44695240..ef73b3f9 100644
--- a/src/libcamera/ipa/meson.build
+++ b/src/libcamera/ipa/meson.build
@@ -3,13 +3,10 @@
libcamera_ipa_interfaces = []
foreach file : ipa_mojom_files
- name = '@0@'.format(file).split('/')[-1].split('.')[0]
-
# {pipeline}_ipa_interface.cpp
libcamera_ipa_interfaces += \
- custom_target(name + '_ipa_interface_cpp',
- input : file,
- output : name + '_ipa_interface.cpp',
+ custom_target(input : file,
+ output : '@BASENAME@_ipa_interface.cpp',
command : [
mojom_docs_extractor,
'-o', '@OUTPUT@', '@INPUT@'
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index c3489bbf..12d92ebe 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
#include <libcamera/ipa/ipa_controls.h>
@@ -131,6 +131,8 @@
* shall be ignored when parsing the packet.
*/
+namespace libcamera {
+
/**
* \def IPA_CONTROLS_FORMAT_VERSION
* \brief The current control serialization format version
@@ -218,9 +220,15 @@ static_assert(sizeof(ipa_control_value_entry) == 16,
* \var ipa_control_info_entry::offset
* The offset in bytes from the beginning of the data section to the control
* info data (shall be a multiple of 8 bytes)
+ * \var ipa_control_info_entry::direction
+ * The directions in which the control is allowed to be sent. This is a flags
+ * value, where 0x1 signifies input (as controls), and 0x2 signifies output (as
+ * metadata). \sa ControlId::Direction
* \var ipa_control_info_entry::padding
* Padding bytes (shall be set to 0)
*/
static_assert(sizeof(ipa_control_info_entry) == 16,
"Invalid ABI size change for struct ipa_control_info_entry");
+
+} /* namespace libcamera */
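A sketch of how a consumer of the serialized control info might interpret the new direction field; the constants mirror the flag values documented above, and the helper name is hypothetical:

#include <cstdint>

constexpr uint32_t kDirectionIn = 0x1;  /* may be sent as a control */
constexpr uint32_t kDirectionOut = 0x2; /* may be returned as metadata */

static bool isMetadataOnly(uint32_t direction)
{
	return (direction & (kDirectionIn | kDirectionOut)) == kDirectionOut;
}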
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
index 0a259305..2189a246 100644
--- a/src/libcamera/ipa_data_serializer.cpp
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.cpp - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#include "libcamera/internal/ipa_data_serializer.h"
@@ -11,6 +11,8 @@
#include <libcamera/base/log.h>
+#include "libcamera/internal/byte_stream_buffer.h"
+
/**
* \file ipa_data_serializer.h
* \brief IPA Data Serializer
@@ -537,7 +539,6 @@ IPADataSerializer<SharedFD>::serialize(const SharedFD &data,
if (data.isValid())
fdVec.push_back(data);
-
return { dataVec, fdVec };
}
@@ -604,7 +605,7 @@ IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_i
FrameBuffer::Plane ret;
ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4,
- fdsBegin, fdsBegin + 1);
+ fdsBegin, fdsBegin + 1);
ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd);
ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd);
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 8ea6cbee..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#include <libcamera/ipa/ipa_interface.h>
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index ec966045..cfc24d38 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#include "libcamera/internal/ipa_manager.h"
@@ -95,8 +95,6 @@ LOG_DEFINE_CATEGORY(IPAManager)
* IPC.
*/
-IPAManager *IPAManager::self_ = nullptr;
-
/**
* \brief Construct an IPAManager instance
*
@@ -105,9 +103,10 @@ IPAManager *IPAManager::self_ = nullptr;
*/
IPAManager::IPAManager()
{
- if (self_)
- LOG(IPAManager, Fatal)
- << "Multiple IPAManager objects are not allowed";
+#if HAVE_IPA_PUBKEY
+ if (!pubKey_.isValid())
+ LOG(IPAManager, Warning) << "Public key not valid";
+#endif
unsigned int ipaCount = 0;
@@ -133,7 +132,7 @@ IPAManager::IPAManager()
std::string root = utils::libcameraBuildPath();
if (!root.empty()) {
std::string ipaBuildPath = root + "src/ipa";
- constexpr int maxDepth = 1;
+ constexpr int maxDepth = 2;
LOG(IPAManager, Info)
<< "libcamera is not installed. Adding '"
@@ -148,16 +147,12 @@ IPAManager::IPAManager()
if (!ipaCount)
LOG(IPAManager, Warning)
<< "No IPA found in '" IPA_MODULE_DIR "'";
-
- self_ = this;
}
IPAManager::~IPAManager()
{
for (IPAModule *module : modules_)
delete module;
-
- self_ = nullptr;
}
/**
@@ -274,6 +269,19 @@ IPAModule *IPAManager::module(PipelineHandler *pipe, uint32_t minVersion,
* found or if the IPA proxy fails to initialize
*/
+#if HAVE_IPA_PUBKEY
+/**
+ * \fn IPAManager::pubKey()
+ * \brief Retrieve the IPA module signing public key
+ *
+ * IPA module signature verification is normally handled internally by the
+ * IPAManager class. This function is meant to be used by utilities that need to
+ * verify signatures externally.
+ *
+ * \return The IPA module signing public key
+ */
+#endif
+
bool IPAManager::isSignatureValid([[maybe_unused]] IPAModule *ipa) const
{
#if HAVE_IPA_PUBKEY
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index c9ff7de3..9ca74be6 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#include "libcamera/internal/ipa_module.h"
#include <algorithm>
-#include <array>
#include <ctype.h>
#include <dlfcn.h>
#include <elf.h>
@@ -51,8 +50,8 @@ typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
if (size > elf.size() || size < objSize)
return nullptr;
- return reinterpret_cast<typename std::remove_extent_t<T> *>
- (reinterpret_cast<const char *>(elf.data()) + offset);
+ return reinterpret_cast<typename std::remove_extent_t<T> *>(
+ reinterpret_cast<const char *>(elf.data()) + offset);
}
template<typename T>
@@ -225,9 +224,9 @@ Span<const uint8_t> elfLoadSymbol(Span<const uint8_t> elf, const char *symbol)
* \brief The name of the IPA module
*
* The name may be used to build file system paths to IPA-specific resources.
- * It shall only contain printable characters, and may not contain '/', '*',
- * '?' or '\'. For IPA modules included in libcamera, it shall match the
- * directory of the IPA module in the source tree.
+ * It shall only contain printable characters, and may not contain '*', '?' or
+ * '\'. For IPA modules included in libcamera, it shall match the directory of
+ * the IPA module in the source tree.
*
* \todo Allow user to choose to isolate open source IPAs
*/
@@ -288,25 +287,30 @@ int IPAModule::loadIPAModuleInfo()
}
Span<const uint8_t> info = elfLoadSymbol(data, "ipaModuleInfo");
- if (info.size() != sizeof(info_)) {
+ if (info.size() < sizeof(info_)) {
LOG(IPAModule, Error) << "IPA module has no valid info";
return -EINVAL;
}
- memcpy(&info_, info.data(), info.size());
+ memcpy(&info_, info.data(), sizeof(info_));
if (info_.moduleAPIVersion != IPA_MODULE_API_VERSION) {
LOG(IPAModule, Error) << "IPA module API version mismatch";
return -EINVAL;
}
- /* Validate the IPA module name. */
+ /*
+ * Validate the IPA module name.
+ *
+ * \todo Consider module naming restrictions to avoid escaping from a
+ * base directory. Forbidding ".." may be enough, but this may be best
+ * implemented in a different layer.
+ */
std::string ipaName = info_.name;
auto iter = std::find_if_not(ipaName.begin(), ipaName.end(),
[](unsigned char c) -> bool {
- return isprint(c) && c != '/' &&
- c != '?' && c != '*' &&
- c != '\\';
+ return isprint(c) && c != '?' &&
+ c != '*' && c != '\\';
});
if (iter != ipaName.end()) {
LOG(IPAModule, Error)
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 3f2cc6b8..85004737 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,12 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#include "libcamera/internal/ipa_proxy.h"
-#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
@@ -72,6 +71,7 @@ IPAProxy::~IPAProxy()
/**
* \brief Retrieve the absolute path to an IPA configuration file
* \param[in] name The configuration file name
+ * \param[in] fallbackName The name of a fallback configuration file
*
* This function locates the configuration file for an IPA and returns its
* absolute path. It searches the following directories, in order:
@@ -89,10 +89,14 @@ IPAProxy::~IPAProxy()
* named after the IPA module name, as reported in IPAModuleInfo::name, and for
* a file named \a name within that directory. The \a name is IPA-specific.
*
+ * If the file named \a name is not found and \a fallbackName is non-empty,
+ * then the whole search is repeated for \a fallbackName.
+ *
* \return The full path to the IPA configuration file, or an empty string if
* no configuration file can be found
*/
-std::string IPAProxy::configurationFile(const std::string &name) const
+std::string IPAProxy::configurationFile(const std::string &name,
+ const std::string &fallbackName) const
{
struct stat statbuf;
int ret;
@@ -146,11 +150,18 @@ std::string IPAProxy::configurationFile(const std::string &name) const
}
}
- LOG(IPAProxy, Error)
- << "Configuration file '" << name
- << "' not found for IPA module '" << ipaName << "'";
+ if (fallbackName.empty()) {
+ LOG(IPAProxy, Error)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName << "'";
+ return std::string();
+ }
- return std::string();
+ LOG(IPAProxy, Warning)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName
+ << "', falling back to '" << fallbackName << "'";
+ return configurationFile(fallbackName);
}
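Called from a class derived from IPAProxy, the fallback makes per-sensor tuning files optional. A fragment, with purely illustrative file names:

	/* Look for sensor-specific tuning data, else fall back to generic data. */
	std::string tuningFile =
		configurationFile("imx219.yaml", "uncalibrated.yaml");
	if (tuningFile.empty()) {
		/* Neither file was found; an error has already been logged. */
	}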
/**
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
index 01e5333b..5d8c92c2 100644
--- a/src/libcamera/ipa_pub_key.cpp.in
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * ipa_pub_key.cpp - IPA module signing public key
+ * IPA module signing public key
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
index 31a0ca09..548299d0 100644
--- a/src/libcamera/ipc_pipe.cpp
+++ b/src/libcamera/ipc_pipe.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.cpp - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#include "libcamera/internal/ipc_pipe.h"
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
index 3ef90709..668ec73b 100644
--- a/src/libcamera/ipc_pipe_unixsocket.cpp
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.cpp - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#include "libcamera/internal/ipc_pipe_unixsocket.h"
@@ -18,6 +18,8 @@
#include "libcamera/internal/ipc_unixsocket.h"
#include "libcamera/internal/process.h"
+using namespace std::chrono_literals;
+
namespace libcamera {
LOG_DECLARE_CATEGORY(IPCPipe)
@@ -126,7 +128,7 @@ int IPCPipeUnixSocket::call(const IPCUnixSocket::Payload &message,
}
/* \todo Make this less dangerous, see IPCPipe::sendSync() */
- timeout.start(2000);
+ timeout.start(2000ms);
while (!iter->second.done) {
if (!timeout.isRunning()) {
LOG(IPCPipe, Error) << "Call timeout!";
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 1980d374..002053e3 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#include "libcamera/internal/ipc_unixsocket.h"
@@ -12,6 +12,7 @@
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
+#include <vector>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
@@ -247,10 +248,9 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
iov[0].iov_base = const_cast<void *>(buffer);
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -283,10 +283,9 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
iov[0].iov_base = buffer;
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
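The conversion to std::vector also removes a variable-length array, and the vector's value-initialization replaces the explicit memset(). The control-message setup, extracted into a sketch; attachFds() is a hypothetical helper, and buf must outlive the sendmsg()/recvmsg() call:

#include <cstdint>
#include <sys/socket.h>
#include <vector>

void attachFds(struct msghdr *msg, unsigned int num,
	       std::vector<uint8_t> &buf)
{
	/* Zero-initialised control buffer sized for num descriptors. */
	buf.assign(CMSG_SPACE(num * sizeof(int32_t)), 0);

	struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
	cmsg->cmsg_len = CMSG_LEN(num * sizeof(int32_t));
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;

	msg->msg_control = cmsg;
	msg->msg_controllen = cmsg->cmsg_len;
}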
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
index 6860069b..f54bbf21 100644
--- a/src/libcamera/mapped_framebuffer.cpp
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.cpp - Mapped Framebuffer support
+ * Mapped Framebuffer support
*/
#include "libcamera/internal/mapped_framebuffer.h"
@@ -16,7 +16,7 @@
#include <libcamera/base/log.h>
/**
- * \file libcamera/internal/mapped_framebuffer.h
+ * \file mapped_framebuffer.h
* \brief Frame buffer memory mapping support
*/
@@ -72,7 +72,7 @@ MappedBuffer::MappedBuffer(MappedBuffer &&other)
/**
* \brief Move assignment operator, replace the mappings with those of \a other
-* \param[in] other The other MappedBuffer
+ * \param[in] other The other MappedBuffer
*
* Moving a MappedBuffer moves the mappings contained in the \a other to the new
* MappedBuffer and invalidates the \a other.
diff --git a/src/libcamera/matrix.cpp b/src/libcamera/matrix.cpp
new file mode 100644
index 00000000..4d95a19b
--- /dev/null
+++ b/src/libcamera/matrix.cpp
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Matrix and related operations
+ */
+
+#include "libcamera/internal/matrix.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file matrix.h
+ * \brief Matrix class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Matrix)
+
+/**
+ * \class Matrix
+ * \brief Matrix class
+ * \tparam T Type of numerical values to be stored in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ */
+
+/**
+ * \fn Matrix::Matrix()
+ * \brief Construct a zero matrix
+ */
+
+/**
+ * \fn Matrix::Matrix(const std::array<T, Rows * Cols> &data)
+ * \brief Construct a matrix from supplied data
+ * \param[in] data Data from which to construct a matrix
+ *
+ * \a data is a one-dimensional vector and will be turned into a matrix in
+ * row-major order. The size of \a data must be equal to the product of the
+ * number of rows and columns of the matrix (Rows x Cols).
+ */
+
+/**
+ * \fn Matrix::identity()
+ * \brief Construct an identity matrix
+ */
+
+/**
+ * \fn Matrix::toString()
+ * \brief Assemble and return a string describing the matrix
+ * \return A string describing the matrix
+ */
+
+/**
+ * \fn Span<const T, Cols> Matrix::operator[](size_t i) const
+ * \brief Index to a row in the matrix
+ * \param[in] i Index of row to retrieve
+ *
+ * This operator[] returns a Span, which can then be indexed into again with
+ * another operator[], allowing a convenient m[i][j] to access elements of the
+ * matrix. Note that the lifetime of the Span returned by this first-level
+ * operator[] is bound to that of the Matrix itself, so it is not recommended
+ * to save the Span that is the result of this operator[].
+ *
+ * \return Row \a i from the matrix, as a Span
+ */
+
+/**
+ * \fn Matrix::operator[](size_t i)
+ * \copydoc Matrix::operator[](size_t i) const
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> &Matrix::operator*=(U d)
+ * \brief Multiply the matrix by a scalar in-place
+ * \tparam U Type of the numerical scalar value
+ * \param d The scalar multiplier
+ * \return Product of this matrix and scalar \a d
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
+ * \brief Multiply the matrix by a scalar
+ * \tparam T Type of the numerical scalar value
+ * \tparam U Type of numerical values in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ * \param d The scalar multiplier
+ * \param m The matrix
+ * \return Product of scalar \a d and matrix \a m
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
+ * \copydoc operator*(T d, const Matrix<U, Rows, Cols> &m)
+ */
+
+/**
+ * \fn Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
+ * \brief Matrix multiplication
+ * \tparam T Type of numerical values in the matrices
+ * \tparam R1 Number of rows in the first matrix
+ * \tparam C1 Number of columns in the first matrix
+ * \tparam R2 Number of rows in the second matrix
+ * \tparam C2 Number of columns in the second matrix
+ * \param m1 Multiplicand matrix
+ * \param m2 Multiplier matrix
+ * \return Matrix product of matrices \a m1 and \a m2
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
+ * \brief Matrix addition
+ * \tparam T Type of numerical values in the matrices
+ * \tparam Rows Number of rows in the matrices
+ * \tparam Cols Number of columns in the matrices
+ * \param m1 Summand matrix
+ * \param m2 Summand matrix
+ * \return Matrix sum of matrices \a m1 and \a m2
+ */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values. Its size shall be equal
+ * to the product of the number of rows and columns of the matrix (Rows x
+ * Cols). The values shall be stored in row-major order.
+ */
+bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Matrix, Error)
+ << "Wrong number of values in matrix: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
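A brief usage sketch of the new Matrix class, assuming the internal matrix.h header; the values are arbitrary:

#include <array>

#include "libcamera/internal/matrix.h"

using namespace libcamera;

void matrixExample()
{
	/* Row-major construction, as documented above. */
	Matrix<double, 3, 3> ccm(std::array<double, 9>{
		1.5, -0.3, -0.2,
		-0.2, 1.4, -0.2,
		-0.1, -0.4, 1.5,
	});

	Matrix<double, 3, 3> half = 0.5 * ccm;
	Matrix<double, 3, 3> sum = ccm + half;

	/* m[i][j] element access through the Span-returning operator[]. */
	double topLeft = sum[0][0];
	static_cast<void>(topLeft);
}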
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 941f86c2..d71dad74 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
#include "libcamera/internal/media_device.h"
@@ -352,8 +352,9 @@ MediaEntity *MediaDevice::getEntityByName(const std::string &name) const
* entity with name \a sourceName, to the pad at index \a sinkIdx of the
* sink entity with name \a sinkName, if any.
*
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -381,8 +382,9 @@ MediaLink *MediaDevice::link(const std::string &sourceName, unsigned int sourceI
* entity \a source, to the pad at index \a sinkIdx of the sink entity \a
* sink, if any.
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -404,8 +406,10 @@ MediaLink *MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx,
* \param[in] source The source pad
* \param[in] sink The sink pad
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -473,7 +477,7 @@ int MediaDevice::open()
return -EBUSY;
}
- fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR));
+ fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR | O_CLOEXEC));
if (!fd_.isValid()) {
int ret = -errno;
LOG(MediaDevice, Error)
@@ -683,40 +687,72 @@ bool MediaDevice::populateLinks(const struct media_v2_topology &topology)
(topology.ptr_links);
for (unsigned int i = 0; i < topology.num_links; ++i) {
- /* We only care about pad-2-pad links here. */
- if ((mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) !=
- MEDIA_LNK_FL_DATA_LINK)
+ if ((mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) ==
+ MEDIA_LNK_FL_INTERFACE_LINK)
continue;
- /* Store references to source and sink pads in the link. */
+ /* Look up the source and sink objects. */
unsigned int source_id = mediaLinks[i].source_id;
- MediaPad *source = dynamic_cast<MediaPad *>
- (object(source_id));
+ MediaObject *source = object(source_id);
if (!source) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< source_id;
return false;
}
unsigned int sink_id = mediaLinks[i].sink_id;
- MediaPad *sink = dynamic_cast<MediaPad *>
- (object(sink_id));
+ MediaObject *sink = object(sink_id);
if (!sink) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< sink_id;
return false;
}
- MediaLink *link = new MediaLink(&mediaLinks[i], source, sink);
- if (!addObject(link)) {
- delete link;
- return false;
+ switch (mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) {
+ case MEDIA_LNK_FL_DATA_LINK: {
+ MediaPad *sourcePad = dynamic_cast<MediaPad *>(source);
+ MediaPad *sinkPad = dynamic_cast<MediaPad *>(sink);
+ if (!sourcePad || !sinkPad) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not a pad";
+ return false;
+ }
+
+ MediaLink *link = new MediaLink(&mediaLinks[i],
+ sourcePad, sinkPad);
+ if (!addObject(link)) {
+ delete link;
+ return false;
+ }
+
+ link->source()->addLink(link);
+ link->sink()->addLink(link);
+
+ break;
}
- source->addLink(link);
- sink->addLink(link);
+ case MEDIA_LNK_FL_ANCILLARY_LINK: {
+ MediaEntity *primary = dynamic_cast<MediaEntity *>(source);
+ MediaEntity *ancillary = dynamic_cast<MediaEntity *>(sink);
+ if (!primary || !ancillary) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not an entity";
+ return false;
+ }
+
+ primary->addAncillaryEntity(ancillary);
+
+ break;
+ }
+
+ default:
+ LOG(MediaDevice, Warning)
+ << "Unknown media link type";
+
+ break;
+ }
}
return true;
@@ -782,20 +818,12 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags)
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
- << "Failed to setup link "
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: "
+ << "Failed to setup link " << *link << ": "
<< strerror(-ret);
return ret;
}
- LOG(MediaDevice, Debug)
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: " << flags;
+ LOG(MediaDevice, Debug) << *link << ": " << flags;
return 0;
}
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index f425d044..3e3772a6 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
#include "libcamera/internal/media_object.h"
@@ -147,6 +147,31 @@ MediaLink::MediaLink(const struct media_v2_link *link, MediaPad *source,
}
/**
+ * \brief Generate a string representation of the MediaLink
+ * \return A string representing the MediaLink
+ */
+std::string MediaLink::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a Link into an output stream
+ * \param[in] out The output stream
+ * \param[in] link The MediaLink
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaLink &link)
+{
+ out << *link.source() << " -> " << *link.sink();
+
+ return out;
+}
+
+/**
* \fn MediaLink::source()
* \brief Retrieve the link's source pad
* \return The source pad at the origin of the link
@@ -236,6 +261,31 @@ void MediaPad::addLink(MediaLink *link)
}
/**
+ * \brief Generate a string representation of the MediaPad
+ * \return A string representing the MediaPad
+ */
+std::string MediaPad::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a MediaPad into an output stream
+ * \param[in] out The output stream
+ * \param[in] pad The MediaPad
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaPad &pad)
+{
+ out << "'" << pad.entity()->name() << "'[" << pad.index() << "]";
+
+ return out;
+}
+
+/**
* \class MediaEntity
* \brief The MediaEntity represents an entity in the media graph
*
@@ -423,4 +473,19 @@ void MediaEntity::addPad(MediaPad *pad)
pads_.push_back(pad);
}
+/**
+ * \brief Add a MediaEntity to the list of ancillary entities
+ * \param[in] ancillaryEntity The instance of MediaEntity to add
+ */
+void MediaEntity::addAncillaryEntity(MediaEntity *ancillaryEntity)
+{
+ ancillaryEntities_.push_back(ancillaryEntity);
+}
+
+/**
+ * \fn MediaEntity::ancillaryEntities()
+ * \brief Retrieve all ancillary entities of the entity
+ * \return The list of the entity's ancillary entities
+ */
+
} /* namespace libcamera */
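A traversal sketch combining the new stream operators with the ancillary entity list; ancillary links attach, for example, a lens controller to its sensor entity. Assumes the internal media_object.h header:

#include <iostream>

#include "libcamera/internal/media_object.h"

using namespace libcamera;

void dumpEntity(const MediaEntity *entity)
{
	/* Print every pad-to-pad link, using the new operator<<. */
	for (const MediaPad *pad : entity->pads()) {
		for (const MediaLink *link : pad->links())
			std::cout << *link << std::endl;
	}

	for (const MediaEntity *ancillary : entity->ancillaryEntities())
		std::cout << "ancillary: " << ancillary->name() << std::endl;
}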
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index 26912ca1..57fde8a8 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -1,26 +1,35 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources = files([
- 'bayer_format.cpp',
- 'byte_stream_buffer.cpp',
+libcamera_public_sources = files([
'camera.cpp',
- 'camera_controls.cpp',
- 'camera_lens.cpp',
'camera_manager.cpp',
- 'camera_sensor.cpp',
- 'camera_sensor_properties.cpp',
'color_space.cpp',
'controls.cpp',
+ 'fence.cpp',
+ 'framebuffer.cpp',
+ 'framebuffer_allocator.cpp',
+ 'geometry.cpp',
+ 'orientation.cpp',
+ 'pixel_format.cpp',
+ 'request.cpp',
+ 'stream.cpp',
+ 'transform.cpp',
+])
+
+libcamera_internal_sources = files([
+ 'bayer_format.cpp',
+ 'byte_stream_buffer.cpp',
+ 'camera_controls.cpp',
+ 'camera_lens.cpp',
'control_serializer.cpp',
'control_validator.cpp',
+ 'converter.cpp',
+ 'debug_controls.cpp',
'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
- 'fence.cpp',
+ 'dma_buf_allocator.cpp',
'formats.cpp',
- 'framebuffer.cpp',
- 'framebuffer_allocator.cpp',
- 'geometry.cpp',
'ipa_controls.cpp',
'ipa_data_serializer.cpp',
'ipa_interface.cpp',
@@ -31,83 +40,135 @@ libcamera_sources = files([
'ipc_pipe_unixsocket.cpp',
'ipc_unixsocket.cpp',
'mapped_framebuffer.cpp',
+ 'matrix.cpp',
'media_device.cpp',
'media_object.cpp',
'pipeline_handler.cpp',
- 'pixel_format.cpp',
'process.cpp',
'pub_key.cpp',
- 'request.cpp',
+ 'shared_mem_object.cpp',
'source_paths.cpp',
- 'stream.cpp',
'sysfs.cpp',
- 'transform.cpp',
'v4l2_device.cpp',
'v4l2_pixelformat.cpp',
'v4l2_subdevice.cpp',
'v4l2_videodevice.cpp',
+ 'yaml_parser.cpp',
])
-libcamera_sources += libcamera_public_headers
-libcamera_sources += libcamera_generated_ipa_headers
-libcamera_sources += libcamera_tracepoint_header
-
includes = [
libcamera_includes,
]
+libcamera_deps = []
+
libatomic = cc.find_library('atomic', required : false)
+libthreads = dependency('threads')
subdir('base')
+subdir('converter')
subdir('ipa')
subdir('pipeline')
subdir('proxy')
+subdir('sensor')
+subdir('software_isp')
-libdl = cc.find_library('dl')
-libgnutls = cc.find_library('gnutls', required : true)
-libudev = dependency('libudev', required : false)
+null_dep = dependency('', required : false)
-if libgnutls.found()
+# TODO: Use dependency('dl') when updating to meson 0.62.0 or newer.
+libdl = null_dep
+if not cc.has_function('dlopen')
+ libdl = cc.find_library('dl')
+endif
+libudev = dependency('libudev', required : get_option('udev'))
+libyaml = dependency('yaml-0.1', required : false)
+
+# Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first.
+libcrypto = dependency('gnutls', required : false)
+if libcrypto.found()
config_h.set('HAVE_GNUTLS', 1)
+else
+ libcrypto = dependency('libcrypto', required : false)
+ if libcrypto.found()
+ config_h.set('HAVE_CRYPTO', 1)
+ endif
+endif
+
+if not libcrypto.found()
+ warning('Neither gnutls nor libcrypto found, all IPA modules will be isolated')
+ summary({'IPA modules signed with': 'None (modules will run isolated)'},
+ section : 'Configuration')
+else
+ summary({'IPA modules signed with' : libcrypto.name()}, section : 'Configuration')
endif
if liblttng.found()
tracing_enabled = true
config_h.set('HAVE_TRACING', 1)
- libcamera_sources += files(['tracepoints.cpp'])
+ libcamera_internal_sources += files(['tracepoints.cpp'])
else
tracing_enabled = false
endif
if libudev.found()
config_h.set('HAVE_LIBUDEV', 1)
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'device_enumerator_udev.cpp',
])
endif
+# Fallback to a subproject if libyaml isn't found, as it's not packaged in AOSP.
+if not libyaml.found()
+ cmake = import('cmake')
+
+ libyaml_vars = cmake.subproject_options()
+ libyaml_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+ libyaml_vars.append_compile_args('c', '-Wno-unused-value')
+ libyaml_wrap = cmake.subproject('libyaml', options : libyaml_vars)
+ libyaml = libyaml_wrap.dependency('yaml')
+endif
+
control_sources = []
-foreach source : control_source_files
- input_files = files(source +'.yaml', source + '.cpp.in')
- control_sources += custom_target(source + '_cpp',
+controls_mode_files = {
+ 'controls': [
+ controls_files,
+ 'control_ids.cpp',
+ ],
+ 'properties': [
+ properties_files,
+ 'property_ids.cpp',
+ ],
+}
+
+foreach mode, inout_files : controls_mode_files
+ input_files = inout_files[0]
+ output_file = inout_files[1]
+
+ template_file = files('control_ids.cpp.in')
+ ranges_file = files('control_ranges.yaml')
+
+ control_sources += custom_target(mode + '_ids_cpp',
input : input_files,
- output : source + '.cpp',
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'])
+ output : output_file,
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'],
+ env : py_build_env)
endforeach
-libcamera_sources += control_sources
+libcamera_public_sources += control_sources
-gen_version = meson.source_root() / 'utils' / 'gen-version.sh'
+gen_version = meson.project_source_root() / 'utils' / 'gen-version.sh'
# Use vcs_tag() and not configure_file() or run_command(), to ensure that the
# version gets updated with every ninja build and not just at meson setup time.
-version_cpp = vcs_tag(command : [gen_version, meson.build_root(), meson.source_root()],
+version_cpp = vcs_tag(command : [gen_version, meson.project_build_root(), meson.project_source_root()],
input : 'version.cpp.in',
output : 'version.cpp',
fallback : meson.project_version())
-libcamera_sources += version_cpp
+libcamera_public_sources += version_cpp
if ipa_sign_module
ipa_pub_key_cpp = custom_target('ipa_pub_key_cpp',
@@ -115,17 +176,18 @@ if ipa_sign_module
output : 'ipa_pub_key.cpp',
command : [gen_ipa_pub_key, '@INPUT@', '@OUTPUT@'])
- libcamera_sources += ipa_pub_key_cpp
+ libcamera_internal_sources += ipa_pub_key_cpp
endif
-libcamera_deps = [
+libcamera_deps += [
libatomic,
libcamera_base,
libcamera_base_private,
+ libcrypto,
libdl,
- libgnutls,
liblttng,
libudev,
+ libyaml,
]
# We add '/' to the build_rpath as a 'safe' path to act as a boolean flag.
@@ -134,8 +196,15 @@ libcamera_deps = [
# for the presence or absence of the dynamic tag.
libcamera = shared_library('libcamera',
- libcamera_sources,
+ [
+ libcamera_public_headers,
+ libcamera_public_sources,
+ libcamera_ipa_headers,
+ libcamera_internal_headers,
+ libcamera_internal_sources,
+ ],
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
include_directories : includes,
@@ -143,7 +212,6 @@ libcamera = shared_library('libcamera',
dependencies : libcamera_deps)
libcamera_public = declare_dependency(sources : [
- libcamera_ipa_headers,
libcamera_public_headers,
],
include_directories : libcamera_includes,
@@ -152,7 +220,7 @@ libcamera_public = declare_dependency(sources : [
# Internal dependency for components and plugins which can use private APIs
libcamera_private = declare_dependency(sources : [
- libcamera_generated_ipa_headers,
+ libcamera_ipa_headers,
],
dependencies : [
libcamera_public,
@@ -165,4 +233,6 @@ pkg_mod.generate(libcamera,
description : 'Complex Camera Support Library',
subdirs : 'libcamera')
+meson.override_dependency('libcamera', libcamera_public)
+
subdir('proxy/worker')
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
new file mode 100644
index 00000000..7d7d21ae
--- /dev/null
+++ b/src/libcamera/orientation.cpp
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#include <libcamera/orientation.h>
+
+#include <array>
+
+/**
+ * \file orientation.h
+ * \brief Image orientation definition
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Orientation
+ * \brief The image orientation in a memory buffer
+ *
+ * The Orientation enumeration describes the orientation of the images
+ * produced by the camera pipeline as they are received by the application
+ * inside memory buffers.
+ *
+ * The image orientation expressed using the Orientation enumeration can then
+ * be inferred by applying to a naturally oriented image a rotation by a
+ * multiple of 90 degrees in the clockwise direction and then an optional
+ * horizontal mirroring.
+ *
+ * The enumeration numerical values follow the ones defined by the EXIF
+ * Specification version 2.32, Tag 274 "Orientation", while the names of the
+ * enumerated values report the rotation and mirroring operations performed.
+ *
+ * For example, Orientation::Rotate90Mirror describes the orientation obtained
+ * by rotating the image 90 degrees clockwise first and then applying a
+ * horizontal mirroring.
+ *
+ * \var Orientation::Rotate0
+ * \image html rotation/rotate0.svg
+ * \var Orientation::Rotate0Mirror
+ * \image html rotation/rotate0Mirror.svg
+ * \var Orientation::Rotate180
+ * \image html rotation/rotate180.svg
+ * \var Orientation::Rotate180Mirror
+ * \image html rotation/rotate180Mirror.svg
+ * \var Orientation::Rotate90Mirror
+ * \image html rotation/rotate90Mirror.svg
+ * \var Orientation::Rotate270
+ * \image html rotation/rotate270.svg
+ * \var Orientation::Rotate270Mirror
+ * \image html rotation/rotate270Mirror.svg
+ * \var Orientation::Rotate90
+ * \image html rotation/rotate90.svg
+ */
+
+/**
+ * \brief Return the orientation representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The orientation corresponding to the rotation if \a success was set
+ * to `true`, otherwise the `Rotate0` orientation
+ */
+Orientation orientationFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Orientation::Rotate0;
+ case 90:
+ return Orientation::Rotate90;
+ case 180:
+ return Orientation::Rotate180;
+ case 270:
+ return Orientation::Rotate270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Orientation::Rotate0;
+}
+
+/**
+ * \brief Prints human-friendly names for Orientation items
+ * \param[in] out The output stream
+ * \param[in] orientation The Orientation item
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
+{
+ constexpr std::array<const char *, 9> orientationNames = {
+ "", /* Orientation starts counting from 1. */
+ "Rotate0",
+ "Rotate0Mirror",
+ "Rotate180",
+ "Rotate180Mirror",
+ "Rotate90Mirror",
+ "Rotate270",
+ "Rotate270Mirror",
+ "Rotate90",
+ };
+
+ out << orientationNames[static_cast<unsigned int>(orientation)];
+ return out;
+}
+
+} /* namespace libcamera */
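A short usage sketch of orientationFromRotation(), following directly from the
normalization above:

    #include <cassert>

    #include <libcamera/orientation.h>

    using namespace libcamera;

    int main()
    {
        bool ok;

        /* -90 % 360 == -90 in C++, so the (angle < 0) branch adds 360. */
        Orientation o = orientationFromRotation(-90, &ok);
        assert(ok && o == Orientation::Rotate270);

        /* 45 is not a multiple of 90: Rotate0 is returned as a fallback. */
        o = orientationFromRotation(45, &ok);
        assert(!ok && o == Orientation::Rotate0);

        return 0;
    }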
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..4e66b336
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1116 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ if (!sensor_)
+ return -ENODEV;
+
+ int ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor, adjust it to
+ * the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+ * map and that the sensor can produce a compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media bus format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
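To make the adjustment semantics concrete, a hedged sketch; the 10-bit-only
sensor is hypothetical, and data is an ISICameraData pointer as used elsewhere
in this file:

    /* Request a 12-bit format from a sensor that only produces 10-bit Bayer. */
    PixelFormat fmt = formats::SRGGB12;
    unsigned int code = data->getRawMediaBusFormat(&fmt);

    /*
     * fmt has been adjusted in place to the deepest format the sensor
     * provides (here formats::SRGGB10), and code holds the matching
     * MEDIA_BUS_FMT_SRGGB10_1X10 value. A return value of 0 means the
     * sensor offers no supported RAW format at all, and fmt is left
     * invalid.
     */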
+
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixelformat.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format, the ISI cannot
+ * generate any processed pixel format, as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+ * media bus formats.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
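std::set_intersection() requires both input ranges to be sorted, which is why
the two std::sort() calls precede it. A reduced, self-contained sketch of the
same pattern, using arbitrary placeholder values rather than real
MEDIA_BUS_FMT_* constants:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    int main()
    {
        std::vector<unsigned int> sensorCodes = { 0x3000, 0x1000, 0x2000 };
        std::vector<unsigned int> isiCodes = { 0x2000, 0x3000 };

        /* Both ranges must be sorted before intersecting. */
        std::sort(sensorCodes.begin(), sensorCodes.end());
        std::sort(isiCodes.begin(), isiCodes.end());

        std::vector<unsigned int> common;
        std::set_intersection(sensorCodes.begin(), sensorCodes.end(),
                              isiCodes.begin(), isiCodes.end(),
                              std::back_inserter(common));

        /* common == { 0x2000, 0x3000 }: the codes both sides support. */
        return 0;
    }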
+
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+ * format. If the sensor can only produce RAW Bayer, we can only fail
+ * here, as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+ /* If the stream is RAW or not supported, default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the streams size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to the i.MX8MP, which has 2
+ * ISI channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+ * successive channels together. Define a policy for channel allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+ * \todo The sensor format selection policy could be changed to
+ * prefer operating the sensor at full resolution to prioritize
+ * image quality in exchange for a usually slower frame rate.
+ * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+ * We can't use CameraSensor::getFormat() as it might return a
+ * format larger than our strict width limit, because that function
+ * prioritizes formats with the same aspect ratio over formats with a
+ * smaller size difference.
+ *
+ * Manually walk all the sensor supported sizes searching for
+ * the smallest larger format without considering the aspect ratio
+ * as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
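A reduced sketch of the "smallest larger size" walk above. It assumes, as the
early break does, that CameraSensor::sizes() returns sizes in ascending order;
the size list in the trailing comment is made up:

    #include <vector>

    #include <libcamera/geometry.h>

    using namespace libcamera;

    Size pickSensorSize(const std::vector<Size> &sizes, const Size &target,
                        unsigned int maxWidth)
    {
        for (const Size &s : sizes) {
            /* Ignore sizes that can't cover the target. */
            if (s.width < target.width || s.height < target.height)
                continue;

            /* Respect the maximum input width limit. */
            if (s.width > maxWidth)
                continue;

            /* First fit is the smallest fit in ascending order. */
            return s;
        }

        return {};
    }

    /*
     * pickSensorSize({ { 1280, 720 }, { 1920, 1080 }, { 2592, 1944 } },
     *                { 1920, 1080 }, 2048) returns { 1920, 1080 }.
     */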
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for the YUV/RGB use case.
+ *
+ * Verify that the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+/*
+ * Generate a StreamConfiguration for the RAW Bayer use case. Verify that the
+ * sensor can produce the requested RAW Bayer format and, if it cannot, adjust
+ * it to the one with the largest bit-depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat will be adjusted, if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - ViewFinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fallback to use a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
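For clarity, the routing table the loop above builds, sketched for two
configured streams; the pad numbers are hypothetical and depend on the
crossbar's actual pad layout:

    /* Assuming xbarSink_ == 0 and xbarFirstSource == 3. */
    V4L2Subdevice::Routing routing = {};
    for (unsigned int idx = 0; idx < 2; ++idx)
        routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
                             V4L2Subdevice::Stream{ 3 + idx, 0 },
                             V4L2_SUBDEV_ROUTE_FL_ACTIVE);

    /*
     * Resulting routes, one active route per stream, all fed from the
     * same sensor sink pad:
     *
     *   { pad 0, stream 0 } -> { pad 3, stream 0 }
     *   { pad 0, stream 0 } -> { pad 4, stream 0 }
     */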
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
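pipeFromStream() leans on pipeIndex()'s pointer arithmetic: streams_ is a
std::vector<Stream> sized once in the ISICameraData constructor and never
reallocated afterwards, so an element's address encodes its index. A sketch of
the invariant, with a hypothetical two-stream camera:

    unsigned int streamIndex(const ISICameraData &data, const Stream *stream)
    {
        /* Same computation as ISICameraData::pipeIndex(). */
        return stream - &data.streams_[0];
    }

    /*
     * With streams_ = { s0, s1 }, streamIndex(data, &s1) == 1, which
     * selects pipes_[1], i.e. the "mxc_isi.1" subdevice and its capture
     * video node enumerated in match().
     */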
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..b369b031
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index f4e8c663..aa544d7b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -2,19 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.cpp - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#include "cio2.h"
+#include <cmath>
#include <limits>
-#include <math.h>
#include <linux/media-bus-format.h>
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
+#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
@@ -133,10 +134,9 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
MediaLink *link = links[0];
MediaEntity *sensorEntity = link->source()->entity();
- sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!sensor_)
+ return -ENODEV;
ret = link->setEnabled(true);
if (ret)
@@ -177,10 +177,12 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
-int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
@@ -191,7 +193,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
- ret = sensor_->setFormat(&sensorFormat);
+ ret = sensor_->setFormat(&sensorFormat, transform);
if (ret)
return ret;
@@ -199,11 +201,11 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
if (ret)
return ret;
- const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.mbus_code);
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
if (itInfo == mbusCodesToPixelFormat.end())
return -EINVAL;
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(itInfo->second);
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
outputFormat->size = sensorFormat.size;
outputFormat->planesCount = 1;
@@ -211,7 +213,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
if (ret)
return ret;
- LOG(IPU3, Debug) << "CIO2 output format " << outputFormat->toString();
+ LOG(IPU3, Debug) << "CIO2 output format " << *outputFormat;
return 0;
}
@@ -227,13 +229,13 @@ StreamConfiguration CIO2Device::generateConfiguration(Size size) const
/* Query the sensor static information for closest match. */
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
- if (!sensorFormat.mbus_code) {
+ if (!sensorFormat.code) {
LOG(IPU3, Error) << "Sensor does not support mbus code";
return {};
}
cfg.size = sensorFormat.size;
- cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
cfg.bufferCount = kBufferCount;
return cfg;
@@ -301,7 +303,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
* comparing it with a single precision digit is enough.
*/
ratio = static_cast<unsigned int>(ratio * 10) / 10.0;
- float ratioDiff = fabsf(ratio - desiredRatio);
+ float ratioDiff = std::abs(ratio - desiredRatio);
unsigned int area = sz.width * sz.height;
unsigned int areaDiff = area - desiredArea;
@@ -323,7 +325,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
}
V4L2SubdeviceFormat format{};
- format.mbus_code = bestCode;
+ format.code = bestCode;
format.size = bestSize;
return format;
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index 68504a2d..963c2f6b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.h - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#pragma once
@@ -26,6 +26,7 @@ class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
+enum class Transform;
class CIO2Device
{
@@ -38,7 +39,8 @@ public:
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size, V4L2DeviceFormat *outputFormat);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
index a4c3477c..bc0526a7 100644
--- a/src/libcamera/pipeline/ipu3/frames.cpp
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -2,17 +2,18 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.cpp - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#include "frames.h"
+#include <libcamera/base/log.h>
+
#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
index 6e3cb915..a347b66f 100644
--- a/src/libcamera/pipeline/ipu3/frames.h
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.h - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index e5bbc382..7be78091 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.cpp - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#include "imgu.h"
@@ -386,9 +386,9 @@ ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
pipeConfigs.clear();
LOG(IPU3, Debug) << "Calculating pipe configuration for: ";
- LOG(IPU3, Debug) << "input: " << pipe->input.toString();
- LOG(IPU3, Debug) << "main: " << pipe->main.toString();
- LOG(IPU3, Debug) << "vf: " << pipe->viewfinder.toString();
+ LOG(IPU3, Debug) << "input: " << pipe->input;
+ LOG(IPU3, Debug) << "main: " << pipe->main;
+ LOG(IPU3, Debug) << "vf: " << pipe->viewfinder;
const Size &in = pipe->input;
@@ -397,8 +397,7 @@ ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
* See https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (in.width < ImgUDevice::kIFMaxCropWidth || in.height < ImgUDevice::kIFMaxCropHeight) {
- LOG(IPU3, Error) << "Input resolution " << in.toString()
- << " not supported";
+ LOG(IPU3, Error) << "Input resolution " << in << " not supported";
return {};
}
@@ -460,9 +459,9 @@ ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
}
LOG(IPU3, Debug) << "Computed pipe configuration: ";
- LOG(IPU3, Debug) << "IF: " << pipeConfigs[bestIndex].iif.toString();
- LOG(IPU3, Debug) << "BDS: " << pipeConfigs[bestIndex].bds.toString();
- LOG(IPU3, Debug) << "GDC: " << pipeConfigs[bestIndex].gdc.toString();
+ LOG(IPU3, Debug) << "IF: " << pipeConfigs[bestIndex].iif;
+ LOG(IPU3, Debug) << "BDS: " << pipeConfigs[bestIndex].bds;
+ LOG(IPU3, Debug) << "GDC: " << pipeConfigs[bestIndex].gdc;
return pipeConfigs[bestIndex];
}
@@ -480,7 +479,7 @@ int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputF
if (ret)
return ret;
- LOG(IPU3, Debug) << "ImgU input format = " << inputFormat->toString();
+ LOG(IPU3, Debug) << "ImgU input format = " << *inputFormat;
/*
* \todo The IPU3 driver implementation shall be changed to use the
@@ -496,23 +495,23 @@ int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputF
ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_CROP, &iif);
if (ret)
return ret;
- LOG(IPU3, Debug) << "ImgU IF rectangle = " << iif.toString();
+ LOG(IPU3, Debug) << "ImgU IF rectangle = " << iif;
Rectangle bds{ 0, 0, pipeConfig.bds };
ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_COMPOSE, &bds);
if (ret)
return ret;
- LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds.toString();
+ LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
V4L2SubdeviceFormat gdcFormat = {};
- gdcFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
gdcFormat.size = pipeConfig.gdc;
ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
if (ret)
return ret;
- LOG(IPU3, Debug) << "ImgU GDC format = " << gdcFormat.toString();
+ LOG(IPU3, Debug) << "ImgU GDC format = " << gdcFormat;
StreamConfiguration paramCfg = {};
paramCfg.size = inputFormat->size;
@@ -544,7 +543,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
imguFormat.size = cfg.size;
int ret = imgu_->setFormat(pad, &imguFormat);
@@ -559,7 +558,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
return 0;
*outputFormat = {};
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(formats::NV12);
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
outputFormat->size = cfg.size;
outputFormat->planesCount = 2;
@@ -569,7 +568,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
const char *name = dev == output_.get() ? "output" : "viewfinder";
LOG(IPU3, Debug) << "ImgU " << name << " format = "
- << outputFormat->toString();
+ << *outputFormat;
return 0;
}
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 0af4dd8a..fa508316 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.h - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index 6c5617cd..e31e3879 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,32 +2,36 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
-#include <iomanip>
#include <memory>
#include <queue>
#include <vector>
+#include <linux/intel-ipu3.h>
+
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
-#include <libcamera/ipa/ipu3_ipa_interface.h>
-#include <libcamera/ipa/ipu3_ipa_proxy.h>
#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+#include <libcamera/ipa/ipu3_ipa_proxy.h>
+
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -48,7 +52,7 @@ class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), supportsFlips_(false)
+ : Camera::Private(pipe)
{
}
@@ -70,8 +74,6 @@ public:
Stream rawStream_;
Rectangle cropRegion_;
- bool supportsFlips_;
- Transform rotationTransform_;
std::unique_ptr<DelayedControls> delayedCtrls_;
IPU3Frames frameInfos_;
@@ -86,8 +88,10 @@ public:
ControlInfoMap ipaControls_;
private:
- void queueFrameAction(unsigned int id,
- const ipa::ipu3::IPU3Action &action);
+ void metadataReady(unsigned int id, const ControlList &metadata);
+ void paramsComputed(unsigned int id);
+ void setSensorControls(unsigned int id, const ControlList &sensorControls,
+ const ControlList &lensControls);
};
class IPU3CameraConfiguration : public CameraConfiguration
@@ -131,8 +135,8 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -179,48 +183,15 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (config_.empty())
return Invalid;
- Transform combined = transform * data_->rotationTransform_;
-
/*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transposition, as we can't do
- * those. In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
*/
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
-
- /*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
- */
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
- * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -data_->rotationTransform_;
- combined = Transform::Identity;
- status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
/* Cap the number of entries to the available streams. */
if (config_.size() > kMaxStreams) {
@@ -240,6 +211,7 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
*/
unsigned int rawCount = 0;
unsigned int yuvCount = 0;
+ Size rawRequirement;
Size maxYuvSize;
Size rawSize;
@@ -248,10 +220,11 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
rawCount++;
- rawSize.expandTo(cfg.size);
+ rawSize = std::max(rawSize, cfg.size);
} else {
yuvCount++;
- maxYuvSize.expandTo(cfg.size);
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
}
}
@@ -280,17 +253,17 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
* The output YUV streams will be limited in size to the maximum frame
* size requested for the RAW stream, if present.
*
- * If no raw stream is requested generate a size as large as the maximum
- * requested YUV size aligned to the ImgU constraints and bound by the
- * sensor's maximum resolution. See
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
* https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (rawSize.isNull())
- rawSize = maxYuvSize.expandedTo({ ImgUDevice::kIFMaxCropWidth,
- ImgUDevice::kIFMaxCropHeight })
- .grownBy({ ImgUDevice::kOutputMarginWidth,
- ImgUDevice::kOutputMarginHeight })
- .boundedTo(data_->cio2_.sensor()->resolution());
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
if (!cio2Configuration_.pixelFormat.isValid())
@@ -417,11 +390,12 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config = new IPU3CameraConfiguration(data);
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
if (roles.empty())
return config;
@@ -487,7 +461,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
@@ -549,7 +522,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
@@ -565,7 +538,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
- ret = cio2->configure(sensorSize, &cio2Format);
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
@@ -574,24 +547,6 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
data->cropRegion_ = sensorInfo.analogCrop;
/*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- ControlList sensorCtrls(cio2->sensor()->controls());
- sensorCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::HFlip)));
- sensorCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::VFlip)));
-
- ret = cio2->sensor()->setControls(&sensorCtrls);
- if (ret)
- return ret;
- }
-
- /*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
@@ -660,6 +615,11 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
ipa::ipu3::IPAConfigInfo configInfo;
configInfo.sensorControls = data->cio2_.sensor()->controls();
+
+ CameraLens *lens = data->cio2_.sensor()->focusLens();
+ if (lens)
+ configInfo.lensControls = lens->controls();
+
configInfo.sensorInfo = sensorInfo;
configInfo.bdsOutputSize = config->imguConfig().bds;
configInfo.iif = config->imguConfig().iif;
@@ -829,7 +789,7 @@ void IPU3CameraData::cancelPendingRequests()
for (auto it : request->buffers()) {
FrameBuffer *buffer = it.second;
- buffer->cancel();
+ buffer->_d()->cancel();
pipe()->completeBuffer(request, buffer);
}
@@ -866,11 +826,7 @@ void IPU3CameraData::queuePendingRequests()
info->rawBuffer = rawBuffer;
- ipa::ipu3::IPU3Event ev;
- ev.op = ipa::ipu3::EventProcessControls;
- ev.frame = info->id;
- ev.controls = request->controls();
- ipa_->processEvent(ev);
+ ipa_->queueRequest(info->id, request->controls());
pendingRequests_.pop();
processingRequests_.push(request);
@@ -1003,7 +959,7 @@ int PipelineHandlerIPU3::updateControls(IPU3CameraData *data)
values.reserve(testPatternModes.size());
for (auto pattern : testPatternModes)
- values.emplace_back(static_cast<int32_t>(pattern));
+ values.emplace_back(pattern);
controls[&controls::draft::TestPatternMode] = ControlInfo(values);
}
@@ -1122,14 +1078,10 @@ int PipelineHandlerIPU3::registerCameras()
if (ret)
continue;
- /*
- * \todo Read delay values from the sensor itself or from a
- * a sensor database. For now use generic values taken from
- * the Raspberry Pi and listed as 'generic values'.
- */
+ const CameraSensorProperties::SensorDelays &delays = cio2->sensor()->sensorDelays();
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
- { V4L2_CID_EXPOSURE, { 2, false } },
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
};
data->delayedCtrls_ =
@@ -1139,25 +1091,12 @@ int PipelineHandlerIPU3::registerCameras()
&IPU3CameraData::frameStart);
/* Convert the sensor rotation to a transformation */
- int32_t rotation = 0;
- if (data->properties_.contains(properties::Rotation))
- rotation = data->properties_.get(properties::Rotation);
- else
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
LOG(IPU3, Warning) << "Rotation control not exposed by "
<< cio2->sensor()->id()
<< ". Assume rotation 0";
- bool success;
- data->rotationTransform_ = transformFromRotation(rotation, &success);
- if (!success)
- LOG(IPU3, Warning) << "Invalid rotation of " << rotation
- << " degrees: ignoring";
-
- ControlList ctrls = cio2->sensor()->getControls({ V4L2_CID_HFLIP });
- if (!ctrls.empty())
- /* We assume the sensor supports VFLIP too. */
- data->supportsFlips_ = true;
-
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
@@ -1175,19 +1114,19 @@ int PipelineHandlerIPU3::registerCameras()
* returned through the ImgU main and secondary outputs.
*/
data->cio2_.bufferReady().connect(data.get(),
- &IPU3CameraData::cio2BufferReady);
+ &IPU3CameraData::cio2BufferReady);
data->cio2_.bufferAvailable.connect(
data.get(), &IPU3CameraData::queuePendingRequests);
data->imgu_->input_->bufferReady.connect(&data->cio2_,
- &CIO2Device::tryReturnBuffer);
+ &CIO2Device::tryReturnBuffer);
data->imgu_->output_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ &IPU3CameraData::imguOutputBufferReady);
data->imgu_->viewfinder_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ &IPU3CameraData::imguOutputBufferReady);
data->imgu_->param_->bufferReady.connect(data.get(),
- &IPU3CameraData::paramBufferReady);
+ &IPU3CameraData::paramBufferReady);
data->imgu_->stat_->bufferReady.connect(data.get(),
- &IPU3CameraData::statBufferReady);
+ &IPU3CameraData::statBufferReady);
/* Create and register the Camera instance. */
const std::string &cameraId = cio2->sensor()->id();
@@ -1213,7 +1152,9 @@ int IPU3CameraData::loadIPA()
if (!ipa_)
return -ENOENT;
- ipa_->queueFrameAction.connect(this, &IPU3CameraData::queueFrameAction);
+ ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &IPU3CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady);
/*
* Pass the sensor info to the IPA to initialize controls.
@@ -1238,8 +1179,15 @@ int IPU3CameraData::loadIPA()
if (ret)
return ret;
- ret = ipa_->init(IPASettings{ "", sensor->model() }, sensorInfo,
- sensor->controls(), &ipaControls_);
+ /*
+ * The API tuning file is made from the sensor name. If the tuning file
+	 * The IPA tuning file is named after the sensor model. If the tuning file
+ */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
+
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
@@ -1248,69 +1196,60 @@ int IPU3CameraData::loadIPA()
return 0;
}
-void IPU3CameraData::queueFrameAction(unsigned int id,
- const ipa::ipu3::IPU3Action &action)
+void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id,
+ const ControlList &sensorControls,
+ const ControlList &lensControls)
{
- switch (action.op) {
- case ipa::ipu3::ActionSetSensorControls: {
- const ControlList &sensorControls = action.sensorControls;
- delayedCtrls_->push(sensorControls);
+ delayedCtrls_->push(sensorControls);
- CameraLens *focusLens = cio2_.sensor()->focusLens();
- if (!focusLens)
- break;
-
- const ControlList lensControls = action.lensControls;
- if (!lensControls.contains(V4L2_CID_FOCUS_ABSOLUTE))
- break;
+ CameraLens *focusLens = cio2_.sensor()->focusLens();
+ if (!focusLens)
+ return;
- const ControlValue &focusValue =
- lensControls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ if (!lensControls.contains(V4L2_CID_FOCUS_ABSOLUTE))
+ return;
- focusLens->setFocusPostion(focusValue.get<int32_t>());
+ const ControlValue &focusValue = lensControls.get(V4L2_CID_FOCUS_ABSOLUTE);
- break;
- }
- case ipa::ipu3::ActionParamFilled: {
- IPU3Frames::Info *info = frameInfos_.find(id);
- if (!info)
- break;
-
- /* Queue all buffers from the request aimed for the ImgU. */
- for (auto it : info->request->buffers()) {
- const Stream *stream = it.first;
- FrameBuffer *outbuffer = it.second;
+ focusLens->setFocusPosition(focusValue.get<int32_t>());
+}
- if (stream == &outStream_)
- imgu_->output_->queueBuffer(outbuffer);
- else if (stream == &vfStream_)
- imgu_->viewfinder_->queueBuffer(outbuffer);
- }
+void IPU3CameraData::paramsComputed(unsigned int id)
+{
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- imgu_->param_->queueBuffer(info->paramBuffer);
- imgu_->stat_->queueBuffer(info->statBuffer);
- imgu_->input_->queueBuffer(info->rawBuffer);
+ /* Queue all buffers from the request aimed for the ImgU. */
+ for (auto it : info->request->buffers()) {
+ const Stream *stream = it.first;
+ FrameBuffer *outbuffer = it.second;
- break;
+ if (stream == &outStream_)
+ imgu_->output_->queueBuffer(outbuffer);
+ else if (stream == &vfStream_)
+ imgu_->viewfinder_->queueBuffer(outbuffer);
}
- case ipa::ipu3::ActionMetadataReady: {
- IPU3Frames::Info *info = frameInfos_.find(id);
- if (!info)
- break;
- Request *request = info->request;
- request->metadata().merge(action.controls);
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
+ imgu_->param_->queueBuffer(info->paramBuffer);
+ imgu_->stat_->queueBuffer(info->statBuffer);
+ imgu_->input_->queueBuffer(info->rawBuffer);
+}
- info->metadataProcessed = true;
- if (frameInfos_.tryComplete(info))
- pipe()->completeRequest(request);
+void IPU3CameraData::metadataReady(unsigned int id, const ControlList &metadata)
+{
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- break;
- }
- default:
- LOG(IPU3, Error) << "Unknown action " << action.op;
- break;
- }
+ Request *request = info->request;
+ request->metadata().merge(metadata);
+
+ info->metadataProcessed = true;
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
}
/* -----------------------------------------------------------------------------
@@ -1335,8 +1274,9 @@ void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
request->metadata().set(controls::draft::PipelineDepth, 3);
/* \todo Actually apply the scaler crop region to the ImgU. */
- if (request->controls().contains(controls::ScalerCrop))
- cropRegion_ = request->controls().get(controls::ScalerCrop);
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
request->metadata().set(controls::ScalerCrop, cropRegion_);
if (frameInfos_.tryComplete(info))
@@ -1362,7 +1302,7 @@ void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
for (auto it : request->buffers()) {
FrameBuffer *b = it.second;
- b->cancel();
+ b->_d()->cancel();
pipe()->completeBuffer(request, b);
}
@@ -1385,11 +1325,7 @@ void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
if (request->findBuffer(&rawStream_))
pipe()->completeBuffer(request, buffer);
- ipa::ipu3::IPU3Event ev;
- ev.op = ipa::ipu3::EventFillParams;
- ev.frame = info->id;
- ev.bufferId = info->paramBuffer->cookie();
- ipa_->processEvent(ev);
+ ipa_->computeParams(info->id, info->paramBuffer->cookie());
}
void IPU3CameraData::paramBufferReady(FrameBuffer *buffer)
@@ -1433,13 +1369,8 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
return;
}
- ipa::ipu3::IPU3Event ev;
- ev.op = ipa::ipu3::EventStatReady;
- ev.frame = info->id;
- ev.bufferId = info->statBuffer->cookie();
- ev.frameTimestamp = request->metadata().get(controls::SensorTimestamp);
- ev.sensorControls = info->effectiveSensorControls;
- ipa_->processEvent(ev);
+ ipa_->processStats(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
+ info->statBuffer->cookie(), info->effectiveSensorControls);
}
/*
@@ -1463,20 +1394,18 @@ void IPU3CameraData::frameStart(uint32_t sequence)
/*
* Handle controls to be set immediately on the next frame.
 * This currently only handles the TestPatternMode control.
- *
+ *
* \todo Synchronize with the sequence number
*/
Request *request = processingRequests_.front();
processingRequests_.pop();
- if (!request->controls().contains(controls::draft::TestPatternMode))
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
return;
- const int32_t testPatternMode = request->controls().get(
- controls::draft::TestPatternMode);
-
int ret = cio2_.sensor()->setTestPatternMode(
- static_cast<controls::draft::TestPatternModeEnum>(testPatternMode));
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
if (ret) {
LOG(IPU3, Error) << "Failed to set test pattern mode: "
<< ret;
@@ -1484,9 +1413,9 @@ void IPU3CameraData::frameStart(uint32_t sequence)
}
request->metadata().set(controls::draft::TestPatternMode,
- testPatternMode);
+ *testPatternMode);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/meson.build b/src/libcamera/pipeline/ipu3/meson.build
index a1b0b31a..f2904b4a 100644
--- a/src/libcamera/pipeline/ipu3/meson.build
+++ b/src/libcamera/pipeline/ipu3/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'cio2.cpp',
'frames.cpp',
'imgu.cpp',
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..5abd6b20
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1755 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <cstdlib>
+#include <limits>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <string>
+
+#include <linux/mali-c55-config.h>
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/stream.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_proxy.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinInputSize = { 640, 480 };
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+struct MaliC55FrameInfo {
+ Request *request;
+
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ bool paramsDone;
+ bool statsDone;
+};
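+
+/*
+ * A request completes only once both paramsDone and statsDone are set and
+ * all of its image buffers have returned; see
+ * PipelineHandlerMaliC55::tryComplete(). When the TPG is in use no IPA
+ * runs, so both flags are pre-set to true at queue time.
+ */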
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+ int loadIPA();
+
+	/* Delegate these functions to either the TPG or the CameraSensor. */
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ int pixfmtToMbusCode(const PixelFormat &pixFmt) const;
+ const PixelFormat &bestRawFormat() const;
+
+ void updateControls(const ControlInfoMap &ipaControls);
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+ std::unique_ptr<ipa::mali_c55::IPAProxyMaliC55> ipa_;
+ std::vector<IPABuffer> ipaStatBuffers_;
+ std::vector<IPABuffer> ipaParamBuffers_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+private:
+ void initTPGData();
+ void setSensorControls(const ControlList &sensorControls);
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+ /* If this camera is created from TPG, we return here. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+ * Register a CameraSensor if we connect to a sensor and create
+ * an entity for the connected CSI-2 receiver.
+ */
+	sensor_ = CameraSensorFactoryBase::create(entity_);
+	if (!sensor_)
+		return -ENODEV;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+	ret = csi_->open();
+	if (ret) {
+		LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+		return ret;
+	}
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+void MaliC55CameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+/*
+ * The Mali C55 ISP can only produce 16-bit RAW output in bypass modes, but the
+ * sensors connected to it might produce 8/10/12/16 bits. We simply search the
+ * sensor's supported formats for the one with a matching bayer order and the
+ * greatest bitdepth.
+ */
+int MaliC55CameraData::pixfmtToMbusCode(const PixelFormat &pixFmt) const
+{
+ auto it = maliC55FmtToCode.find(pixFmt);
+ if (it == maliC55FmtToCode.end())
+ return -EINVAL;
+
+ BayerFormat bayerFormat = BayerFormat::fromMbusCode(it->second);
+ if (!bayerFormat.isValid())
+ return -EINVAL;
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ unsigned int sensorMbusCode = 0;
+ unsigned int bitDepth = 0;
+
+ for (const auto &[code, sizes] : formats) {
+ BayerFormat sdBayerFormat = BayerFormat::fromMbusCode(code);
+ if (!sdBayerFormat.isValid())
+ continue;
+
+ if (sdBayerFormat.order != bayerFormat.order)
+ continue;
+
+ if (sdBayerFormat.bitDepth > bitDepth) {
+ bitDepth = sdBayerFormat.bitDepth;
+ sensorMbusCode = code;
+ }
+ }
+
+ if (!sensorMbusCode)
+ return -EINVAL;
+
+ return sensorMbusCode;
+}
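+
+/*
+ * A minimal illustration of the matching above, assuming a hypothetical
+ * sensor exposing MEDIA_BUS_FMT_SGRBG10_1X10 and MEDIA_BUS_FMT_SGRBG12_1X12:
+ *
+ *   int code = pixfmtToMbusCode(formats::SGRBG16);
+ *   // SGRBG16 maps to MEDIA_BUS_FMT_SGRBG16_1X16 (bayer order GRBG);
+ *   // both sensor codes share that order, so the greater bit depth
+ *   // wins and code == MEDIA_BUS_FMT_SGRBG12_1X12.
+ */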
+
+/*
+ * Find a RAW PixelFormat supported by both the ISP and the sensor.
+ *
+ * The situation is mildly complicated by the fact that we expect the sensor to
+ * output something like RAW8/10/12/16, but the ISP can only accept as input
+ * RAW20 and can only produce as output RAW16. The one constant in that is the
+ * bayer order of the data, so we'll simply check that the sensor produces a
+ * format with a bayer order that matches that of one of the formats we support,
+ * and select that.
+ */
+const PixelFormat &MaliC55CameraData::bestRawFormat() const
+{
+ static const PixelFormat invalidPixFmt = {};
+
+ for (const auto &fmt : sd_->formats(0)) {
+ BayerFormat sensorBayer = BayerFormat::fromMbusCode(fmt.first);
+
+ if (!sensorBayer.isValid())
+ continue;
+
+ for (const auto &[pixFmt, rawCode] : maliC55FmtToCode) {
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.order == sensorBayer.order)
+ return pixFmt;
+ }
+ }
+
+ LOG(MaliC55, Error) << "Sensor doesn't provide a compatible format";
+ return invalidPixFmt;
+}
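+
+/*
+ * For example, a hypothetical sensor producing only
+ * MEDIA_BUS_FMT_SBGGR10_1X10 (bayer order BGGR) matches the sole BGGR RAW
+ * entry in maliC55FmtToCode, so bestRawFormat() returns formats::SBGGR16.
+ */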
+
+void MaliC55CameraData::updateControls(const ControlInfoMap &ipaControls)
+{
+ if (!sensor_)
+ return;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ ControlInfoMap::Map controls;
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ controls[&controls::ScalerCrop] =
+ ControlInfo(ispMinCrop, sensorInfo.analogCrop,
+ sensorInfo.analogCrop);
+
+ for (auto const &c : ipaControls)
+ controls.emplace(c.first, c.second);
+
+ controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the RAW mbus code is supported by the image source. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return bestRawFormat();
+
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &size) const
+{
+ /* Expand the RAW size to the minimum ISP input size. */
+ Size rawSize = size.expandedTo(kMaliC55MinInputSize);
+
+ /* Check if the size is natively supported. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return {};
+
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &sz : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(sz.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(sz.height));
+		if (dist < distance) {
+			distance = dist;
+			bestSize = sz;
+		}
+ }
+
+ return bestSize;
+}
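+
+/*
+ * A worked example of the nearest-size selection above, assuming a
+ * hypothetical sensor supporting 1920x1080 and 3840x2160: a request for
+ * 2000x1500 already exceeds kMaliC55MinInputSize and isn't natively
+ * supported, so the Manhattan distances decide:
+ *
+ *   1920x1080: |2000 - 1920| + |1500 - 1080| = 500
+ *   3840x2160: |2000 - 3840| + |1500 - 2160| = 2500
+ *
+ * and adjustRawSizes() returns 1920x1080.
+ */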
+
+int MaliC55CameraData::loadIPA()
+{
+ int ret;
+
+ /* Do not initialize IPA for TPG. */
+ if (!sensor_)
+ return 0;
+
+ ipa_ = IPAManager::createIPA<ipa::mali_c55::IPAProxyMaliC55>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
+
+ ipa_->setSensorControls.connect(this, &MaliC55CameraData::setSensorControls);
+
+ std::string ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml",
+ "uncalibrated.yaml");
+
+ /* We need to inform the IPA of the sensor configuration */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = sensor_->controls();
+
+ ControlInfoMap ipaControls;
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, ipaConfig,
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to initialise the Mali-C55 IPA";
+ return ret;
+ }
+
+ updateControls(ipaControls);
+
+ return 0;
+}
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+ const Transform &combinedTransform() { return combinedTransform_; }
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+ Transform combinedTransform_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * The TPG doesn't support flips, so we only need to calculate a
+ * transform if we have a sensor.
+ */
+ if (data_->sensor_) {
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+ } else {
+ combinedTransform_ = Transform::Rot0;
+ }
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ /*
+	 * The C55 cannot upscale. Limit the configuration to the ISP
+ * capabilities and the sensor resolution.
+ */
+ Size maxSize = kMaliC55MaxSize.boundedTo(data_->resolution());
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+ << "RAW sizes adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(rawConfig->pixelFormat);
+ rawConfig->stride = info.stride(rawConfig->size.width, 0, 4);
+ rawConfig->frameSize = info.frameSize(rawConfig->size, 4);
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /*
+ * Adjust processed streams.
+ *
+ * Compute the minimum sensor size to be later used to select the
+ * sensor configuration.
+ */
+ Size minSensorSize = kMaliC55MinInputSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (minSensorSize < size)
+ minSensorSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawConfig->pixelFormat);
+ sensorFormat_.size = rawConfig->size.expandedTo(minSensorSize);
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawFormat);
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(sensorFormat_.code);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ if (minSensorSize.width > size.width ||
+ minSensorSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(minSensorSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(minSensorSize.height) -
+ static_cast<int>(size.height));
+		if (dist < distance) {
+			distance = dist;
+			bestSize = size;
+		}
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+ int allocateBuffers(Camera *camera);
+ void freeBuffers(Camera *camera);
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsBufferReady(FrameBuffer *buffer);
+ void statsBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int requestId);
+ void statsProcessed(unsigned int requestId, const ControlList &metadata);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ MediaLink *link;
+ Stream *stream;
+ };
+
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ MaliC55FrameInfo *findFrameInfo(FrameBuffer *buffer);
+ MaliC55FrameInfo *findFrameInfo(Request *request);
+ void tryComplete(MaliC55FrameInfo *info);
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void applyScalerCrop(Camera *camera, const ControlList &controls);
+
+ bool registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> stats_;
+ std::unique_ptr<V4L2VideoDevice> params_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> statsBuffers_;
+ std::queue<FrameBuffer *> availableStatsBuffers_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramsBuffers_;
+ std::queue<FrameBuffer *> availableParamsBuffers_;
+
+ std::map<unsigned int, MaliC55FrameInfo> frameInfoMap_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ int rawCode = data->pixfmtToMbusCode(pixFmt);
+ if (rawCode < 0)
+ continue;
+
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
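+
+/*
+ * A brief usage sketch from the application side, assuming an acquired
+ * Camera backed by this pipeline handler (illustrative only):
+ *
+ *   std::unique_ptr<CameraConfiguration> config =
+ *           camera->generateConfiguration({ StreamRole::Raw,
+ *                                           StreamRole::Viewfinder });
+ *   // Raw reserves the FR pipe, so the viewfinder is assigned to DS.
+ *   if (config && config->validate() != CameraConfiguration::Invalid)
+ *           camera->configure(config.get());
+ */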
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
+	/* Enable the debayer route to set the fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+	/*
+	 * Compute the scaler-in to scaler-out ratio: first center-crop to
+	 * align the FOV to the desired aspect ratio, then scale to the
+	 * desired size.
+	 */
+ Size scalerIn = subdevFormat.size.boundedToAspectRatio(config.size);
+ int xCrop = (subdevFormat.size.width - scalerIn.width) / 2;
+ int yCrop = (subdevFormat.size.height - scalerIn.height) / 2;
+ Rectangle ispCrop = { xCrop, yCrop, scalerIn };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ Rectangle ispCompose = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCompose);
+ if (ret)
+ return ret;
+
+ /*
+ * The source pad format size comes directly from the sink
+ * compose rectangle.
+ */
+ subdevFormat.size = ispCompose.size();
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
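+
+/*
+ * A worked example of the crop/compose computation above, assuming a
+ * 1920x1080 ISP input and a 640x480 stream: boundedToAspectRatio() gives a
+ * 4:3 scaler input of 1440x1080, so the sink crop becomes
+ * (240, 0)/1440x1080 and the compose rectangle (and hence the source pad
+ * size) 640x480.
+ */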
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+ /* Link the graph depending if we are operating the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->sensor_) {
+ ret = data->sensor_->setFormat(&subdevFormat,
+ maliConfig->combinedTransform());
+ if (ret)
+ return ret;
+ }
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->getFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ V4L2DeviceFormat statsFormat;
+ ret = stats_->getFormat(&statsFormat);
+ if (ret)
+ return ret;
+
+ if (statsFormat.planes[0].size != sizeof(struct mali_c55_stats_buffer)) {
+ LOG(MaliC55, Error) << "3a stats buffer size invalid";
+ return -EINVAL;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+	 * Configure the resizer: a fixed format on the sink pad; use the
+	 * media bus code associated with the desired capture format on the
+	 * source pad.
+	 *
+	 * Configure the crop and compose rectangles to match the desired
+	 * stream output size.
+ *
+ * \todo Make the crop/scaler configurable
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /*
+ * Enable the media link between the pipe's resizer and the
+ * capture video device
+ */
+
+ ret = pipe->link->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable resizer's link";
+ return ret;
+ }
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ if (!data->ipa_)
+ return 0;
+
+ /*
+ * Enable the media link between the ISP subdevice and the statistics
+ * video device.
+ */
+ const MediaEntity *ispEntity = isp_->entity();
+ ret = ispEntity->getPadByIndex(3)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable statistics link";
+ return ret;
+ }
+
+ /*
+ * Enable the media link between the ISP subdevice and the parameters
+ * video device.
+ */
+ ret = ispEntity->getPadByIndex(4)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable parameters link";
+ return ret;
+ }
+
+ /* We need to inform the IPA of the sensor configuration */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = data->sensor_->controls();
+
+ /*
+ * And we also need to tell the IPA the bayerOrder of the data (as
+ * affected by any flips that we've configured)
+ */
+ const Transform &combinedTransform = maliConfig->combinedTransform();
+ BayerFormat::Order bayerOrder = data->sensor_->bayerOrder(combinedTransform);
+
+ ControlInfoMap ipaControls;
+ ret = data->ipa_->configure(ipaConfig, utils::to_underlying(bayerOrder),
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure IPA";
+ return ret;
+ }
+
+ data->updateControls(ipaControls);
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+void PipelineHandlerMaliC55::freeBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ while (!availableStatsBuffers_.empty())
+ availableStatsBuffers_.pop();
+ while (!availableParamsBuffers_.empty())
+ availableParamsBuffers_.pop();
+
+ statsBuffers_.clear();
+ paramsBuffers_.clear();
+
+ if (data->ipa_) {
+ data->ipa_->unmapBuffers(data->ipaStatBuffers_);
+ data->ipa_->unmapBuffers(data->ipaParamBuffers_);
+ }
+ data->ipaStatBuffers_.clear();
+ data->ipaParamBuffers_.clear();
+
+ if (stats_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release stats buffers";
+
+ if (params_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release params buffers";
+}
+
+int PipelineHandlerMaliC55::allocateBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ unsigned int ipaBufferId = 1;
+ unsigned int bufferCount;
+ int ret;
+
+ bufferCount = std::max({
+ data->frStream_.configuration().bufferCount,
+ data->dsStream_.configuration().bufferCount,
+ });
+
+ ret = stats_->allocateBuffers(bufferCount, &statsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : statsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaStatBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableStatsBuffers_.push(buffer.get());
+ }
+
+ ret = params_->allocateBuffers(bufferCount, &paramsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : paramsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaParamBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableParamsBuffers_.push(buffer.get());
+ }
+
+ if (data->ipa_) {
+ data->ipa_->mapBuffers(data->ipaStatBuffers_, true);
+ data->ipa_->mapBuffers(data->ipaParamBuffers_, false);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ int ret;
+
+ ret = allocateBuffers(camera);
+ if (ret)
+ return ret;
+
+ if (data->ipa_) {
+ ret = data->ipa_->start();
+ if (ret) {
+ LOG(MaliC55, Error)
+				<< "Failed to start IPA " << camera->id();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ ret = stats_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stats stream";
+
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = params_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start params stream";
+
+ stats_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = isp_->setFrameStartEnabled(true);
+ if (ret)
+ LOG(MaliC55, Error) << "Failed to enable frame start events";
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ isp_->setFrameStartEnabled(false);
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+
+ stats_->streamOff();
+ params_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+}
+
+void PipelineHandlerMaliC55::applyScalerCrop(Camera *camera,
+ const ControlList &controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (!scalerCrop)
+ return;
+
+ if (!data->sensor_) {
+ LOG(MaliC55, Error) << "ScalerCrop not supported for TPG";
+ return;
+ }
+
+ Rectangle nativeCrop = *scalerCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ /*
+ * The ScalerCrop rectangle re-scaling in the ISP crop rectangle
+ * comes straight from the RPi pipeline handler.
+ *
+ * Create a version of the crop rectangle aligned to the analogue crop
+ * rectangle top-left coordinates and scaled in the [analogue crop to
+ * output frame] ratio to take into account binning/skipping on the
+ * sensor.
+ */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo.analogCrop
+ .topLeft());
+ ispCrop.scaleBy(sensorInfo.outputSize, sensorInfo.analogCrop.size());
+
+ /*
+ * The crop rectangle should be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ Size minSize = ispMinCrop.size().expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center())
+ .enclosedIn(Rectangle(sensorInfo.outputSize));
+
+ /*
+ * As the resizer can't upscale, the crop rectangle has to be larger
+ * than the larger stream output size.
+ */
+ Size maxYuvSize;
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ const StreamConfiguration &config = pipe.stream->configuration();
+ if (isFormatRaw(config.pixelFormat)) {
+ LOG(MaliC55, Debug) << "Cannot crop with a RAW stream";
+ return;
+ }
+
+ Size streamSize = config.size;
+ if (streamSize.width > maxYuvSize.width)
+ maxYuvSize.width = streamSize.width;
+ if (streamSize.height > maxYuvSize.height)
+ maxYuvSize.height = streamSize.height;
+ }
+
+	/* Rectangle::size() returns a copy, so write the expanded size back. */
+	Size cropSize = ispCrop.size().expandedTo(maxYuvSize);
+	ispCrop.width = cropSize.width;
+	ispCrop.height = cropSize.height;
+
+ /*
+ * Now apply the scaler crop to each enabled output. This overrides the
+	 * crop configuration performed at configure() time and can cause
+	 * non-square pixels if the crop rectangle and the scaler output FOV
+	 * have different aspect ratios.
+ */
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ /* Create a copy to avoid setSelection() to modify ispCrop. */
+ Rectangle pipeCrop = ispCrop;
+ ret = pipe.resizer->setSelection(0, V4L2_SEL_TGT_CROP, &pipeCrop);
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to apply crop to "
+ << (pipe.stream == &data->frStream_ ?
+ "FR" : "DS") << " pipe";
+ return;
+ }
+ }
+}
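+
+/*
+ * A worked example of the re-scaling above, assuming a hypothetical sensor
+ * with a 3840x2160 analogue crop binned to a 1920x1080 output: a requested
+ * ScalerCrop of (960, 540)/1920x1080 is first made relative to the analogue
+ * crop, then scaled by 1920/3840 and 1080/2160 into the ISP crop
+ * (480, 270)/960x540. That rectangle already exceeds the expanded 640x480
+ * minimum and lies inside the sensor output, so it is applied unchanged to
+ * each enabled resizer.
+ */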
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ /* Do not run the IPA if the TPG is in use. */
+ if (!data->ipa_) {
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+ frameInfo.statBuffer = nullptr;
+ frameInfo.paramBuffer = nullptr;
+ frameInfo.paramsDone = true;
+ frameInfo.statsDone = true;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+
+ return 0;
+ }
+
+ if (availableStatsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Stats buffer underrun";
+ return -ENOENT;
+ }
+
+ if (availableParamsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Params buffer underrun";
+ return -ENOENT;
+ }
+
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+
+ frameInfo.statBuffer = availableStatsBuffers_.front();
+ availableStatsBuffers_.pop();
+ frameInfo.paramBuffer = availableParamsBuffers_.front();
+ availableParamsBuffers_.pop();
+
+ frameInfo.paramsDone = false;
+ frameInfo.statsDone = false;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+ data->ipa_->fillParams(request->sequence(),
+ frameInfo.paramBuffer->cookie());
+
+ return 0;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(Request *request)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.request == request)
+ return &info;
+ }
+
+ return nullptr;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(FrameBuffer *buffer)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.paramBuffer == buffer ||
+ info.statBuffer == buffer)
+ return &info;
+ }
+
+ return nullptr;
+}
+
+void PipelineHandlerMaliC55::tryComplete(MaliC55FrameInfo *info)
+{
+ if (!info->paramsDone)
+ return;
+ if (!info->statsDone)
+ return;
+
+ Request *request = info->request;
+ if (request->hasPendingBuffers())
+ return;
+
+ if (info->statBuffer)
+ availableStatsBuffers_.push(info->statBuffer);
+ if (info->paramBuffer)
+ availableParamsBuffers_.push(info->paramBuffer);
+
+ frameInfoMap_.erase(request->sequence());
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::imageBufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+ MaliC55FrameInfo *info = findFrameInfo(request);
+ ASSERT(info);
+
+ if (completeBuffer(request, buffer))
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::paramsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ info->paramsDone = true;
+
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::statsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ Request *request = info->request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ ControlList sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence);
+
+ data->ipa_->processStats(request->sequence(), buffer->cookie(),
+ sensorControls);
+}
+
+void PipelineHandlerMaliC55::paramsComputed(unsigned int requestId)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+ Request *request = frameInfo.request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ /*
+ * Queue buffers for stats and params, then queue buffers to the capture
+ * video devices.
+ */
+
+ frameInfo.paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct mali_c55_params_buffer);
+ params_->queueBuffer(frameInfo.paramBuffer);
+ stats_->queueBuffer(frameInfo.statBuffer);
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+}
+
+void PipelineHandlerMaliC55::statsProcessed(unsigned int requestId,
+ const ControlList &metadata)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+
+ frameInfo.statsDone = true;
+ frameInfo.request->metadata().merge(metadata);
+
+ tryComplete(&frameInfo);
+}
+
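+/*
+ * Create and register a Camera. The IPA signals are only connected when
+ * an IPA is in use (i.e. not for the TPG), and the DS stream is only
+ * exposed when the downscale pipe is fitted.
+ */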
+bool PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ if (data->loadIPA())
+ return false;
+
+ if (data->ipa_) {
+ data->ipa_->statsProcessed.connect(this, &PipelineHandlerMaliC55::statsProcessed);
+ data->ipa_->paramsComputed.connect(this, &PipelineHandlerMaliC55::paramsComputed);
+ }
+
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ return registerMaliCamera(std::move(data), name);
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+ data->properties_ = data->sensor_->properties();
+
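+		/*
+		 * Set up DelayedControls with the sensor-specific gain and
+		 * exposure delays, so that control writes take effect on the
+		 * intended frame.
+		 */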
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+		/* \todo Init properties. */
+
+ if (!registerMaliCamera(std::move(data), sensor->name()))
+ return false;
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the always-available elements of the media graph.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+ dm.add("mali-c55 3a stats");
+ dm.add("mali-c55 3a params");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ stats_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a stats");
+ if (stats_->open() < 0)
+ return false;
+
+ params_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a params");
+ if (params_->open() < 0)
+ return false;
+
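+	/* Open the full resolution (FR) pipe's resizer subdev and capture node. */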
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->link = media_->link("mali-c55 resizer fr", 1, "mali-c55 fr", 0);
+ if (!frPipe->link) {
+ LOG(MaliC55, Error) << "No link between fr resizer and video node";
+ return false;
+ }
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->link = media_->link("mali-c55 resizer ds", 1,
+ "mali-c55 ds", 0);
+ if (!dsPipe->link) {
+ LOG(MaliC55, Error) << "No link between ds resizer and video node";
+ return false;
+ }
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+ }
+
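+	/* Completions of the 3A stats and params buffers drive the IPA flow. */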
+ stats_->bufferReady.connect(this, &PipelineHandlerMaliC55::statsBufferReady);
+ params_->bufferReady.connect(this, &PipelineHandlerMaliC55::paramsBufferReady);
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+		LOG(MaliC55, Error) << "ISP sink pad missing or has no connected links";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+	 * The last one is unsupported for now. The TPG is relatively easy; we
+	 * just register a Camera for it. If we have a CSI-2 receiver we need
+	 * to check its sink pad and register Cameras for anything connected to
+	 * it. There are some complex situations in which that might not be
+	 * true, but let's pretend they don't exist until we come across them.
+	 */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..eba8e5a3
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 30dc5b97..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,5 +1,20 @@
# SPDX-License-Identifier: CC0-1.0
+# Location of pipeline-specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+    # Don't reuse the pipeline variable below; the subdirectory may have
+    # overwritten it.
endforeach
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp b/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
deleted file mode 100644
index 69831dab..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.cpp - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.h b/src/libcamera/pipeline/raspberrypi/dma_heaps.h
deleted file mode 100644
index d38f41ea..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <libcamera/base/unique_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-class DmaHeap
-{
-public:
- DmaHeap();
- ~DmaHeap();
- bool isValid() const { return dmaHeapHandle_.isValid(); }
- UniqueFD alloc(const char *name, std::size_t size);
-
-private:
- UniqueFD dmaHeapHandle_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/meson.build b/src/libcamera/pipeline/raspberrypi/meson.build
deleted file mode 100644
index f1a2f5ee..00000000
--- a/src/libcamera/pipeline/raspberrypi/meson.build
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: CC0-1.0
-
-libcamera_sources += files([
- 'dma_heaps.cpp',
- 'raspberrypi.cpp',
- 'rpi_stream.cpp',
-])
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
deleted file mode 100644
index 29bff9d6..00000000
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ /dev/null
@@ -1,2127 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.
- *
- * raspberrypi.cpp - Pipeline handler for Raspberry Pi devices
- */
-#include <algorithm>
-#include <assert.h>
-#include <cmath>
-#include <fcntl.h>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <unordered_set>
-#include <utility>
-
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/camera.h>
-#include <libcamera/control_ids.h>
-#include <libcamera/formats.h>
-#include <libcamera/ipa/raspberrypi.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
-#include <libcamera/logging.h>
-#include <libcamera/property_ids.h>
-#include <libcamera/request.h>
-
-#include <linux/bcm2835-isp.h>
-#include <linux/media-bus-format.h>
-#include <linux/videodev2.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera.h"
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/delayed_controls.h"
-#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
-#include "libcamera/internal/ipa_manager.h"
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-#include "dma_heaps.h"
-#include "rpi_stream.h"
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(RPI)
-
-namespace {
-
-constexpr unsigned int defaultRawBitDepth = 12;
-
-/* Map of mbus codes to supported sizes reported by the sensor. */
-using SensorFormats = std::map<unsigned int, std::vector<Size>>;
-
-SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
-{
- SensorFormats formats;
-
- for (auto const mbusCode : sensor->mbusCodes())
- formats.emplace(mbusCode, sensor->sizes(mbusCode));
-
- return formats;
-}
-
-PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
- BayerFormat::Packing packingReq)
-{
- BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
-
- ASSERT(bayer.isValid());
-
- bayer.packing = packingReq;
- PixelFormat pix = bayer.toPixelFormat();
-
- /*
- * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
- * variants. So if the PixelFormat returns as invalid, use the non-packed
- * conversion instead.
- */
- if (!pix.isValid()) {
- bayer.packing = BayerFormat::Packing::None;
- pix = bayer.toPixelFormat();
- }
-
- return pix;
-}
-
-V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format,
- BayerFormat::Packing packingReq)
-{
- const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);
- V4L2DeviceFormat deviceFormat;
-
- deviceFormat.fourcc = V4L2PixelFormat::fromPixelFormat(pix);
- deviceFormat.size = format.size;
- deviceFormat.colorSpace = format.colorSpace;
- return deviceFormat;
-}
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- /*
-	 * The isRaw test might be redundant right now, as the pipeline handler
-	 * only supports RAW sensors. Leave it in for now, just as a sanity check.
- */
- if (!pixFmt.isValid())
- return false;
-
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- if (!info.isValid())
- return false;
-
- return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-double scoreFormat(double desired, double actual)
-{
- double score = desired - actual;
- /* Smaller desired dimensions are preferred. */
- if (score < 0.0)
- score = (-score) / 8;
- /* Penalise non-exact matches. */
- if (actual != desired)
- score *= 2;
-
- return score;
-}
-
-V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
-{
- double bestScore = std::numeric_limits<double>::max(), score;
- V4L2SubdeviceFormat bestFormat;
- bestFormat.colorSpace = ColorSpace::Raw;
-
- constexpr float penaltyAr = 1500.0;
- constexpr float penaltyBitDepth = 500.0;
-
- /* Calculate the closest/best mode from the user requested size. */
- for (const auto &iter : formatsMap) {
- const unsigned int mbusCode = iter.first;
- const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
- BayerFormat::Packing::None);
- const PixelFormatInfo &info = PixelFormatInfo::info(format);
-
- for (const Size &size : iter.second) {
- double reqAr = static_cast<double>(req.width) / req.height;
- double fmtAr = static_cast<double>(size.width) / size.height;
-
- /* Score the dimensions for closeness. */
- score = scoreFormat(req.width, size.width);
- score += scoreFormat(req.height, size.height);
- score += penaltyAr * scoreFormat(reqAr, fmtAr);
-
- /* Add any penalties... this is not an exact science! */
- score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
-
- if (score <= bestScore) {
- bestScore = score;
- bestFormat.mbus_code = mbusCode;
- bestFormat.size = size;
- }
-
- LOG(RPI, Debug) << "Format: " << size.toString()
- << " fmt " << format.toString()
- << " Score: " << score
- << " (best " << bestScore << ")";
- }
- }
-
- return bestFormat;
-}
-
-enum class Unicam : unsigned int { Image, Embedded };
-enum class Isp : unsigned int { Input, Output0, Output1, Stats };
-
-} /* namespace */
-
-class RPiCameraData : public Camera::Private
-{
-public:
- RPiCameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), state_(State::Stopped),
- supportsFlips_(false), flipsAlterBayerOrder_(false),
- dropFrameCount_(0), ispOutputCount_(0)
- {
- }
-
- void frameStarted(uint32_t sequence);
-
- int loadIPA(ipa::RPi::SensorConfig *sensorConfig);
- int configureIPA(const CameraConfiguration *config);
-
- void enumerateVideoDevices(MediaLink *link);
-
- void statsMetadataComplete(uint32_t bufferId, const ControlList &controls);
- void runIsp(uint32_t bufferId);
- void embeddedComplete(uint32_t bufferId);
- void setIspControls(const ControlList &controls);
- void setDelayedControls(const ControlList &controls);
- void setSensorControls(ControlList &controls);
-
- /* bufferComplete signal handlers. */
- void unicamBufferDequeue(FrameBuffer *buffer);
- void ispInputDequeue(FrameBuffer *buffer);
- void ispOutputDequeue(FrameBuffer *buffer);
-
- void clearIncompleteRequests();
- void handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleState();
- void applyScalerCrop(const ControlList &controls);
-
- std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
-
- std::unique_ptr<CameraSensor> sensor_;
- SensorFormats sensorFormats_;
- /* Array of Unicam and ISP device streams and associated buffers/streams. */
- RPi::Device<Unicam, 2> unicam_;
- RPi::Device<Isp, 4> isp_;
- /* The vector below is just for convenience when iterating over all streams. */
- std::vector<RPi::Stream *> streams_;
- /* Stores the ids of the buffers mapped in the IPA. */
- std::unordered_set<unsigned int> ipaBuffers_;
- /*
- * Stores a cascade of Video Mux or Bridge devices between the sensor and
-	 * Unicam together with the media links across the entities.
- */
- std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
-
- /* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
- SharedFD lsTable_;
-
- std::unique_ptr<DelayedControls> delayedCtrls_;
- bool sensorMetadata_;
-
- /*
- * All the functions in this class are called from a single calling
- * thread. So, we do not need to have any mutex to protect access to any
- * of the variables below.
- */
- enum class State { Stopped, Idle, Busy, IpaComplete };
- State state_;
-
- struct BayerFrame {
- FrameBuffer *buffer;
- ControlList controls;
- };
-
- std::queue<BayerFrame> bayerQueue_;
- std::queue<FrameBuffer *> embeddedQueue_;
- std::deque<Request *> requestQueue_;
-
- /*
- * Manage horizontal and vertical flips supported (or not) by the
- * sensor. Also store the "native" Bayer order (that is, with no
- * transforms applied).
- */
- bool supportsFlips_;
- bool flipsAlterBayerOrder_;
- BayerFormat::Order nativeBayerOrder_;
-
- /* For handling digital zoom. */
- IPACameraSensorInfo sensorInfo_;
- Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
- Rectangle scalerCrop_; /* crop in sensor native pixels */
- Size ispMinCropSize_;
-
- unsigned int dropFrameCount_;
-
- /*
-	 * If set, this stores the value that represents a gain of one for
- * the V4L2_CID_NOTIFY_GAINS control.
- */
- std::optional<int32_t> notifyGainsUnity_;
-
-private:
- void checkRequestCompleted();
- void fillRequestMetadata(const ControlList &bufferControls,
- Request *request);
- void tryRunPipeline();
- bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
-
- unsigned int ispOutputCount_;
-};
-
-class RPiCameraConfiguration : public CameraConfiguration
-{
-public:
- RPiCameraConfiguration(const RPiCameraData *data);
-
- Status validate() override;
-
- /* Cache the combinedTransform_ that will be applied to the sensor */
- Transform combinedTransform_;
-
-private:
- const RPiCameraData *data_;
-};
-
-class PipelineHandlerRPi : public PipelineHandler
-{
-public:
- PipelineHandlerRPi(CameraManager *manager);
-
- CameraConfiguration *generateConfiguration(Camera *camera, const StreamRoles &roles) override;
- int configure(Camera *camera, CameraConfiguration *config) override;
-
- int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
-
- int start(Camera *camera, const ControlList *controls) override;
- void stopDevice(Camera *camera) override;
-
- int queueRequestDevice(Camera *camera, Request *request) override;
-
- bool match(DeviceEnumerator *enumerator) override;
-
-private:
- RPiCameraData *cameraData(Camera *camera)
- {
- return static_cast<RPiCameraData *>(camera->_d());
- }
-
- int registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity);
- int queueAllBuffers(Camera *camera);
- int prepareBuffers(Camera *camera);
- void freeBuffers(Camera *camera);
- void mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask);
-};
-
-RPiCameraConfiguration::RPiCameraConfiguration(const RPiCameraData *data)
- : CameraConfiguration(), data_(data)
-{
-}
-
-CameraConfiguration::Status RPiCameraConfiguration::validate()
-{
- Status status = Valid;
-
- if (config_.empty())
- return Invalid;
-
- status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
-
- /*
- * What if the platform has a non-90 degree rotation? We can't even
- * "adjust" the configuration and carry on. Alternatively, raising an
- * error means the platform can never run. Let's just print a warning
- * and continue regardless; the rotation is effectively set to zero.
- */
- int32_t rotation = data_->sensor_->properties().get(properties::Rotation);
- bool success;
- Transform rotationTransform = transformFromRotation(rotation, &success);
- if (!success)
- LOG(RPI, Warning) << "Invalid rotation of " << rotation
- << " degrees - ignoring";
- Transform combined = transform * rotationTransform;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
-	 * any combined result that includes a transpose, as we can't do those.
- * In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
- /*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
- */
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
-		 * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -rotationTransform;
- combined = Transform::Identity;
- status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
-
- unsigned int rawCount = 0, outCount = 0, count = 0, maxIndex = 0;
- std::pair<int, Size> outSize[2];
- Size maxSize;
- for (StreamConfiguration &cfg : config_) {
- if (isRaw(cfg.pixelFormat)) {
- /*
- * Calculate the best sensor mode we can use based on
- * the user request.
- */
- const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
- unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- if (info.isValid() && !info.packed)
- packing = BayerFormat::Packing::None;
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat,
- packing);
- int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
- if (ret)
- return Invalid;
-
- /*
- * Some sensors change their Bayer order when they are
- * h-flipped or v-flipped, according to the transform.
- * If this one does, we must advertise the transformed
- * Bayer order in the raw stream. Note how we must
- * fetch the "native" (i.e. untransformed) Bayer order,
- * because the sensor may currently be flipped!
- */
- V4L2PixelFormat fourcc = unicamFormat.fourcc;
- if (data_->flipsAlterBayerOrder_) {
- BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
- bayer.order = data_->nativeBayerOrder_;
- bayer = bayer.transform(combined);
- fourcc = bayer.toV4L2PixelFormat();
- }
-
- PixelFormat unicamPixFormat = fourcc.toPixelFormat();
- if (cfg.size != unicamFormat.size ||
- cfg.pixelFormat != unicamPixFormat) {
- cfg.size = unicamFormat.size;
- cfg.pixelFormat = unicamPixFormat;
- status = Adjusted;
- }
-
- cfg.stride = unicamFormat.planes[0].bpl;
- cfg.frameSize = unicamFormat.planes[0].size;
-
- rawCount++;
- } else {
- outSize[outCount] = std::make_pair(count, cfg.size);
- /* Record the largest resolution for fixups later. */
- if (maxSize < cfg.size) {
- maxSize = cfg.size;
- maxIndex = outCount;
- }
- outCount++;
- }
-
- count++;
-
- /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid number of streams requested";
- return Invalid;
- }
- }
-
- /*
- * Now do any fixups needed. For the two ISP outputs, one stream must be
- * equal or smaller than the other in all dimensions.
- */
- for (unsigned int i = 0; i < outCount; i++) {
- outSize[i].second.width = std::min(outSize[i].second.width,
- maxSize.width);
- outSize[i].second.height = std::min(outSize[i].second.height,
- maxSize.height);
-
- if (config_.at(outSize[i].first).size != outSize[i].second) {
- config_.at(outSize[i].first).size = outSize[i].second;
- status = Adjusted;
- }
-
- /*
- * Also validate the correct pixel formats here.
- * Note that Output0 and Output1 support a different
- * set of formats.
- *
- * Output 0 must be for the largest resolution. We will
- * have that fixed up in the code above.
- *
- */
- StreamConfiguration &cfg = config_.at(outSize[i].first);
- PixelFormat &cfgPixFmt = cfg.pixelFormat;
- V4L2VideoDevice *dev;
-
- if (i == maxIndex)
- dev = data_->isp_[Isp::Output0].dev();
- else
- dev = data_->isp_[Isp::Output1].dev();
-
- V4L2VideoDevice::Formats fmts = dev->formats();
-
- if (fmts.find(V4L2PixelFormat::fromPixelFormat(cfgPixFmt)) == fmts.end()) {
- /* If we cannot find a native format, use a default one. */
- cfgPixFmt = formats::NV12;
- status = Adjusted;
- }
-
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug)
- << "Try color space " << ColorSpace::toString(cfg.colorSpace);
-
- int ret = dev->tryFormat(&format);
- if (ret)
- return Invalid;
-
- if (cfg.colorSpace != format.colorSpace) {
- status = Adjusted;
- LOG(RPI, Debug)
- << "Color space changed from "
- << ColorSpace::toString(cfg.colorSpace) << " to "
- << ColorSpace::toString(format.colorSpace);
- }
-
- cfg.colorSpace = format.colorSpace;
-
- cfg.stride = format.planes[0].bpl;
- cfg.frameSize = format.planes[0].size;
-
- }
-
- return status;
-}
-
-PipelineHandlerRPi::PipelineHandlerRPi(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
-CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
-{
- RPiCameraData *data = cameraData(camera);
- CameraConfiguration *config = new RPiCameraConfiguration(data);
- V4L2SubdeviceFormat sensorFormat;
- unsigned int bufferCount;
- PixelFormat pixelFormat;
- V4L2VideoDevice::Formats fmts;
- Size size;
- std::optional<ColorSpace> colorSpace;
-
- if (roles.empty())
- return config;
-
- unsigned int rawCount = 0;
- unsigned int outCount = 0;
- Size sensorSize = data->sensor_->resolution();
- for (const StreamRole role : roles) {
- switch (role) {
- case StreamRole::Raw:
- size = sensorSize;
- sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
- pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
- BayerFormat::Packing::CSI2);
- ASSERT(pixelFormat.isValid());
- colorSpace = ColorSpace::Raw;
- bufferCount = 2;
- rawCount++;
- break;
-
- case StreamRole::StillCapture:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::NV12;
- /*
- * Still image codecs usually expect the JPEG color space.
- * Even RGB codecs will be fine as the RGB we get with the
- * JPEG color space is the same as sRGB.
- */
- colorSpace = ColorSpace::Jpeg;
- /* Return the largest sensor resolution. */
- size = sensorSize;
- bufferCount = 1;
- outCount++;
- break;
-
- case StreamRole::VideoRecording:
- /*
- * The colour denoise algorithm requires the analysis
- * image, produced by the second ISP output, to be in
- * YUV420 format. Select this format as the default, to
- * maximize chances that it will be picked by
- * applications and enable usage of the colour denoise
- * algorithm.
- */
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::YUV420;
- /*
- * Choose a color space appropriate for video recording.
- * Rec.709 will be a good default for HD resolutions.
- */
- colorSpace = ColorSpace::Rec709;
- size = { 1920, 1080 };
- bufferCount = 4;
- outCount++;
- break;
-
- case StreamRole::Viewfinder:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::ARGB8888;
- colorSpace = ColorSpace::Jpeg;
- size = { 800, 600 };
- bufferCount = 4;
- outCount++;
- break;
-
- default:
- LOG(RPI, Error) << "Requested stream role not supported: "
- << role;
- delete config;
- return nullptr;
- }
-
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid stream roles requested";
- delete config;
- return nullptr;
- }
-
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- if (role == StreamRole::Raw) {
- /* Translate the MBUS codes to a PixelFormat. */
- for (const auto &format : data->sensorFormats_) {
- PixelFormat pf = mbusCodeToPixelFormat(format.first,
- BayerFormat::Packing::CSI2);
- if (pf.isValid())
- deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
- std::forward_as_tuple(format.second.begin(), format.second.end()));
- }
- } else {
- /*
- * Translate the V4L2PixelFormat to PixelFormat. Note that we
- * limit the recommended largest ISP output size to match the
- * sensor resolution.
- */
- for (const auto &format : fmts) {
- PixelFormat pf = format.first.toPixelFormat();
- if (pf.isValid()) {
- const SizeRange &ispSizes = format.second[0];
- deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
- ispSizes.hStep, ispSizes.vStep);
- }
- }
- }
-
- /* Add the stream format based on the device node used for the use case. */
- StreamFormats formats(deviceFormats);
- StreamConfiguration cfg(formats);
- cfg.size = size;
- cfg.pixelFormat = pixelFormat;
- cfg.colorSpace = colorSpace;
- cfg.bufferCount = bufferCount;
- config->addConfiguration(cfg);
- }
-
- config->validate();
-
- return config;
-}
-
-int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- /* Start by resetting the Unicam and ISP stream states. */
- for (auto const stream : data->streams_)
- stream->reset();
-
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- Size maxSize, sensorSize;
- unsigned int maxIndex = 0;
- bool rawStream = false;
- unsigned int bitDepth = defaultRawBitDepth;
-
- /*
- * Look for the RAW stream (if given) size as well as the largest
- * ISP output size.
- */
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- /*
- * If we have been given a RAW stream, use that size
- * for setting up the sensor.
- */
- sensorSize = cfg.size;
- rawStream = true;
- /* Check if the user has explicitly set an unpacked format. */
- BayerFormat bayerFormat = BayerFormat::fromPixelFormat(cfg.pixelFormat);
- packing = bayerFormat.packing;
- bitDepth = bayerFormat.bitDepth;
- } else {
- if (cfg.size > maxSize) {
- maxSize = config->at(i).size;
- maxIndex = i;
- }
- }
- }
-
- /*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- const RPiCameraConfiguration *rpiConfig =
- static_cast<const RPiCameraConfiguration *>(config);
- ControlList controls;
-
- controls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::HFlip)));
- controls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::VFlip)));
- data->setSensorControls(controls);
- }
-
- /* First calculate the best sensor mode we can use based on the user request. */
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
- ret = data->sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
-
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat, packing);
- ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- LOG(RPI, Info) << "Sensor: " << camera->id()
- << " - Selected sensor format: " << sensorFormat.toString()
- << " - Selected unicam format: " << unicamFormat.toString();
-
- ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- /*
- * See which streams are requested, and route the user
- * StreamConfiguration appropriately.
- */
- V4L2DeviceFormat format;
- bool output0Set = false, output1Set = false;
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- cfg.setStream(&data->unicam_[Unicam::Image]);
- data->unicam_[Unicam::Image].setExternal(true);
- continue;
- }
-
- /* The largest resolution gets routed to the ISP Output 0 node. */
- RPi::Stream *stream = i == maxIndex ? &data->isp_[Isp::Output0]
- : &data->isp_[Isp::Output1];
-
- V4L2PixelFormat fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.fourcc = fourcc;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug) << "Setting " << stream->name() << " to "
- << format.toString();
-
- ret = stream->dev()->setFormat(&format);
- if (ret)
- return -EINVAL;
-
- if (format.size != cfg.size || format.fourcc != fourcc) {
- LOG(RPI, Error)
- << "Failed to set requested format on " << stream->name()
- << ", returned " << format.toString();
- return -EINVAL;
- }
-
- LOG(RPI, Debug)
- << "Stream " << stream->name() << " has color space "
- << ColorSpace::toString(cfg.colorSpace);
-
- cfg.setStream(stream);
- stream->setExternal(true);
-
- if (i != maxIndex)
- output1Set = true;
- else
- output0Set = true;
- }
-
- /*
- * If ISP::Output0 stream has not been configured by the application,
- * we must allow the hardware to generate an output so that the data
- * flow in the pipeline handler remains consistent, and we still generate
- * statistics for the IPA to use. So enable the output at a very low
- * resolution for internal use.
- *
- * \todo Allow the pipeline to work correctly without Output0 and only
- * statistics coming from the hardware.
- */
- if (!output0Set) {
- maxSize = Size(320, 240);
- format = {};
- format.size = maxSize;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
- /* No one asked for output, so the color space doesn't matter. */
- format.colorSpace = ColorSpace::Jpeg;
- ret = data->isp_[Isp::Output0].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error)
- << "Failed to set default format on ISP Output0: "
- << ret;
- return -EINVAL;
- }
-
- LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
- << format.toString();
- }
-
- /*
- * If ISP::Output1 stream has not been requested by the application, we
- * set it up for internal use now. This second stream will be used for
- * fast colour denoise, and must be a quarter resolution of the ISP::Output0
- * stream. However, also limit the maximum size to 1200 pixels in the
- * larger dimension, just to avoid being wasteful with buffer allocations
- * and memory bandwidth.
- *
- * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
- * colour denoise will not run.
- */
- if (!output1Set) {
- V4L2DeviceFormat output1Format = format;
- constexpr Size maxDimensions(1200, 1200);
- const Size limit = maxDimensions.boundedToAspectRatio(format.size);
-
- output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
-
- LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
- << output1Format.toString();
-
- ret = data->isp_[Isp::Output1].dev()->setFormat(&output1Format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP Output1: "
- << ret;
- return -EINVAL;
- }
- }
-
- /* ISP statistics output format. */
- format = {};
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
- ret = data->isp_[Isp::Stats].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
- << format.toString();
- return ret;
- }
-
- /* Figure out the smallest selection the ISP will allow. */
- Rectangle testCrop(0, 0, 1, 1);
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
- data->ispMinCropSize_ = testCrop.size();
-
- /* Adjust aspect ratio by providing crops on the input image. */
- Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
- Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
- data->ispCrop_ = crop;
-
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
-
- ret = data->configureIPA(config);
- if (ret)
- LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
-
- /*
- * Configure the Unicam embedded data output format only if the sensor
- * supports it.
- */
- if (data->sensorMetadata_) {
- V4L2SubdeviceFormat embeddedFormat;
-
- data->sensor_->device()->getFormat(1, &embeddedFormat);
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
- format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
-
- LOG(RPI, Debug) << "Setting embedded data format.";
- ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
- << format.toString();
- return ret;
- }
- }
-
- /*
- * Update the ScalerCropMaximum to the correct value for this camera mode.
- * For us, it's the same as the "analogue crop".
- *
- * \todo Make this property the ScalerCrop maximum value when dynamic
- * controls are available and set it at validate() time
- */
- data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
-
- /* Setup the Video Mux/Bridge entities. */
- for (auto &[device, link] : data->bridgeDevices_) {
- /*
- * Start by disabling all the sink pad links on the devices in the
- * cascade, with the exception of the link connecting the device.
- */
- for (const MediaPad *p : device->entity()->pads()) {
- if (!(p->flags() & MEDIA_PAD_FL_SINK))
- continue;
-
- for (MediaLink *l : p->links()) {
- if (l != link)
- l->setEnabled(false);
- }
- }
-
- /*
- * Next, enable the entity -> entity links, and setup the pad format.
- *
-		 * \todo Some bridge devices may change the media bus code, so we
- * ought to read the source pad format and propagate it to the sink pad.
- */
- link->setEnabled(true);
- const MediaPad *sinkPad = link->sink();
- ret = device->setFormat(sinkPad->index(), &sensorFormat);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
- << " pad " << sinkPad->index()
- << " with format " << format.toString()
- << ": " << ret;
- return ret;
- }
-
- LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
- << " on pad " << sinkPad->index();
- }
-
- return ret;
-}
-
-int PipelineHandlerRPi::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- RPi::Stream *s = static_cast<RPi::Stream *>(stream);
- unsigned int count = stream->configuration().bufferCount;
- int ret = s->dev()->exportBuffers(count, buffers);
-
- s->setExportedBuffers(buffers);
-
- return ret;
-}
-
-int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- /* Allocate buffers for internal pipeline usage. */
- ret = prepareBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to allocate buffers";
- stop(camera);
- return ret;
- }
-
- /* Check if a ScalerCrop control was specified. */
- if (controls)
- data->applyScalerCrop(*controls);
-
- /* Start the IPA. */
- ipa::RPi::StartConfig startConfig;
- data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
- &startConfig);
-
- /* Apply any gain/exposure settings that the IPA may have passed back. */
- if (!startConfig.controls.empty())
- data->setSensorControls(startConfig.controls);
-
- /* Configure the number of dropped frames required on startup. */
- data->dropFrameCount_ = startConfig.dropFrameCount;
-
- /* We need to set the dropFrameCount_ before queueing buffers. */
- ret = queueAllBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to queue buffers";
- stop(camera);
- return ret;
- }
-
- /* Enable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(true);
-
- /*
- * Reset the delayed controls with the gain and exposure values set by
- * the IPA.
- */
- data->delayedCtrls_->reset();
-
- data->state_ = RPiCameraData::State::Idle;
-
- /* Start all streams. */
- for (auto const stream : data->streams_) {
- ret = stream->dev()->streamOn();
- if (ret) {
- stop(camera);
- return ret;
- }
- }
-
- return 0;
-}
-
-void PipelineHandlerRPi::stopDevice(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
-
- data->state_ = RPiCameraData::State::Stopped;
-
- /* Disable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(false);
-
- for (auto const stream : data->streams_)
- stream->dev()->streamOff();
-
- data->clearIncompleteRequests();
- data->bayerQueue_ = {};
- data->embeddedQueue_ = {};
-
- /* Stop the IPA. */
- data->ipa_->stop();
-
- freeBuffers(camera);
-}
-
-int PipelineHandlerRPi::queueRequestDevice(Camera *camera, Request *request)
-{
- RPiCameraData *data = cameraData(camera);
-
- if (data->state_ == RPiCameraData::State::Stopped)
- return -EINVAL;
-
- LOG(RPI, Debug) << "queueRequestDevice: New request.";
-
- /* Push all buffers supplied in the Request to the respective streams. */
- for (auto stream : data->streams_) {
- if (!stream->isExternal())
- continue;
-
- FrameBuffer *buffer = request->findBuffer(stream);
- if (buffer && stream->getBufferId(buffer) == -1) {
- /*
- * This buffer is not recognised, so it must have been allocated
- * outside the v4l2 device. Store it in the stream buffer list
- * so we can track it.
- */
- stream->setExternalBuffer(buffer);
- }
- /*
- * If no buffer is provided by the request for this stream, we
- * queue a nullptr to the stream to signify that it must use an
- * internally allocated buffer for this capture request. This
- * buffer will not be given back to the application, but is used
- * to support the internal pipeline flow.
- *
- * The below queueBuffer() call will do nothing if there are not
- * enough internal buffers allocated, but this will be handled by
- * queuing the request for buffers in the RPiStream object.
- */
- int ret = stream->queueBuffer(buffer);
- if (ret)
- return ret;
- }
-
- /* Push the request to the back of the queue. */
- data->requestQueue_.push_back(request);
- data->handleState();
-
- return 0;
-}
-
-bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
-{
- DeviceMatch unicam("unicam");
- MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
-
- if (!unicamDevice) {
- LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
- return false;
- }
-
- DeviceMatch isp("bcm2835-isp");
- MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
-
- if (!ispDevice) {
- LOG(RPI, Debug) << "Unable to acquire ISP instance";
- return false;
- }
-
- /*
- * The loop below is used to register multiple cameras behind one or more
- * video mux devices that are attached to a particular Unicam instance.
- * Obviously these cameras cannot be used simultaneously.
- */
- unsigned int numCameras = 0;
- for (MediaEntity *entity : unicamDevice->entities()) {
- if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
- continue;
-
- int ret = registerCamera(unicamDevice, ispDevice, entity);
- if (ret)
- LOG(RPI, Error) << "Failed to register camera "
- << entity->name() << ": " << ret;
- else
- numCameras++;
- }
-
- return !!numCameras;
-}
-
-int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity)
-{
- std::unique_ptr<RPiCameraData> data = std::make_unique<RPiCameraData>(this);
-
- if (!data->dmaHeap_.isValid())
- return -ENOMEM;
-
- MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
- MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
- MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
- MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
- MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
-
- if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
- return -ENOENT;
-
- /* Locate and open the unicam video streams. */
- data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
-
- /* An embedded data node will not be present if the sensor does not support it. */
- MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
- if (unicamEmbedded) {
- data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
- data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data.get(),
- &RPiCameraData::unicamBufferDequeue);
- }
-
- /* Tag the ISP input stream as an import stream. */
- data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
- data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
- data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
- data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
-
- /* Wire up all the buffer connections. */
- data->unicam_[Unicam::Image].dev()->frameStart.connect(data.get(), &RPiCameraData::frameStarted);
- data->unicam_[Unicam::Image].dev()->bufferReady.connect(data.get(), &RPiCameraData::unicamBufferDequeue);
- data->isp_[Isp::Input].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispInputDequeue);
- data->isp_[Isp::Output0].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Output1].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Stats].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-
- data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- if (!data->sensor_)
- return -EINVAL;
-
- if (data->sensor_->init())
- return -EINVAL;
-
- /*
- * Enumerate all the Video Mux/Bridge devices across the sensor -> unicam
- * chain. There may be a cascade of devices in this chain!
- */
- MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
- data->enumerateVideoDevices(link);
-
- data->sensorFormats_ = populateSensorFormats(data->sensor_);
-
- ipa::RPi::SensorConfig sensorConfig;
- if (data->loadIPA(&sensorConfig)) {
- LOG(RPI, Error) << "Failed to load a suitable IPA library";
- return -EINVAL;
- }
-
- if (sensorConfig.sensorMetadata ^ !!unicamEmbedded) {
- LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
- sensorConfig.sensorMetadata = false;
- if (unicamEmbedded)
- data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
- }
-
- /*
- * Open all Unicam and ISP streams. The exception is the embedded data
- * stream, which only gets opened below if the IPA reports that the sensor
- * supports embedded data.
- *
- * The below grouping is just for convenience so that we can easily
- * iterate over all streams in one go.
- */
- data->streams_.push_back(&data->unicam_[Unicam::Image]);
- if (sensorConfig.sensorMetadata)
- data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
-
- for (auto &stream : data->isp_)
- data->streams_.push_back(&stream);
-
- for (auto stream : data->streams_) {
- int ret = stream->dev()->open();
- if (ret)
- return ret;
- }
-
- if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
- LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
- return -EINVAL;
- }
-
- /*
- * Setup our delayed control writer with the sensor default
- * gain and exposure delays. Mark VBLANK for priority write.
- */
- std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { sensorConfig.gainDelay, false } },
- { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },
- { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }
- };
- data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
- data->sensorMetadata_ = sensorConfig.sensorMetadata;
-
- /* Register the controls that the Raspberry Pi IPA can handle. */
- data->controlInfo_ = RPi::Controls;
- /* Initialize the camera properties. */
- data->properties_ = data->sensor_->properties();
-
- /*
- * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
- * sensor of the colour gains. It is defined to be a linear gain where
- * the default value represents a gain of exactly one.
- */
- auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
- if (it != data->sensor_->controls().end())
- data->notifyGainsUnity_ = it->second.def().get<int32_t>();
-
- /*
- * Set a default value for the ScalerCropMaximum property to show
- * that we support its use, however, initialise it to zero because
- * it's not meaningful until a camera mode has been chosen.
- */
- data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
-
- /*
- * We cache three things about the sensor in relation to transforms
- * (meaning horizontal and vertical flips).
- *
- * Firstly, does it support them?
- * Secondly, if you use them does it affect the Bayer ordering?
- * Thirdly, what is the "native" Bayer order, when no transforms are
- * applied?
- *
- * We note that the sensor's cached list of supported formats is
- * already in the "native" order, with any flips having been undone.
- */
- const V4L2Subdevice *sensor = data->sensor_->device();
- const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
- if (hflipCtrl) {
- /* We assume it will support vflips too... */
- data->supportsFlips_ = true;
- data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- }
-
- /* Look for a valid Bayer format. */
- BayerFormat bayerFormat;
- for (const auto &iter : data->sensorFormats_) {
- bayerFormat = BayerFormat::fromMbusCode(iter.first);
- if (bayerFormat.isValid())
- break;
- }
-
- if (!bayerFormat.isValid()) {
- LOG(RPI, Error) << "No Bayer format found";
- return -EINVAL;
- }
- data->nativeBayerOrder_ = bayerFormat.order;
-
- /*
- * List the available streams an application may request. At present, we
- * do not advertise Unicam Embedded and ISP Statistics streams, as there
- * is no mechanism for the application to request non-image buffer formats.
- */
- std::set<Stream *> streams;
- streams.insert(&data->unicam_[Unicam::Image]);
- streams.insert(&data->isp_[Isp::Output0]);
- streams.insert(&data->isp_[Isp::Output1]);
-
- /* Create and register the camera. */
- const std::string &id = data->sensor_->id();
- std::shared_ptr<Camera> camera =
- Camera::create(std::move(data), id, streams);
- PipelineHandler::registerCamera(std::move(camera));
-
- LOG(RPI, Info) << "Registered camera " << id
- << " to Unicam device " << unicam->deviceNode()
- << " and ISP device " << isp->deviceNode();
- return 0;
-}
-
-int PipelineHandlerRPi::queueAllBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_) {
- if (!stream->isExternal()) {
- ret = stream->queueAllBuffers();
- if (ret < 0)
- return ret;
- } else {
- /*
- * For external streams, we must queue up a set of internal
- * buffers to handle the number of drop frames requested by
- * the IPA. This is done by passing nullptr in queueBuffer().
- *
- * The below queueBuffer() call will do nothing if there
- * are not enough internal buffers allocated, but this will
- * be handled by queuing the request for buffers in the
- * RPiStream object.
- */
- unsigned int i;
- for (i = 0; i < data->dropFrameCount_; i++) {
- ret = stream->queueBuffer(nullptr);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-int PipelineHandlerRPi::prepareBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- unsigned int numRawBuffers = 0;
- int ret;
-
- for (Stream *s : camera->streams()) {
- if (isRaw(s->configuration().pixelFormat)) {
- numRawBuffers = s->configuration().bufferCount;
- break;
- }
- }
-
- /* Decide how many internal buffers to allocate. */
- for (auto const stream : data->streams_) {
- unsigned int numBuffers;
- /*
- * For Unicam, allocate a minimum of 4 buffers as we want
- * to avoid any frame drops.
- */
- constexpr unsigned int minBuffers = 4;
- if (stream == &data->unicam_[Unicam::Image]) {
- /*
- * If an application has configured a RAW stream, allocate
- * additional buffers to make up the minimum, but ensure
- * we have at least 2 sets of internal buffers to use to
- * minimise frame drops.
- */
- numBuffers = std::max<int>(2, minBuffers - numRawBuffers);
- } else if (stream == &data->isp_[Isp::Input]) {
- /*
- * ISP input buffers are imported from Unicam, so follow
- * similar logic as above to count all the RAW buffers
- * available.
- */
- numBuffers = numRawBuffers + std::max<int>(2, minBuffers - numRawBuffers);
-
- } else if (stream == &data->unicam_[Unicam::Embedded]) {
- /*
- * Embedded data buffers are (currently) for internal use,
- * so allocate the minimum required to avoid frame drops.
- */
- numBuffers = minBuffers;
- } else {
- /*
-			 * Since the ISP runs synchronously with the IPA and requests,
- * we only ever need one set of internal buffers. Any buffers
- * the application wants to hold onto will already be exported
- * through PipelineHandlerRPi::exportFrameBuffers().
- */
- numBuffers = 1;
- }
-
- ret = stream->prepareBuffers(numBuffers);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Pass the stats and embedded data buffers to the IPA. No other
- * buffers need to be passed.
- */
- mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), ipa::RPi::MaskStats);
- if (data->sensorMetadata_)
- mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
- ipa::RPi::MaskEmbeddedData);
-
- return 0;
-}
-
-void PipelineHandlerRPi::mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask)
-{
- RPiCameraData *data = cameraData(camera);
- std::vector<IPABuffer> ipaBuffers;
- /*
- * Link the FrameBuffers with the id (key value) in the map stored in
- * the RPi stream object - along with an identifier mask.
- *
- * This will allow us to identify buffers passed between the pipeline
- * handler and the IPA.
- */
- for (auto const &it : buffers) {
- ipaBuffers.push_back(IPABuffer(mask | it.first,
- it.second->planes()));
- data->ipaBuffers_.insert(mask | it.first);
- }
-
- data->ipa_->mapBuffers(ipaBuffers);
-}
-
-void PipelineHandlerRPi::freeBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
-
- /* Copy the buffer ids from the unordered_set to a vector to pass to the IPA. */
- std::vector<unsigned int> ipaBuffers(data->ipaBuffers_.begin(), data->ipaBuffers_.end());
- data->ipa_->unmapBuffers(ipaBuffers);
- data->ipaBuffers_.clear();
-
- for (auto const stream : data->streams_)
- stream->releaseBuffers();
-}
-
-void RPiCameraData::frameStarted(uint32_t sequence)
-{
- LOG(RPI, Debug) << "frame start " << sequence;
-
- /* Write any controls for the next frame as soon as we can. */
- delayedCtrls_->applyControls(sequence);
-}
-
-int RPiCameraData::loadIPA(ipa::RPi::SensorConfig *sensorConfig)
-{
- ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
-
- if (!ipa_)
- return -ENOENT;
-
- ipa_->statsMetadataComplete.connect(this, &RPiCameraData::statsMetadataComplete);
- ipa_->runIsp.connect(this, &RPiCameraData::runIsp);
- ipa_->embeddedComplete.connect(this, &RPiCameraData::embeddedComplete);
- ipa_->setIspControls.connect(this, &RPiCameraData::setIspControls);
- ipa_->setDelayedControls.connect(this, &RPiCameraData::setDelayedControls);
-
- /*
- * The configuration (tuning file) is made from the sensor name unless
- * the environment variable overrides it.
- */
- std::string configurationFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0')
- configurationFile = ipa_->configurationFile(sensor_->model() + ".json");
- else
- configurationFile = std::string(configFromEnv);
-
- IPASettings settings(configurationFile, sensor_->model());
-
- return ipa_->init(settings, sensorConfig);
-}
-
-int RPiCameraData::configureIPA(const CameraConfiguration *config)
-{
- std::map<unsigned int, IPAStream> streamConfig;
- std::map<unsigned int, ControlInfoMap> entityControls;
- ipa::RPi::IPAConfig ipaConfig;
-
- /* Inform IPA of stream configuration and sensor controls. */
- unsigned int i = 0;
- for (auto const &stream : isp_) {
- if (stream.isExternal()) {
- streamConfig[i++] = IPAStream(
- stream.configuration().pixelFormat,
- stream.configuration().size);
- }
- }
-
- entityControls.emplace(0, sensor_->controls());
- entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
-
- /* Always send the user transform to the IPA. */
- ipaConfig.transform = static_cast<unsigned int>(config->transform);
-
- /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
- if (!lsTable_.isValid()) {
- lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
- if (!lsTable_.isValid())
- return -ENOMEM;
-
- /* Allow the IPA to mmap the LS table via the file descriptor. */
- /*
- * \todo Investigate if mapping the lens shading table buffer
- * could be handled with mapBuffers().
- */
- ipaConfig.lsTableHandle = lsTable_;
- }
-
- /* We store the IPACameraSensorInfo for digital zoom calculations. */
- int ret = sensor_->sensorInfo(&sensorInfo_);
- if (ret) {
- LOG(RPI, Error) << "Failed to retrieve camera sensor info";
- return ret;
- }
-
- /* Ready the IPA - it must know about the sensor resolution. */
- ControlList controls;
- ret = ipa_->configure(sensorInfo_, streamConfig, entityControls, ipaConfig,
- &controls);
- if (ret < 0) {
- LOG(RPI, Error) << "IPA configuration failed!";
- return -EPIPE;
- }
-
- if (!controls.empty())
- setSensorControls(controls);
-
- return 0;
-}
-
-/*
- * enumerateVideoDevices() iterates over the Media Controller topology, starting
- * at the sensor and finishing at Unicam. For each sensor, RPiCameraData stores
- * a unique list of any intermediate video mux or bridge devices connected in a
- * cascade, together with the entity-to-entity links.
- *
- * Entity pad configuration and link enabling happen at the end of configure().
- * We first disable all pad links on each entity device in the chain, and then
- * selectively enable the specific links connecting the sensor to Unicam across
- * all intermediate muxes and bridges.
- *
- * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
- * will be disabled, and Sensor1 -> Mux1 -> Unicam links enabled. Alternatively,
- * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
- * and Sensor3 -> Mux2 -> Mux1 -> Unicam links are enabled. All other links will
- * remain unchanged.
- *
- * +----------+
- * | Unicam |
- * +-----^----+
- * |
- * +---+---+
- * | Mux1 <-------+
- * +--^----+ |
- * | |
- * +-----+---+ +---+---+
- * | Sensor1 | | Mux2 |<--+
- * +---------+ +-^-----+ |
- * | |
- * +-------+-+ +---+-----+
- * | Sensor2 | | Sensor3 |
- * +---------+ +---------+
- */
-void RPiCameraData::enumerateVideoDevices(MediaLink *link)
-{
- const MediaPad *sinkPad = link->sink();
- const MediaEntity *entity = sinkPad->entity();
- bool unicamFound = false;
-
- /* We only deal with Video Mux and Bridge devices in cascade. */
- if (entity->function() != MEDIA_ENT_F_VID_MUX &&
- entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
- return;
-
- /* Find the source pad for this Video Mux or Bridge device. */
- const MediaPad *sourcePad = nullptr;
- for (const MediaPad *pad : entity->pads()) {
- if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
- /*
-			 * We can only deal with devices that have a single source
-			 * pad. If this device has multiple source pads, ignore it
-			 * and the rest of this branch of the cascade.
- */
- if (sourcePad)
- return;
-
- sourcePad = pad;
- }
- }
-
- LOG(RPI, Debug) << "Found video mux device " << entity->name()
- << " linked to sink pad " << sinkPad->index();
-
- bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
- bridgeDevices_.back().first->open();
-
- /*
- * Iterate through all the sink pad links down the cascade to find any
- * other Video Mux and Bridge devices.
- */
- for (MediaLink *l : sourcePad->links()) {
- enumerateVideoDevices(l);
- /* Once we reach the Unicam entity, we are done. */
- if (l->sink()->entity()->name() == "unicam-image") {
- unicamFound = true;
- break;
- }
- }
-
- /* This identifies the end of our entity enumeration recursion. */
- if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
- /*
- * If Unicam is not at the end of this cascade, we cannot configure
- * this topology automatically, so remove all entity references.
- */
- if (!unicamFound) {
- LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
- bridgeDevices_.clear();
- }
- }
-}
-
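To make the traversal concrete, the sketch below walks a toy version of the cascaded topology from the comment above, following source pads from Sensor3 until the Unicam sink is reached. The entity names and the graph encoding are illustrative only:

#include <iostream>
#include <map>
#include <string>
#include <vector>

using Links = std::map<std::string, std::vector<std::string>>;

/* Depth-first walk towards "unicam-image", printing each link to enable. */
bool route(const Links &links, const std::string &node)
{
	if (node == "unicam-image")
		return true;

	for (const std::string &next : links.at(node)) {
		if (route(links, next)) {
			std::cout << "enable " << node << " -> " << next << '\n';
			return true;
		}
	}

	return false;
}

int main()
{
	Links links = {
		{ "Sensor3", { "Mux2" } },
		{ "Mux2", { "Mux1" } },
		{ "Mux1", { "unicam-image" } },
		{ "unicam-image", {} },
	};

	/* Prints the three links needed to route Sensor3 to Unicam. */
	return route(links, "Sensor3") ? 0 : 1;
}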
-void RPiCameraData::statsMetadataComplete(uint32_t bufferId, const ControlList &controls)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(bufferId);
-
- handleStreamBuffer(buffer, &isp_[Isp::Stats]);
-
- /* Add to the Request metadata buffer what the IPA has provided. */
- Request *request = requestQueue_.front();
- request->metadata().merge(controls);
-
- /*
- * Inform the sensor of the latest colour gains if it has the
- * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
- */
- if (notifyGainsUnity_ && controls.contains(libcamera::controls::ColourGains)) {
- libcamera::Span<const float> colourGains = controls.get(libcamera::controls::ColourGains);
- /* The control wants linear gains in the order B, Gb, Gr, R. */
- ControlList ctrls(sensor_->controls());
- std::array<int32_t, 4> gains{
- static_cast<int32_t>(colourGains[1] * *notifyGainsUnity_),
- *notifyGainsUnity_,
- *notifyGainsUnity_,
- static_cast<int32_t>(colourGains[0] * *notifyGainsUnity_)
- };
- ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
-
- sensor_->setControls(&ctrls);
- }
-
- state_ = State::IpaComplete;
- handleState();
-}
-
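A worked example of the gains computation above, using an assumed unity value of 256 (the real value comes from the V4L2_CID_NOTIFY_GAINS control's default) and IPA colour gains of 1.5 for red and 2.0 for blue:

#include <array>
#include <cstdint>
#include <iostream>

int main()
{
	const int32_t unity = 256;                   /* Assumed control default. */
	const float colourGains[2] = { 1.5f, 2.0f }; /* { red, blue } per libcamera. */

	/* Order required by the control: B, Gb, Gr, R. */
	std::array<int32_t, 4> gains{
		static_cast<int32_t>(colourGains[1] * unity), /* B  = 512 */
		unity,                                        /* Gb = 256 */
		unity,                                        /* Gr = 256 */
		static_cast<int32_t>(colourGains[0] * unity)  /* R  = 384 */
	};

	for (int32_t g : gains)
		std::cout << g << '\n';
	return 0;
}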
-void RPiCameraData::runIsp(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Image].getBuffers().at(bufferId);
-
- LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bufferId
- << ", timestamp: " << buffer->metadata().timestamp;
-
- isp_[Isp::Input].queueBuffer(buffer);
- ispOutputCount_ = 0;
- handleState();
-}
-
-void RPiCameraData::embeddedComplete(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Embedded].getBuffers().at(bufferId);
- handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
- handleState();
-}
-
-void RPiCameraData::setIspControls(const ControlList &controls)
-{
- ControlList ctrls = controls;
-
- if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
- ControlValue &value =
- const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
- Span<uint8_t> s = value.data();
- bcm2835_isp_lens_shading *ls =
- reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
- ls->dmabuf = lsTable_.get();
- }
-
- isp_[Isp::Input].dev()->setControls(&ctrls);
- handleState();
-}
-
-void RPiCameraData::setDelayedControls(const ControlList &controls)
-{
- if (!delayedCtrls_->push(controls))
- LOG(RPI, Error) << "V4L2 DelayedControl set failed";
- handleState();
-}
-
-void RPiCameraData::setSensorControls(ControlList &controls)
-{
-	/*
-	 * If both VBLANK and EXPOSURE are present, we must ensure that VBLANK
-	 * is written ahead of, and separately from, EXPOSURE to avoid V4L2
-	 * rejecting the latter. This is identical to what DelayedControls
-	 * does with the priority write flag.
-	 *
-	 * As a consequence of the logic below, VBLANK gets set twice, and we
-	 * rely on the V4L2 framework not to pass the second write to the
-	 * driver, as the actual control value has not changed.
-	 */
- if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
- ControlList vblank_ctrl;
-
- vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
- sensor_->setControls(&vblank_ctrl);
- }
-
- sensor_->setControls(&controls);
-}
-
-void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : unicam_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- if (stream == &unicam_[Unicam::Image]) {
- /*
- * Lookup the sensor controls used for this frame sequence from
- * DelayedControl and queue them along with the frame buffer.
- */
- ControlList ctrl = delayedCtrls_->get(buffer->metadata().sequence);
- /*
- * Add the frame timestamp to the ControlList for the IPA to use
- * as it does not receive the FrameBuffer object.
- */
- ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
- bayerQueue_.push({ buffer, std::move(ctrl) });
- } else {
- embeddedQueue_.push(buffer);
- }
-
- handleState();
-}
-
-void RPiCameraData::ispInputDequeue(FrameBuffer *buffer)
-{
- if (state_ == State::Stopped)
- return;
-
- LOG(RPI, Debug) << "Stream ISP Input buffer complete"
- << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /* The ISP input buffer gets re-queued into Unicam. */
- handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
- handleState();
-}
-
-void RPiCameraData::ispOutputDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : isp_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our ISP output streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /*
-	 * The ISP statistics buffer must not be re-queued or sent back to the
-	 * application until the IPA signals that it has finished with it.
- */
- if (stream == &isp_[Isp::Stats]) {
- ipa_->signalStatReady(ipa::RPi::MaskStats | static_cast<unsigned int>(index));
- } else {
- /* Any other ISP output can be handed back to the application now. */
- handleStreamBuffer(buffer, stream);
- }
-
- /*
- * Increment the number of ISP outputs generated.
- * This is needed to track dropped frames.
- */
- ispOutputCount_++;
-
- handleState();
-}
-
-void RPiCameraData::clearIncompleteRequests()
-{
- /*
- * All outstanding requests (and associated buffers) must be returned
- * back to the application.
- */
- while (!requestQueue_.empty()) {
- Request *request = requestQueue_.front();
-
- for (auto &b : request->buffers()) {
- FrameBuffer *buffer = b.second;
- /*
- * Has the buffer already been handed back to the
- * request? If not, do so now.
- */
- if (buffer->request()) {
- buffer->cancel();
- pipe()->completeBuffer(request, buffer);
- }
- }
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- }
-}
-
-void RPiCameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- /*
-	 * It is possible to be here without a pending request, so check
-	 * that we actually have one to action; otherwise simply return
-	 * the buffer back to the stream.
- */
- Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
- if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
- /*
- * Check if this is an externally provided buffer, and if
- * so, we must stop tracking it in the pipeline handler.
- */
- handleExternalBuffer(buffer, stream);
- /*
- * Tag the buffer as completed, returning it to the
- * application.
- */
- pipe()->completeBuffer(request, buffer);
- } else {
- /*
- * This buffer was not part of the Request (which happens if an
- * internal buffer was used for an external stream, or
- * unconditionally for internal streams), or there is no pending
- * request, so we can recycle it.
- */
- stream->returnBuffer(buffer);
- }
-}
-
-void RPiCameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- unsigned int id = stream->getBufferId(buffer);
-
- if (!(id & ipa::RPi::MaskExternalBuffer))
- return;
-
- /* Stop the Stream object from tracking the buffer. */
- stream->removeExternalBuffer(buffer);
-}
-
-void RPiCameraData::handleState()
-{
- switch (state_) {
- case State::Stopped:
- case State::Busy:
- break;
-
- case State::IpaComplete:
- /* If the request is completed, we will switch to Idle state. */
- checkRequestCompleted();
- /*
- * No break here, we want to try running the pipeline again.
- * The fallthrough clause below suppresses compiler warnings.
- */
- [[fallthrough]];
-
- case State::Idle:
- tryRunPipeline();
- break;
- }
-}
-
-void RPiCameraData::checkRequestCompleted()
-{
- bool requestCompleted = false;
- /*
-	 * If we are dropping this frame, do not touch the request; simply
-	 * change the state to Idle when ready.
- */
- if (!dropFrameCount_) {
- Request *request = requestQueue_.front();
- if (request->hasPendingBuffers())
- return;
-
- /* Must wait for metadata to be filled in before completing. */
- if (state_ != State::IpaComplete)
- return;
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- requestCompleted = true;
- }
-
- /*
- * Make sure we have three outputs completed in the case of a dropped
- * frame.
- */
- if (state_ == State::IpaComplete &&
- ((ispOutputCount_ == 3 && dropFrameCount_) || requestCompleted)) {
- state_ = State::Idle;
- if (dropFrameCount_) {
- dropFrameCount_--;
- LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
- << dropFrameCount_ << " left)";
- }
- }
-}
-
-void RPiCameraData::applyScalerCrop(const ControlList &controls)
-{
- if (controls.contains(controls::ScalerCrop)) {
- Rectangle nativeCrop = controls.get<Rectangle>(controls::ScalerCrop);
-
- if (!nativeCrop.width || !nativeCrop.height)
- nativeCrop = { 0, 0, 1, 1 };
-
- /* Create a version of the crop scaled to ISP (camera mode) pixels. */
- Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
- ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
-
- /*
- * The crop that we set must be:
- * 1. At least as big as ispMinCropSize_, once that's been
- * enlarged to the same aspect ratio.
- * 2. With the same mid-point, if possible.
- * 3. But it can't go outside the sensor area.
- */
- Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
- Size size = ispCrop.size().expandedTo(minSize);
- ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
-
- if (ispCrop != ispCrop_) {
- isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop);
- ispCrop_ = ispCrop;
-
- /*
- * Also update the ScalerCrop in the metadata with what we actually
- * used. But we must first rescale that from ISP (camera mode) pixels
- * back into sensor native pixels.
- */
- scalerCrop_ = ispCrop_.scaledBy(sensorInfo_.analogCrop.size(),
- sensorInfo_.outputSize);
- scalerCrop_.translateBy(sensorInfo_.analogCrop.topLeft());
- }
- }
-}
-
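The coordinate transforms in applyScalerCrop() are easier to follow with numbers. The sketch below applies the translate-then-scale step with an assumed 2x2-binned camera mode; the values are illustrative, not taken from any real sensor:

#include <iostream>

int main()
{
	/* Assumed mode: 4056x3040 analogue crop at (0, 0), 2028x1520 output. */
	const int anaW = 4056, anaH = 3040;
	const int outW = 2028, outH = 1520;

	/* Requested ScalerCrop in sensor native pixels. */
	int x = 1014, y = 760, w = 2028, h = 1520;

	/* Translate by -analogCrop.topLeft() (a no-op here), then scale. */
	x = x * outW / anaW;
	y = y * outH / anaH;
	w = w * outW / anaW;
	h = h * outH / anaH;

	/* Prints "507,380 1014x760": the centred half of the binned image. */
	std::cout << x << "," << y << " " << w << "x" << h << '\n';
	return 0;
}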
-void RPiCameraData::fillRequestMetadata(const ControlList &bufferControls,
- Request *request)
-{
- request->metadata().set(controls::SensorTimestamp,
- bufferControls.get(controls::SensorTimestamp));
-
- request->metadata().set(controls::ScalerCrop, scalerCrop_);
-}
-
-void RPiCameraData::tryRunPipeline()
-{
- FrameBuffer *embeddedBuffer;
- BayerFrame bayerFrame;
-
- /* If any of our request or buffer queues are empty, we cannot proceed. */
- if (state_ != State::Idle || requestQueue_.empty() ||
- bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
- return;
-
- if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
- return;
-
- /* Take the first request from the queue and action the IPA. */
- Request *request = requestQueue_.front();
-
- /* See if a new ScalerCrop value needs to be applied. */
- applyScalerCrop(request->controls());
-
- /*
- * Clear the request metadata and fill it with some initial non-IPA
- * related controls. We clear it first because the request metadata
- * may have been populated if we have dropped the previous frame.
- */
- request->metadata().clear();
- fillRequestMetadata(bayerFrame.controls, request);
-
- /*
- * Process all the user controls by the IPA. Once this is complete, we
- * queue the ISP output buffer listed in the request to start the HW
- * pipeline.
- */
- ipa_->signalQueueRequest(request->controls());
-
- /* Set our state to say the pipeline is active. */
- state_ = State::Busy;
-
- unsigned int bayerId = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Bayer buffer id: " << bayerId;
-
- ipa::RPi::ISPConfig ispPrepare;
- ispPrepare.bayerBufferId = ipa::RPi::MaskBayerData | bayerId;
- ispPrepare.controls = std::move(bayerFrame.controls);
-
- if (embeddedBuffer) {
- unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
-
- ispPrepare.embeddedBufferId = ipa::RPi::MaskEmbeddedData | embeddedId;
- ispPrepare.embeddedBufferPresent = true;
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Embedded buffer id: " << embeddedId;
- }
-
- ipa_->signalIspPrepare(ispPrepare);
-}
-
-bool RPiCameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
-{
- if (bayerQueue_.empty())
- return false;
-
- /* Start with the front of the bayer queue. */
- bayerFrame = std::move(bayerQueue_.front());
- bayerQueue_.pop();
-
- /*
- * Find the embedded data buffer with a matching timestamp to pass to
- * the IPA. Any embedded buffers with a timestamp lower than the
- * current bayer buffer will be removed and re-queued to the driver.
- */
- uint64_t ts = bayerFrame.buffer->metadata().timestamp;
- embeddedBuffer = nullptr;
- while (!embeddedQueue_.empty()) {
- FrameBuffer *b = embeddedQueue_.front();
- if (b->metadata().timestamp < ts) {
- embeddedQueue_.pop();
- unicam_[Unicam::Embedded].returnBuffer(b);
- LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
- << unicam_[Unicam::Embedded].name();
- } else if (b->metadata().timestamp == ts) {
- /* Found a match! */
- embeddedBuffer = b;
- embeddedQueue_.pop();
- break;
- } else {
- break; /* Only higher timestamps from here. */
- }
- }
-
- if (!embeddedBuffer && sensorMetadata_) {
-		/* Log that no matching embedded data buffer was found. */
- LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
- }
-
- return true;
-}
-
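The embedded-buffer matching above is a single forward scan over two timestamp-ordered queues. A self-contained sketch of the same logic, with buffers reduced to plain timestamps:

#include <cstdint>
#include <optional>
#include <queue>

/* Pop stale embedded timestamps; return the one matching bayerTs, if any. */
std::optional<uint64_t> matchEmbedded(std::queue<uint64_t> &embedded,
				      uint64_t bayerTs)
{
	while (!embedded.empty()) {
		uint64_t ts = embedded.front();

		if (ts < bayerTs) {
			/* Stale: the real code returns it to the driver. */
			embedded.pop();
		} else if (ts == bayerTs) {
			embedded.pop();
			return ts;
		} else {
			break; /* Only newer timestamps remain. */
		}
	}

	return std::nullopt;
}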
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRPi)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.h b/src/libcamera/pipeline/raspberrypi/rpi_stream.h
deleted file mode 100644
index d6f49d34..00000000
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
- *
- * rpi_stream.h - Raspberry Pi device stream abstraction class.
- */
-
-#pragma once
-
-#include <queue>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include <libcamera/ipa/raspberrypi.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-namespace RPi {
-
-using BufferMap = std::unordered_map<unsigned int, FrameBuffer *>;
-
-/*
- * Device stream abstraction for either an internal or external stream.
- * Used for both Unicam and the ISP.
- */
-class Stream : public libcamera::Stream
-{
-public:
- Stream()
- : id_(ipa::RPi::MaskID)
- {
- }
-
- Stream(const char *name, MediaEntity *dev, bool importOnly = false)
- : external_(false), importOnly_(importOnly), name_(name),
- dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(ipa::RPi::MaskID)
- {
- }
-
- V4L2VideoDevice *dev() const;
- std::string name() const;
- bool isImporter() const;
- void reset();
-
- void setExternal(bool external);
- bool isExternal() const;
-
- void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- const BufferMap &getBuffers() const;
- int getBufferId(FrameBuffer *buffer) const;
-
- void setExternalBuffer(FrameBuffer *buffer);
- void removeExternalBuffer(FrameBuffer *buffer);
-
- int prepareBuffers(unsigned int count);
- int queueBuffer(FrameBuffer *buffer);
- void returnBuffer(FrameBuffer *buffer);
-
- int queueAllBuffers();
- void releaseBuffers();
-
-private:
- class IdGenerator
- {
- public:
- IdGenerator(int max)
- : max_(max), id_(0)
- {
- }
-
- int get()
- {
- int id;
- if (!recycle_.empty()) {
- id = recycle_.front();
- recycle_.pop();
- } else {
- id = id_++;
- ASSERT(id_ <= max_);
- }
- return id;
- }
-
- void release(int id)
- {
- recycle_.push(id);
- }
-
- void reset()
- {
- id_ = 0;
- recycle_ = {};
- }
-
- private:
- int max_;
- int id_;
- std::queue<int> recycle_;
- };
-
- void clearBuffers();
- int queueToDevice(FrameBuffer *buffer);
-
- /*
- * Indicates that this stream is active externally, i.e. the buffers
- * might be provided by (and returned to) the application.
- */
- bool external_;
-
- /* Indicates that this stream only imports buffers, e.g. ISP input. */
- bool importOnly_;
-
- /* Stream name identifier. */
- std::string name_;
-
- /* The actual device stream. */
- std::unique_ptr<V4L2VideoDevice> dev_;
-
-	/* Tracks a unique id key for the bufferMap_. */
- IdGenerator id_;
-
- /* All frame buffers associated with this device stream. */
- BufferMap bufferMap_;
-
- /*
- * List of frame buffers that we can use if none have been provided by
- * the application for external streams. This is populated by the
- * buffers exported internally.
- */
- std::queue<FrameBuffer *> availableBuffers_;
-
- /*
- * List of frame buffers that are to be queued into the device from a Request.
- * A nullptr indicates any internal buffer can be used (from availableBuffers_),
- * whereas a valid pointer indicates an external buffer to be queued.
- *
- * Ordering buffers to be queued is important here as it must match the
- * requests coming from the application.
- */
- std::queue<FrameBuffer *> requestBuffers_;
-
- /*
-	 * This is a list of buffers exported internally. We need to keep this
-	 * around as the stream must maintain ownership of these buffers.
- */
- std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
-};
-
-/*
- * The following class is just a convenient (and typesafe) array of device
- * streams indexed with an enum class.
- */
-template<typename E, std::size_t N>
-class Device : public std::array<class Stream, N>
-{
-private:
- constexpr auto index(E e) const noexcept
- {
- return static_cast<std::underlying_type_t<E>>(e);
- }
-public:
- Stream &operator[](E e)
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
- const Stream &operator[](E e) const
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
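Of the helpers in this header, IdGenerator is the one whose behaviour is easiest to get wrong when reasoning about BufferMap keys: ids are handed out densely and recycled on release, so the map never grows beyond the configured maximum. A stand-alone mirror with a small usage check:

#include <cassert>
#include <queue>

/* Mirrors RPi::Stream::IdGenerator: dense ids, recycled after release. */
class IdGenerator
{
public:
	explicit IdGenerator(int max) : max_(max), id_(0) {}

	int get()
	{
		if (!recycle_.empty()) {
			int id = recycle_.front();
			recycle_.pop();
			return id;
		}
		assert(id_ < max_);
		return id_++;
	}

	void release(int id) { recycle_.push(id); }

private:
	int max_;
	int id_;
	std::queue<int> recycle_;
};

int main()
{
	IdGenerator gen(16);
	int a = gen.get(); /* 0 */
	int b = gen.get(); /* 1 */

	gen.release(a);
	assert(gen.get() == a); /* A released id is reused first. */
	return b == 1 ? 0 : 1;
}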
diff --git a/src/libcamera/pipeline/rkisp1/meson.build b/src/libcamera/pipeline/rkisp1/meson.build
index cad66535..d21a6ef9 100644
--- a/src/libcamera/pipeline/rkisp1/meson.build
+++ b/src/libcamera/pipeline/rkisp1/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'rkisp1.cpp',
'rkisp1_path.cpp',
])
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 8cca8a15..35c793da 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,35 +2,44 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
-#include <array>
-#include <iomanip>
+#include <map>
#include <memory>
#include <numeric>
+#include <optional>
#include <queue>
+#include <vector>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_proxy.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -64,7 +73,8 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
void clear();
@@ -88,6 +98,7 @@ public:
}
PipelineHandlerRkISP1 *pipe();
+ const PipelineHandlerRkISP1 *pipe() const;
int loadIPA(unsigned int hwRevision);
Stream mainPathStream_;
@@ -103,9 +114,12 @@ public:
std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_;
+ ControlInfoMap ipaControls_;
+
private:
- void queueFrameAction(unsigned int frame,
- const ipa::rkisp1::RkISP1Action &action);
+ void paramsComputed(unsigned int frame, unsigned int bytesused);
+ void setSensorControls(unsigned int frame,
+ const ControlList &sensorControls);
void metadataReady(unsigned int frame, const ControlList &metadata);
};
@@ -118,6 +132,7 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
bool fitsAllPaths(const StreamConfiguration &cfg);
@@ -131,6 +146,7 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
@@ -138,8 +154,8 @@ class PipelineHandlerRkISP1 : public PipelineHandler
public:
PipelineHandlerRkISP1(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -153,40 +169,62 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
RkISP1CameraData *cameraData(Camera *camera)
{
return static_cast<RkISP1CameraData *>(camera->_d());
}
friend RkISP1CameraData;
+ friend RkISP1CameraConfiguration;
friend RkISP1Frames;
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
- void bufferReady(FrameBuffer *buffer);
- void paramReady(FrameBuffer *buffer);
- void statReady(FrameBuffer *buffer);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void dewarpBufferReady(FrameBuffer *buffer);
void frameStart(uint32_t sequence);
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
+ int updateControls(RkISP1CameraData *data);
+
MediaDevice *media_;
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
+ std::unique_ptr<V4L2M2MConverter> dewarper_;
+ Rectangle scalerMaxCrop_;
+ bool useDewarper_;
+
+ std::optional<Rectangle> activeCrop_;
+
+ /* Internal buffers used when dewarper is being used */
+ std::vector<std::unique_ptr<FrameBuffer>> mainPathBuffers_;
+ std::queue<FrameBuffer *> availableMainPathBuffers_;
+
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
std::queue<FrameBuffer *> availableParamBuffers_;
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
@@ -194,27 +232,42 @@ RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
{
}
-RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
unsigned int frame = data->frame_;
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
- }
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
+ FrameBuffer *mainPathBuffer = nullptr;
+ FrameBuffer *selfPathBuffer = nullptr;
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
- }
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
- FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
- FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
+
+ if (pipe_->useDewarper_) {
+ mainPathBuffer = pipe_->availableMainPathBuffers_.front();
+ pipe_->availableMainPathBuffers_.pop();
+ }
+ }
+
+ if (!mainPathBuffer)
+ mainPathBuffer = request->findBuffer(&data->mainPathStream_);
+ selfPathBuffer = request->findBuffer(&data->selfPathStream_);
RkISP1FrameInfo *info = new RkISP1FrameInfo;
@@ -240,6 +293,7 @@ int RkISP1Frames::destroy(unsigned int frame)
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
frameInfo_.erase(info->frame);
@@ -255,6 +309,7 @@ void RkISP1Frames::clear()
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
delete info;
}
@@ -310,16 +365,43 @@ PipelineHandlerRkISP1 *RkISP1CameraData::pipe()
return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe());
}
+const PipelineHandlerRkISP1 *RkISP1CameraData::pipe() const
+{
+ return static_cast<const PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
+
int RkISP1CameraData::loadIPA(unsigned int hwRevision)
{
ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1);
if (!ipa_)
return -ENOENT;
- ipa_->queueFrameAction.connect(this,
- &RkISP1CameraData::queueFrameAction);
+ ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &RkISP1CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady);
+
+ /*
+	 * The IPA tuning file is made from the sensor name unless the
+ * environment variable overrides it.
+ */
+ std::string ipaTuningFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ ipaTuningFile =
+ ipa_->configurationFile(sensor_->model() + ".yaml", "uncalibrated.yaml");
+ } else {
+ ipaTuningFile = std::string(configFromEnv);
+ }
- int ret = ipa_->init(IPASettings{ "", sensor_->model() }, hwRevision);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
+ }
+
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &ipaControls_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
@@ -328,39 +410,28 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
return 0;
}
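The tuning-file lookup added above follows a common override-then-fallback pattern. A simplified sketch (using std::getenv where the real code uses utils::secure_getenv() and IPAProxy::configurationFile(), which additionally falls back to uncalibrated.yaml):

#include <cstdlib>
#include <string>

/* Simplified tuning-file selection: environment wins, sensor model next. */
std::string tuningFile(const std::string &sensorModel)
{
	const char *env = std::getenv("LIBCAMERA_RKISP1_TUNING_FILE");
	if (env && *env != '\0')
		return env;

	return sensorModel + ".yaml";
}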
-void RkISP1CameraData::queueFrameAction(unsigned int frame,
- const ipa::rkisp1::RkISP1Action &action)
+void RkISP1CameraData::paramsComputed(unsigned int frame, unsigned int bytesused)
{
- switch (action.op) {
- case ipa::rkisp1::ActionV4L2Set: {
- const ControlList &controls = action.sensorControls;
- delayedCtrls_->push(controls);
- break;
- }
- case ipa::rkisp1::ActionParamFilled: {
- PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
- RkISP1FrameInfo *info = frameInfo_.find(frame);
- if (!info)
- break;
+ PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
+ RkISP1FrameInfo *info = frameInfo_.find(frame);
+ if (!info)
+ return;
- pipe->param_->queueBuffer(info->paramBuffer);
- pipe->stat_->queueBuffer(info->statBuffer);
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused = bytesused;
+ pipe->param_->queueBuffer(info->paramBuffer);
+ pipe->stat_->queueBuffer(info->statBuffer);
- if (info->mainPathBuffer)
- mainPath_->queueBuffer(info->mainPathBuffer);
+ if (info->mainPathBuffer)
+ mainPath_->queueBuffer(info->mainPathBuffer);
- if (info->selfPathBuffer)
- selfPath_->queueBuffer(info->selfPathBuffer);
+ if (selfPath_ && info->selfPathBuffer)
+ selfPath_->queueBuffer(info->selfPathBuffer);
+}
- break;
- }
- case ipa::rkisp1::ActionMetadata:
- metadataReady(frame, action.controls);
- break;
- default:
- LOG(RkISP1, Error) << "Unknown action " << action.op;
- break;
- }
+void RkISP1CameraData::setSensorControls([[maybe_unused]] unsigned int frame,
+ const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
}
void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &metadata)
@@ -372,9 +443,33 @@ void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &meta
info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe()->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -385,14 +480,16 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
+ const CameraSensor *sensor = data_->sensor_.get();
StreamConfiguration config;
config = cfg;
- if (data_->mainPath_->validate(&config) != Valid)
+ if (data_->mainPath_->validate(sensor, sensorConfig, &config) != Valid)
return false;
config = cfg;
- if (data_->selfPath_->validate(&config) != Valid)
+ if (data_->selfPath_ &&
+ data_->selfPath_->validate(sensor, sensorConfig, &config) != Valid)
return false;
return true;
@@ -400,21 +497,73 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
+ const PipelineHandlerRkISP1 *pipe = data_->pipe();
const CameraSensor *sensor = data_->sensor_.get();
- Status status = Valid;
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
- status = Adjusted;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig) {
+ if (!sensorConfig->isValid()) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration request";
+
+ return Invalid;
+ }
+
+ unsigned int bitDepth = sensorConfig->bitDepth;
+ if (bitDepth != 8 && bitDepth != 10 && bitDepth != 12) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration bit depth";
+
+ return Invalid;
+ }
}
/* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
+ status = Adjusted;
+ }
+
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
+ }
+
+ bool useDewarper = false;
+ if (pipe->dewarper_) {
+ /*
+		 * Platforms with dewarper support, such as the i.MX8MP, support
+		 * only a single stream, so inspecting config_[0] here is
+		 * sufficient.
+ */
+ bool isRaw = PixelFormatInfo::info(config_[0].pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+ if (!isRaw)
+ useDewarper = true;
}
/*
@@ -429,135 +578,219 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (config_.size() == 2 && fitsAllPaths(config_[0]))
std::reverse(order.begin(), order.end());
+ /*
+ * Validate the configuration against the desired path and, if the
+ * platform supports it, the dewarper.
+ */
+ auto validateConfig = [&](StreamConfiguration &cfg, RkISP1Path *path,
+ Stream *stream, Status expectedStatus) {
+ StreamConfiguration tryCfg = cfg;
+
+ Status ret = path->validate(sensor, sensorConfig, &tryCfg);
+ if (ret == Invalid)
+ return false;
+
+ if (!useDewarper &&
+ (expectedStatus == Valid && ret == Adjusted))
+ return false;
+
+ if (useDewarper) {
+ bool adjusted;
+
+ pipe->dewarper_->validateOutput(&tryCfg, &adjusted,
+ Converter::Alignment::Down);
+ if (expectedStatus == Valid && adjusted)
+ return false;
+ }
+
+ cfg = tryCfg;
+ cfg.setStream(stream);
+ return true;
+ };
+
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
+ RkISP1Path *mainPath = data_->mainPath_;
+ RkISP1Path *selfPath = data_->selfPath_;
+ Stream *mainPathStream = const_cast<Stream *>(&data_->mainPathStream_);
+ Stream *selfPathStream = const_cast<Stream *>(&data_->selfPathStream_);
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Valid) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Valid)) {
mainPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
continue;
}
}
if (selfPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Valid) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Valid)) {
selfPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
continue;
}
}
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Adjusted) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Adjusted)) {
mainPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
status = Adjusted;
continue;
}
}
if (selfPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Adjusted) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Adjusted)) {
selfPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
status = Adjusted;
continue;
}
}
- /* All paths rejected configuraiton. */
+ /* All paths rejected configuration. */
LOG(RkISP1, Debug) << "Camera configuration not supported "
<< cfg.toString();
return Invalid;
}
/* Select the sensor format. */
+ PixelFormat rawFormat;
Size maxSize;
- for (const StreamConfiguration &cfg : config_)
+
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
maxSize = std::max(maxSize, cfg.size);
+ }
+
+ std::vector<unsigned int> mbusCodes;
+
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
+
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize,
+ mainPath->maxResolution());
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- maxSize);
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
return status;
}
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true), useDewarper_(false)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+
for (const StreamRole role : roles) {
- bool useMainPath;
+ Size size;
switch (role) {
- case StreamRole::StillCapture: {
- useMainPath = mainPathAvailable;
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
break;
- }
+
case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- useMainPath = !selfPathAvailable;
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
break;
- }
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
default:
LOG(RkISP1, Warning)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
- StreamConfiguration cfg;
- if (useMainPath) {
- cfg = data->mainPath_->generateConfiguration(
- data->sensor_->resolution());
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+ * RGB (only available on the self path) in the streams formats
+ * exposed to applications. This likely calls for a better API
+ * to expose streams capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
mainPathAvailable = false;
} else {
- cfg = data->selfPath_->generateConfiguration(
- data->sensor_->resolution());
- selfPathAvailable = false;
+ path = data->selfPath_;
}
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
config->addConfiguration(cfg);
}
@@ -583,56 +816,119 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
* the pipeline.
*/
V4L2SubdeviceFormat format = config->sensorFormat();
- LOG(RkISP1, Debug) << "Configuring sensor with " << format.toString();
+ LOG(RkISP1, Debug) << "Configuring sensor with " << format;
+
+ if (config->sensorConfig)
+ ret = sensor->applyConfiguration(*config->sensorConfig,
+ config->combinedTransform(),
+ &format);
+ else
+ ret = sensor->setFormat(&format, config->combinedTransform());
- ret = sensor->setFormat(&format);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "Sensor configured with " << format.toString();
+ LOG(RkISP1, Debug) << "Sensor configured with " << format;
+
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
- Rectangle rect(0, 0, format.size);
- ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
+ Rectangle inputCrop(0, 0, format.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &inputCrop);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
- << "ISP input pad configured with " << format.toString()
- << " crop " << rect.toString();
+ << "ISP input pad configured with " << format
+ << " crop " << inputCrop;
+
+ Rectangle outputCrop = inputCrop;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ useDewarper_ = dewarper_ && !isRaw_;
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+
+ /*
+ * On devices without DUAL_CROP (like the imx8mp) cropping needs to be
+ * done on the ISP/IS output.
+ */
+ if (media_->hwRevision() == RKISP1_V_IMX8MP) {
+ /* imx8mp has only a single path. */
+ const auto &cfg = config->at(0);
+ Size ispCrop = format.size.boundedToAspectRatio(cfg.size);
+ if (useDewarper_)
+ ispCrop = dewarper_->adjustInputSize(cfg.pixelFormat,
+ ispCrop);
+ else
+ ispCrop.alignUpTo(2, 2);
+
+ outputCrop = ispCrop.centeredTo(Rectangle(format.size).center());
+ format.size = ispCrop;
+ }
+
LOG(RkISP1, Debug)
- << "Configuring ISP output pad with " << format.toString()
- << " crop " << rect.toString();
+ << "Configuring ISP output pad with " << format
+ << " crop " << outputCrop;
- ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &rect);
+ ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &outputCrop);
if (ret < 0)
return ret;
+ format.colorSpace = config->at(0).colorSpace;
ret = isp_->setFormat(2, &format);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
- << "ISP output pad configured with " << format.toString()
- << " crop " << rect.toString();
+ << "ISP output pad configured with " << format
+ << " crop " << outputCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret)
+ return ret;
std::map<unsigned int, IPAStream> streamConfig;
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
for (const StreamConfiguration &cfg : *config) {
if (cfg.stream() == &data->mainPathStream_) {
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
- } else {
+			/* Configure the dewarper. */
+ if (dewarper_ && !isRaw_) {
+ outputCfgs.push_back(const_cast<StreamConfiguration &>(cfg));
+ ret = dewarper_->configure(cfg, outputCfgs);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate the crop rectangle of the data
+ * flowing into the dewarper in sensor
+ * coordinates.
+ */
+ scalerMaxCrop_ =
+ outputCrop.transformedBetween(inputCrop,
+ sensorInfo.analogCrop);
+ }
+ } else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
cfg.size);
+ } else {
+ return -ENODEV;
}
if (ret)
@@ -640,7 +936,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
}
V4L2DeviceFormat paramFormat;
- paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS);
+ paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_EXT_PARAMS);
ret = param_->setFormat(&paramFormat);
if (ret)
return ret;
@@ -652,24 +948,17 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
- IPACameraSensorInfo sensorInfo = {};
- ret = data->sensor_->sensorInfo(&sensorInfo);
- if (ret) {
- /* \todo Turn this into a hard failure. */
- LOG(RkISP1, Warning) << "Camera sensor information not available";
- sensorInfo = {};
- ret = 0;
- }
-
- std::map<uint32_t, ControlInfoMap> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ ipa::rkisp1::IPAConfigInfo ipaConfig{ sensorInfo,
+ data->sensor_->controls(),
+ paramFormat.fourcc };
- ret = data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
}
- return 0;
+
+ return updateControls(data);
}
int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
@@ -678,10 +967,19 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S
RkISP1CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- if (stream == &data->mainPathStream_)
- return mainPath_.exportBuffers(count, buffers);
- else if (stream == &data->selfPathStream_)
+ if (stream == &data->mainPathStream_) {
+ /*
+		 * Currently, the i.MX8MP is the only platform with the DW100
+		 * dewarper. It has a main path but no self path, so for now
+		 * export buffers from the dewarper only for the main path
+		 * stream.
+ */
+ if (useDewarper_)
+ return dewarper_->exportBuffers(&data->mainPathStream_, count, buffers);
+ else
+ return mainPath_.exportBuffers(count, buffers);
+ } else if (hasSelfPath_ && stream == &data->selfPathStream_) {
return selfPath_.exportBuffers(count, buffers);
+ }
return -EINVAL;
}
@@ -697,13 +995,25 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
data->selfPathStream_.configuration().bufferCount,
});
- ret = param_->allocateBuffers(maxCount, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(maxCount, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* If the dewarper is being used, allocate internal buffers for ISP. */
+ if (useDewarper_) {
+ ret = mainPath_.exportBuffers(maxCount, &mainPathBuffers_);
+ if (ret < 0)
+ goto error;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : mainPathBuffers_)
+ availableMainPathBuffers_.push(buffer.get());
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
@@ -726,6 +1036,7 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
error:
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
return ret;
}
@@ -740,8 +1051,12 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
while (!availableParamBuffers_.empty())
availableParamBuffers_.pop();
+ while (!availableMainPathBuffers_.empty())
+ availableMainPathBuffers_.pop();
+
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : data->ipaBuffers_)
@@ -762,69 +1077,71 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
RkISP1CameraData *data = cameraData(camera);
+ utils::ScopeExitActions actions;
int ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ actions += [&]() { freeBuffers(camera); };
ret = data->ipa_->start();
if (ret) {
- freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start IPA " << camera->id();
return ret;
}
+ actions += [&]() { data->ipa_->stop(); };
data->frame_ = 0;
- ret = param_->streamOn();
- if (ret) {
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->id();
- return ret;
- }
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
+ actions += [&]() { param_->streamOff(); };
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->id();
- return ret;
+ ret = stat_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
+ actions += [&]() { stat_->streamOff(); };
+
+ if (useDewarper_) {
+ ret = dewarper_->start();
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to start dewarper";
+ return ret;
+ }
+ actions += [&]() { dewarper_->stop(); };
+ }
}
if (data->mainPath_->isEnabled()) {
ret = mainPath_.start();
- if (ret) {
- param_->streamOff();
- stat_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
+ if (ret)
return ret;
- }
+ actions += [&]() { mainPath_.stop(); };
}
- if (data->selfPath_->isEnabled()) {
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
- if (ret) {
- mainPath_.stop();
- param_->streamOff();
- stat_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
+ if (ret)
return ret;
- }
}
isp_->setFrameStartEnabled(true);
activeCamera_ = camera;
- return ret;
+
+ actions.release();
+ return 0;
}
void PipelineHandlerRkISP1::stopDevice(Camera *camera)
@@ -836,18 +1153,24 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera)
data->ipa_->stop();
- selfPath_.stop();
+ if (hasSelfPath_)
+ selfPath_.stop();
mainPath_.stop();
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics for " << camera->id();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters for " << camera->id();
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+
+ if (useDewarper_)
+ dewarper_->stop();
+ }
ASSERT(data->queuedRequests_.empty());
data->frameInfo_.clear();
@@ -861,16 +1184,21 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- RkISP1FrameInfo *info = data->frameInfo_.create(data, request);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
- ipa::rkisp1::RkISP1Event ev;
- ev.op = ipa::rkisp1::EventQueueRequest;
- ev.frame = data->frame_;
- ev.bufferId = info->paramBuffer->cookie();
- ev.controls = request->controls();
- data->ipa_->processEvent(ev);
+ data->ipa_->queueRequest(data->frame_, request->controls());
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->computeParams(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -896,8 +1224,7 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
* Configure the sensor links: enable the link corresponding to this
* camera.
*/
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (link->source()->entity() != sensor->entity())
continue;
@@ -911,10 +1238,18 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return ret;
}
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
- else if (cfg.stream() == &data->selfPathStream_)
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
ret = data->selfPath_->setEnabled(true);
else
return -EINVAL;
@@ -926,37 +1261,79 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return 0;
}
-int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls that the pipeline
+ * is responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to make sure the IPA
+ * controls list is properly refreshed.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerRkISP1::updateControls(RkISP1CameraData *data)
{
- int ret;
+ ControlInfoMap::Map controls;
- std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this, &mainPath_, &selfPath_);
+ if (dewarper_) {
+ std::pair<Rectangle, Rectangle> cropLimits;
+ if (dewarper_->isConfigured(&data->mainPathStream_))
+ cropLimits = dewarper_->inputCropBounds(&data->mainPathStream_);
+ else
+ cropLimits = dewarper_->inputCropBounds();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so the limits
+		 * need to be transformed accordingly. We can safely assume that
+		 * the maximum crop limit contains the full FOV of the dewarper.
+ */
+ Rectangle min = cropLimits.first.transformedBetween(cropLimits.second,
+ scalerMaxCrop_);
+
+ controls[&controls::ScalerCrop] = ControlInfo(min,
+ scalerMaxCrop_,
+ scalerMaxCrop_);
+ data->properties_.set(properties::ScalerCropMaximum, scalerMaxCrop_);
+ activeCrop_ = scalerMaxCrop_;
+ }
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
+ /* Add the IPA registered controls to list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
- data->controlInfo_ = ControlInfoMap(std::move(ctrls),
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
controls::controls);
- data->sensor_ = std::make_unique<CameraSensor>(sensor);
- ret = data->sensor_->init();
- if (ret)
- return ret;
+ return 0;
+}
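
updateControls() maps the dewarper crop bounds into sensor coordinates with Rectangle::transformedBetween(). As an illustration only (a sketch of the proportional mapping such a transform performs, not libcamera's implementation), the operation amounts to scaling and offsetting a rectangle from one reference frame to another:

struct Rect {
	int x, y;
	unsigned int width, height;
};

/* Map r, expressed relative to src, into the dst coordinate system. */
Rect transformedBetween(const Rect &r, const Rect &src, const Rect &dst)
{
	double sx = static_cast<double>(dst.width) / src.width;
	double sy = static_cast<double>(dst.height) / src.height;

	return {
		dst.x + static_cast<int>((r.x - src.x) * sx),
		dst.y + static_cast<int>((r.y - src.y) * sy),
		static_cast<unsigned int>(r.width * sx),
		static_cast<unsigned int>(r.height * sy),
	};
}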
+
+int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
+{
+ int ret;
+
+ std::unique_ptr<RkISP1CameraData> data =
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!data->sensor_)
+ return -ENODEV;
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
- /*
- * \todo Read dealy values from the sensor itself or from a
- * a sensor database. For now use generic values taken from
- * the Raspberry Pi and listed as generic values.
- */
+ scalerMaxCrop_ = Rectangle(data->sensor_->resolution());
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
- { V4L2_CID_EXPOSURE, { 2, false } },
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
};
data->delayedCtrls_ =
@@ -969,6 +1346,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
if (ret)
return ret;
+ updateControls(data.get());
+
std::set<Stream *> streams{
&data->mainPathStream_,
&data->selfPathStream_,
@@ -987,9 +1366,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -1004,11 +1381,29 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
return false;
}
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
+ return false;
+
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
@@ -1022,24 +1417,44 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!mainPath_.init(media_))
return false;
- if (!selfPath_.init(media_))
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
- mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
- param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
+ mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statBufferReady);
+ param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramBufferReady);
+
+ /* If dewarper is present, create its instance. */
+ DeviceMatch dwp("dw100");
+ dwp.add("dw100-source");
+ dwp.add("dw100-sink");
+
+ std::shared_ptr<MediaDevice> dwpMediaDevice = enumerator->search(dwp);
+ if (dwpMediaDevice) {
+ dewarper_ = std::make_unique<V4L2M2MConverter>(dwpMediaDevice.get());
+ if (dewarper_->isValid()) {
+ dewarper_->outputBufferReady.connect(
+ this, &PipelineHandlerRkISP1::dewarpBufferReady);
+
+ LOG(RkISP1, Info)
+ << "Using DW100 dewarper " << dewarper_->deviceNode();
+ } else {
+ LOG(RkISP1, Warning)
+ << "Found DW100 dewarper " << dewarper_->deviceNode()
+ << " but invalid";
+
+ dewarper_.reset();
+ }
+ }
/*
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
bool registered = false;
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (!createCamera(link->source()->entity()))
registered = true;
}
@@ -1051,12 +1466,10 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -1064,7 +1477,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
@@ -1072,24 +1485,127 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
completeRequest(request);
}
-void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::imageBufferReady(FrameBuffer *buffer)
{
- Request *request = buffer->request();
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
+
+ const FrameMetadata &metadata = buffer->metadata();
+ Request *request = info->request;
+
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStats(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
+
+ if (!useDewarper_) {
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+
+ return;
+ }
+
+ /* Do not queue cancelled frames to dewarper. */
+ if (metadata.status == FrameMetadata::FrameCancelled) {
+ /*
+		 * The i.MX8MP is the only known platform with a dewarper. It
+		 * has no self path, so only main path buffer completion is
+		 * required.
+		 *
+		 * Also, we cannot completeBuffer(request, buffer) here, as the
+		 * buffer is an internal one (between the ISP and the dewarper)
+		 * and is not associated with any specific request. The request
+		 * buffer associated with the main path stream is the one that
+		 * needs to be completed (not the internal buffer).
+ */
+ for (auto it : request->buffers()) {
+ if (it.first == &data->mainPathStream_)
+ completeBuffer(request, it.second);
+ }
+
+ tryCompleteRequest(info);
+ return;
+ }
+
+ /* Handle scaler crop control. */
+ const auto &crop = request->controls().get(controls::ScalerCrop);
+ if (crop) {
+ Rectangle rect = crop.value();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so it needs to
+		 * be transformed into dewarper coordinates. We can safely
+		 * assume that the maximum crop limit contains the full FOV of
+		 * the dewarper.
+ */
+ std::pair<Rectangle, Rectangle> cropLimits =
+ dewarper_->inputCropBounds(&data->mainPathStream_);
+
+ rect = rect.transformedBetween(scalerMaxCrop_, cropLimits.second);
+ int ret = dewarper_->setInputCrop(&data->mainPathStream_,
+ &rect);
+ rect = rect.transformedBetween(cropLimits.second, scalerMaxCrop_);
+ if (!ret && rect != crop.value()) {
+ /*
+ * If the rectangle is changed by setInputCrop on the
+ * dewarper, log a debug message and cache the actual
+ * applied rectangle for metadata reporting.
+ */
+ LOG(RkISP1, Debug)
+ << "Applied rectangle " << rect.toString()
+ << " differs from requested " << crop.value().toString();
+ }
+
+ activeCrop_ = rect;
+ }
/*
- * Record the sensor's timestamp in the request metadata.
- *
- * \todo The sensor timestamp should be better estimated by connecting
- * to the V4L2Device::frameStart signal.
+ * Queue input and output buffers to the dewarper. The output
+ * buffers for the dewarper are the buffers of the request, supplied
+ * by the application.
*/
- request->metadata().set(controls::SensorTimestamp,
- buffer->metadata().timestamp);
+ int ret = dewarper_->queueBuffers(buffer, request->buffers());
+ if (ret < 0)
+ LOG(RkISP1, Error) << "Cannot queue buffers to dewarper: "
+ << strerror(-ret);
+
+ request->metadata().set(controls::ScalerCrop, activeCrop_.value());
+}
+
+void PipelineHandlerRkISP1::dewarpBufferReady(FrameBuffer *buffer)
+{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+ Request *request = buffer->request();
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer->request());
+ if (!info)
+ return;
completeBuffer(request, buffer);
- tryCompleteRequest(request);
+ tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::paramBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1099,10 +1615,10 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::statBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1113,21 +1629,17 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
return;
}
if (data->frame_ <= buffer->metadata().sequence)
data->frame_ = buffer->metadata().sequence + 1;
- ipa::rkisp1::RkISP1Event ev;
- ev.op = ipa::rkisp1::EventSignalStatBuffer;
- ev.frame = info->frame;
- ev.bufferId = info->statBuffer->cookie();
- ev.sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence);
- data->ipa_->processEvent(ev);
+ data->ipa_->processStats(info->frame, info->statBuffer->cookie(),
+ data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index f8d47120..eee5b09e 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.cpp - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
@@ -12,6 +12,7 @@
#include <libcamera/formats.h>
#include <libcamera/stream.h>
+#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -20,11 +21,41 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(RkISP1)
-RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
- const Size &minResolution, const Size &maxResolution)
- : name_(name), running_(false), formats_(formats),
- minResolution_(minResolution), maxResolution_(maxResolution),
- link_(nullptr)
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
+RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats)
+ : name_(name), running_(false), formats_(formats), link_(nullptr)
{
}
@@ -41,6 +72,8 @@ bool RkISP1Path::init(MediaDevice *media)
if (video_->open() < 0)
return false;
+ populateFormats();
+
link_ = media->link("rkisp1_isp", 2, resizer, 0);
if (!link_)
return false;
@@ -48,39 +81,309 @@ bool RkISP1Path::init(MediaDevice *media)
return true;
}
-StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
+void RkISP1Path::populateFormats()
+{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+/**
+ * \brief Filter the sensor resolutions that can be supported
+ * \param[in] sensor The camera sensor
+ *
+ * This function retrieves all the sizes supported by the sensor and keeps
+ * only the resolutions that the pipeline can support. It is possible that
+ * the sensor's maximum output resolution is higher than the ISP maximum
+ * input. In that case, this function filters out all the resolutions that
+ * cannot be supported and returns the maximum sensor resolution that the
+ * pipeline can support.
+ *
+ * \return Maximum sensor size supported on the pipeline
+ */
+Size RkISP1Path::filterSensorResolution(const CameraSensor *sensor)
{
- Size maxResolution = resolution;
- maxResolution.boundTo(maxResolution_);
+ auto iter = sensorSizesMap_.find(sensor);
+ if (iter != sensorSizesMap_.end())
+ return iter->second.back();
+
+ std::vector<Size> &sizes = sensorSizesMap_[sensor];
+ for (unsigned int code : sensor->mbusCodes()) {
+ for (const Size &size : sensor->sizes(code)) {
+ if (size.width > maxResolution_.width ||
+ size.height > maxResolution_.height)
+ continue;
+
+ sizes.push_back(size);
+ }
+ }
+
+ /* Sort in increasing order and remove duplicates. */
+ std::sort(sizes.begin(), sizes.end());
+ auto last = std::unique(sizes.begin(), sizes.end());
+ sizes.erase(last, sizes.end());
+ return sizes.back();
+}
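
The sort-and-deduplicate step above is the standard C++ idiom for turning an unordered list with duplicates into an increasing unique sequence whose back() is the maximum. A standalone illustration, with plain integers standing in for Size:

#include <algorithm>
#include <vector>

std::vector<int> sortedUnique(std::vector<int> values)
{
	std::sort(values.begin(), values.end());
	values.erase(std::unique(values.begin(), values.end()), values.end());
	return values; /* Increasing order; values.back() is the maximum. */
}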
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
+ /* Min and max resolutions to populate the available stream formats. */
+ Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
- for (const PixelFormat &format : formats_)
- streamFormats[format] = { { minResolution_, maxResolution } };
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
- cfg.pixelFormat = formats::NV12;
- cfg.size = maxResolution;
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
cfg.bufferCount = RKISP1_BUFFER_COUNT;
return cfg;
}
-CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
+CameraConfiguration::Status
+RkISP1Path::validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg)
{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
- if (std::find(formats_.begin(), formats_.end(), cfg->pixelFormat) ==
- formats_.end())
- cfg->pixelFormat = formats::NV12;
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * If the bits per pixel is supplied from the sensor
+ * configuration, choose a raw format that complies with
+ * it. Otherwise, store the raw format with the highest
+ * bits per pixel for later usage.
+ */
+ if (sensorConfig && info.bitsPerPixel != sensorConfig->bitDepth)
+ continue;
+
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ if (sensorConfig && !rawFormat.isValid())
+ return CameraConfiguration::Invalid;
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size while ensuring the output size doesn't exceed ISP limits.
+ *
+ * As 'resolution' is the largest sensor resolution
+ * supported by the ISP, CameraSensor::getFormat() will never
+ * return a V4L2SubdeviceFormat with a larger size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ cfg->size.boundTo(resolution);
+
+ Size rawSize = sensorConfig ? sensorConfig->outputSize
+ : cfg->size;
+
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, rawSize);
+
+ if (sensorConfig &&
+ sensorConfig->outputSize != sensorFormat.size)
+ return CameraConfiguration::Invalid;
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else if (sensorConfig) {
+ /*
+		 * We have already ensured that 'rawFormat' matches the bit
+		 * depth of sensorConfig.bitDepth; hence, only validate the
+		 * sensorConfig's output size here.
+ */
+ Size sensorSize = sensorConfig->outputSize;
+
+ if (sensorSize > resolution)
+ return CameraConfiguration::Invalid;
+
+ uint32_t mbusCode = formatToMediaBus.at(rawFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, sensorSize);
+
+ if (sensorFormat.size != sensorSize)
+ return CameraConfiguration::Invalid;
+
+ minResolution = minResolution_.expandedToAspectRatio(sensorSize);
+ maxResolution = maxResolution_.boundedTo(sensorSize)
+ .boundedToAspectRatio(sensorSize);
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
- cfg->size.boundTo(maxResolution_);
- cfg->size.expandTo(minResolution_);
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
cfg->bufferCount = RKISP1_BUFFER_COUNT;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg->pixelFormat);
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
format.size = cfg->size;
int ret = video_->tryFormat(&format);
@@ -111,30 +414,40 @@ int RkISP1Path::configure(const StreamConfiguration &config,
if (ret < 0)
return ret;
- Rectangle rect(0, 0, ispFormat.size);
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+	 * Note that this does not apply to devices without DUAL_CROP support
+	 * (like the i.MX8MP), where the cropping needs to be done on the
+	 * ImageStabilizer block on the ISP source pad and is therefore
+	 * configured before this stage. For simplicity we still set the crop;
+	 * the kernel driver ignores it because the hardware lacks the
+	 * capability.
+ *
+ * Alignment to a multiple of 2 pixels is required by the resizer.
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "Configured " << name_ << " resizer input pad with "
- << ispFormat.toString() << " crop " << rect.toString();
+ << ispFormat << " crop " << rect;
ispFormat.size = config.size;
LOG(RkISP1, Debug)
<< "Configuring " << name_ << " resizer output pad with "
- << ispFormat.toString();
-
- switch (config.pixelFormat) {
- case formats::NV12:
- case formats::NV21:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- break;
- default:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- }
+ << ispFormat;
+
+ /*
+ * The configuration has been validated, the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
ret = resizer_->setFormat(1, &ispFormat);
if (ret < 0)
@@ -142,11 +455,11 @@ int RkISP1Path::configure(const StreamConfiguration &config,
LOG(RkISP1, Debug)
<< "Configured " << name_ << " resizer output pad with "
- << ispFormat.toString();
+ << ispFormat;
const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
V4L2DeviceFormat outputFormat;
- outputFormat.fourcc = V4L2PixelFormat::fromPixelFormat(config.pixelFormat);
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
outputFormat.size = config.size;
outputFormat.planesCount = info.numPlanes();
@@ -155,7 +468,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
return ret;
if (outputFormat.size != config.size ||
- outputFormat.fourcc != V4L2PixelFormat::fromPixelFormat(config.pixelFormat)) {
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
LOG(RkISP1, Error)
<< "Unable to configure capture in " << config.toString();
return -EINVAL;
@@ -203,21 +516,32 @@ void RkISP1Path::stop()
running_ = false;
}
+/*
+ * \todo Remove the hardcoded formats once all users have migrated to a
+ * recent enough kernel.
+ */
namespace {
-constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
-constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
-constexpr std::array<PixelFormat, 6> RKISP1_RSZ_MP_FORMATS{
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
- /* \todo Add support for RAW formats. */
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
};
-constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
-constexpr Size RKISP1_RSZ_SP_SRC_MAX{ 1920, 1920 };
constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
formats::YUYV,
formats::NV16,
@@ -231,14 +555,12 @@ constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
} /* namespace */
RkISP1MainPath::RkISP1MainPath()
- : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS,
- RKISP1_RSZ_MP_SRC_MIN, RKISP1_RSZ_MP_SRC_MAX)
+ : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS)
{
}
RkISP1SelfPath::RkISP1SelfPath()
- : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS,
- RKISP1_RSZ_SP_SRC_MIN, RKISP1_RSZ_SP_SRC_MAX)
+ : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS)
{
}
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index f3f1ae39..2a1ef0ab 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -2,12 +2,14 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.h - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#pragma once
+#include <map>
#include <memory>
+#include <set>
#include <vector>
#include <libcamera/base/signal.h>
@@ -22,7 +24,9 @@
namespace libcamera {
+class CameraSensor;
class MediaDevice;
+class SensorConfiguration;
class V4L2Subdevice;
struct StreamConfiguration;
struct V4L2SubdeviceFormat;
@@ -30,16 +34,19 @@ struct V4L2SubdeviceFormat;
class RkISP1Path
{
public:
- RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
- const Size &minResolution, const Size &maxResolution);
+ RkISP1Path(const char *name, const Span<const PixelFormat> &formats);
bool init(MediaDevice *media);
int setEnabled(bool enable) { return link_->setEnabled(enable); }
bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
- StreamConfiguration generateConfiguration(const Size &resolution);
- CameraConfiguration::Status validate(StreamConfiguration *cfg);
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat);
@@ -55,20 +62,31 @@ public:
int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); }
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
+ const Size &maxResolution() const { return maxResolution_; }
private:
+ void populateFormats();
+ Size filterSensorResolution(const CameraSensor *sensor);
+
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
const char *name_;
bool running_;
const Span<const PixelFormat> formats_;
- const Size minResolution_;
- const Size maxResolution_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
MediaLink *link_;
+
+ /*
+	 * Map from camera sensors to the sizes (in increasing order) that
+	 * are guaranteed to be supported by the pipeline.
+ */
+ std::map<const CameraSensor *, std::vector<Size>> sensorSizesMap_;
};
class RkISP1MainPath : public RkISP1Path
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth of a pipeline the
+ * application needs to maintain to also cover controls. Just as with buffer
+ * depth if the application keeps the number of requests queued above the
+ * control depth the controls are guaranteed to take effect for the correct
+ * request. The control depth is determined by the control with the greatest
+ * delay.
+ */
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
+ * the existing vertical blanking specified bounds, but are within the new
+ * blanking bounds.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of, the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. If it's desired to
+ * mix delayed controls and controls that take effect immediately the immediate
+ * controls must be listed in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
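
A hedged usage sketch from a pipeline handler's point of view. The delay values and the sensorDevice, request and requestCookie names are assumptions for illustration; only the DelayedControls interface itself comes from the code above:

std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
	{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
	{ V4L2_CID_EXPOSURE, { 2, false } },
	/* priorityWrite: applied ahead of, and separately from, the batch. */
	{ V4L2_CID_VBLANK, { 2, true } },
};

auto delayed = std::make_unique<RPi::DelayedControls>(sensorDevice, params);

/* For each queued request, push its sensor controls with a cookie. */
delayed->push(request->controls(), requestCookie);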
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie The cookie to associate with the initial state
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+	/* Retrieve the controls as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+		 * Do not mark this control value as updated, it does not need
+		 * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ * \param[in] cookie The cookie to associate with this set of controls
+ *
+ * Push a set of controls to the control queue. This increases the control
+ * queue depth by one.
+ *
+ * \return true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist.
+ * It's the caller's responsibility not to read sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of controls pushed.
+ *
+ * \return The controls and associated cookie in effect at \a sequence number
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
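
Downstream, a buffer-ready handler can ask which controls were in effect for a dequeued frame. A short sketch, with 'delayed' and 'buffer' assumed from the surrounding pipeline-handler context:

auto [ctrls, cookie] = delayed->get(buffer->metadata().sequence);
/*
 * 'ctrls' holds the sensor controls that were in effect for this frame;
 * 'cookie' is the value passed to the matching push(), e.g. a request id.
 */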
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of any frame. This can easily be connected to the V4L2
+ * start of exposure (SOE) event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+				 * This control must be written now, as it could
+				 * affect the validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
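
As the documentation above suggests, applyControls() is meant to run once per frame start. A hedged wiring sketch, assuming a V4L2 device that has frame-start events enabled (setFrameStartEnabled(true)) and the 'delayed' instance from the earlier sketch:

sensorDevice->frameStart.connect(delayed.get(),
				 &RPi::DelayedControls::applyControls);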
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
new file mode 100644
index 00000000..b2b1a0a6
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
+ 'rpi_stream.cpp',
+])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..4b147fdb
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+ * variants. So if the PixelFormat returns as invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+ for (auto &cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+ for (auto &cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+ unsigned int rawStreamIndex = 0;
+ unsigned int outStreamIndex = 0;
+
+ for (auto &cfg : config_) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(rawStreamIndex++, &cfg);
+ else
+ outStreams_.emplace_back(outStreamIndex++, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration's fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+	if (stream->colorSpace != format.colorSpace) {
+		LOG(RPI, Debug)
+			<< "Color space changed from "
+			<< ColorSpace::toString(stream->colorSpace) << " to "
+			<< ColorSpace::toString(format.colorSpace);
+		stream->colorSpace = format.colorSpace;
+		adjusted = true;
+	}
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
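+
+/*
+ * A typical platform caller pairs these helpers with setFormat(), e.g. a
+ * sketch using a StreamParams entry as in validate() above:
+ *
+ *   raw.format = PipelineHandlerBase::toV4L2DeviceFormat(raw.dev, raw.cfg);
+ *   int ret = raw.dev->setFormat(&raw.format);
+ */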
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ return config;
+}
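+
+/*
+ * From the application side this is reached through the public Camera API,
+ * e.g. (sketch, error handling omitted):
+ *
+ *   std::unique_ptr<CameraConfiguration> config =
+ *           camera->generateConfiguration({ StreamRole::Viewfinder });
+ *   config->at(0).size = { 1280, 720 };
+ *   config->validate();
+ *   camera->configure(config.get());
+ */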
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+	/* Start by freeing all buffers and resetting the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+ * If the application has provided a sensor configuration apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ data->cropParams_.clear();
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ const auto cropParamsIt = data->cropParams_.find(0);
+ if (cropParamsIt != data->cropParams_.end()) {
+ const CameraData::CropParams &cropParams = cropParamsIt->second;
+ /*
+ * Add the ScalerCrop control limits based on the current mode and
+ * the first configured stream.
+ */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(cropParams.ispMinCropSize));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop,
+ data->scaleIspCrop(cropParams.ispCrop));
+ if (data->cropParams_.size() == 2) {
+			/*
+			 * The control map for rpi::ScalerCrops uses the min value
+			 * as the default crop for stream 0 and the max value as
+			 * the default crop for stream 1.
+			 */
+ ctrlMap[&controls::rpi::ScalerCrops] =
+ ControlInfo(data->scaleIspCrop(data->cropParams_.at(0).ispCrop),
+ data->scaleIspCrop(data->cropParams_.at(1).ispCrop),
+ ctrlMap[&controls::ScalerCrop].def());
+ }
+ }
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+		/*
+		 * Start by disabling all the sink pad links on the devices in the
+		 * cascade, with the exception of the link arriving at this device.
+		 */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+		/*
+		 * Next, enable the entity -> entity links, and set up the pad format.
+		 *
+		 * \todo Some bridge devices may change the media bus code, so we
+		 * ought to read the source pad format and propagate it to the sink pad.
+		 */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+	/*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor ->
+	 * frontend chain. There may be a cascade of devices in this chain!
+	 */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { delays.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { delays.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+	/*
+	 * Set a default value for the ScalerCropMaximum property to show
+	 * that we support its use; however, initialise it to zero because
+	 * it's not meaningful until a camera mode has been chosen.
+	 */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+	/*
+	 * Link the FrameBuffers with the id (key value) in the map stored in
+	 * the RPi stream object, along with an identifier mask.
+	 *
+	 * This will allow us to identify buffers passed between the pipeline
+	 * handler and the IPA.
+	 */
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+			for (unsigned int i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+	/* Penalise modes smaller than the request more heavily than larger ones. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
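+
+/*
+ * For example, for a requested width of 1920, a 1280-wide mode scores
+ * (1920 - 1280) * 2 = 1280, whereas a 2028-wide mode scores
+ * ((2028 - 1920) / 8) * 2 = 27, so the slightly oversized mode wins.
+ */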
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity-to-entity link.
+ *
+ * Entity pad configuration and link enabling happens at the end of configure().
+ * We first disable all pad links on each entity device in the chain, then
+ * selectively enable the specific links required to connect the sensor to the
+ * frontend across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ *    +----------+
+ *    |    FE    |
+ *    +-----^----+
+ *          |
+ *      +---+---+
+ *      |  Mux1 |<------+
+ *      +--^----+       |
+ *         |            |
+ *  +------+--+     +---+---+
+ *  | Sensor1 |     |  Mux2 |<--+
+ *  +---------+     +-^-----+   |
+ *                    |         |
+ *            +-------+-+   +---+-----+
+ *            | Sensor2 |   | Sensor3 |
+ *            +---------+   +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+		LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+				  << ver.value_or(0.0);
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
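+
+/*
+ * For example, a custom configuration can be selected at runtime with
+ * (path illustrative):
+ *
+ *   export LIBCAMERA_RPI_CONFIG_FILE=/path/to/custom-pipeline.yaml
+ */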
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/* Add what the IPA has provided to the request's metadata buffer. */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
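+
+/*
+ * For example, with notifyGainsUnity_ = 128 and ColourGains = { 1.8, 1.5 }
+ * (red, blue), the control receives { 192, 128, 128, 230 } for
+ * { B, Gb, Gr, R }.
+ */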
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+	/*
+	 * We need to ensure that if both VBLANK and EXPOSURE are present, the
+	 * former is written ahead of, and separately from, EXPOSURE to avoid
+	 * V4L2 rejecting the latter. This is identical to what DelayedControls
+	 * does with the priority write flag.
+	 *
+	 * As a consequence of the logic below, VBLANK gets set twice, and we
+	 * rely on the V4L2 framework not to pass the second control set to the
+	 * driver, as the actual control value has not changed.
+	 */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
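+
+/*
+ * For example (values illustrative): if the IPA requests EXPOSURE = 3000
+ * lines while the current frame length only permits 2000, writing VBLANK
+ * first extends the frame length, so the subsequent EXPOSURE write is not
+ * clamped or rejected by the sensor driver.
+ */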
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
+ */
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
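+
+/*
+ * For example, in a 2x2 binned mode with outputSize = 2028x1520 and
+ * analogCrop = (8, 16)/4056x3040, an ISP crop of (507, 380)/1014x760 scales
+ * to (1014, 760)/2028x1520 and then translates to (1022, 776)/2028x1520 in
+ * native sensor coordinates.
+ */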
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCropRPi = controls.get<Span<const Rectangle>>(controls::rpi::ScalerCrops);
+ const auto &scalerCropCore = controls.get<Rectangle>(controls::ScalerCrop);
+ std::vector<Rectangle> scalerCrops;
+
+	/*
+	 * First, create a vector of crops to apply to each ISP output, based on
+	 * either controls::ScalerCrop or controls::rpi::ScalerCrops if present.
+	 *
+	 * If controls::rpi::ScalerCrops is present, apply the given crops to the
+	 * ISP output streams, indexed by the same order in which they were
+	 * configured. This is not the same as the ISP output index. Otherwise,
+	 * if controls::ScalerCrop is present, apply the same crop to all ISP
+	 * output streams.
+	 */
+ for (unsigned int i = 0; i < cropParams_.size(); i++) {
+ if (scalerCropRPi && i < scalerCropRPi->size())
+ scalerCrops.push_back(scalerCropRPi->data()[i]);
+ else if (scalerCropCore)
+ scalerCrops.push_back(*scalerCropCore);
+ }
+
+ for (auto const &[i, scalerCrop] : utils::enumerate(scalerCrops)) {
+ Rectangle nativeCrop = scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Size minSize = cropParams_.at(i).ispMinCropSize.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != cropParams_.at(i).ispCrop) {
+ cropParams_.at(i).ispCrop = ispCrop;
+ platformSetIspCrop(cropParams_.at(i).ispIndex, ispCrop);
+ }
+ }
+}
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+	/*
+	 * It is possible to be here without a pending request, so check
+	 * that we actually have one to action; otherwise simply return the
+	 * buffer back to the stream.
+	 */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+ /*
+ * If we are dropping this frame, do not touch the request, simply
+ * change the state to IDLE when ready.
+ */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+	/*
+	 * Make sure we have all the ISP outputs completed in the case of a
+	 * dropped frame.
+	 */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ if (cropParams_.size()) {
+ std::vector<Rectangle> crops;
+
+ for (auto const &[k, v] : cropParams_)
+ crops.push_back(scaleIspCrop(v.ispCrop));
+
+ request->metadata().set(controls::ScalerCrop, crops[0]);
+ if (crops.size() > 1) {
+ request->metadata().set(controls::rpi::ScalerCrops,
+ Span<const Rectangle>(crops.data(), crops.size()));
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..aae0c2f3
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop(unsigned int index, const Rectangle &ispCrop) = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+ /*
+ * Stores a cascade of Video Mux or Bridge devices between the sensor and
+ * Unicam together with media link across the entities.
+ */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+	/*
+	 * All the functions in this class are called from a single calling
+	 * thread, so we do not need any mutex to protect access to the
+	 * variables below.
+	 */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+
+ struct CropParams {
+ CropParams(Rectangle ispCrop_, Size ispMinCropSize_, unsigned int ispIndex_)
+ : ispCrop(ispCrop_), ispMinCropSize(ispMinCropSize_), ispIndex(ispIndex_)
+ {
+ }
+
+ /* Crop in ISP (camera mode) pixels */
+ Rectangle ispCrop;
+ /* Minimum crop size in ISP output pixels */
+ Size ispMinCropSize;
+ /* Index of the ISP output channel for this crop */
+ unsigned int ispIndex;
+ };
+
+ /* Mapping of CropParams keyed by the output stream order in CameraConfiguration */
+ std::map<unsigned int, CropParams> cropParams_;
+
+ unsigned int dropFrameCount_;
+
+	/*
+	 * If set, this stores the value that represents a gain of one for
+	 * the V4L2_CID_NOTIFY_GAINS control.
+	 */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+	/*
+	 * Store the colour spaces that all our streams will have. RGB format
+	 * streams will have the same colour space as YUV streams, with the
+	 * YCbCr field cleared and the range set to full.
+	 */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
index f446e1ce..accf59eb 100644
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -1,14 +1,19 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.cpp - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
#include <libcamera/base/log.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
+static constexpr unsigned int maxV4L2BufferCount = 32;
namespace libcamera {
@@ -16,38 +21,64 @@ LOG_DEFINE_CATEGORY(RPISTREAM)
namespace RPi {
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
V4L2VideoDevice *Stream::dev() const
{
return dev_.get();
}
-std::string Stream::name() const
+const std::string &Stream::name() const
{
return name_;
}
-void Stream::reset()
+unsigned int Stream::swDownscale() const
{
- external_ = false;
- clearBuffers();
+ return swDownscale_;
}
-void Stream::setExternal(bool external)
+void Stream::setSwDownscale(unsigned int swDownscale)
{
- /* Import streams cannot be external. */
- ASSERT(!external || !importOnly_);
- external_ = external;
+ swDownscale_ = swDownscale;
}
-bool Stream::isExternal() const
+void Stream::resetBuffers()
{
- return external_;
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
}
void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
for (auto const &buffer : *buffers)
- bufferMap_.emplace(id_.get(), buffer.get());
+ bufferEmplace(++id_, buffer.get());
}
const BufferMap &Stream::getBuffers() const
@@ -55,71 +86,42 @@ const BufferMap &Stream::getBuffers() const
return bufferMap_;
}
-int Stream::getBufferId(FrameBuffer *buffer) const
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
{
- if (importOnly_)
- return -1;
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
/* Find the buffer in the map, and return the buffer id. */
auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
- [&buffer](auto const &p) { return p.second == buffer; });
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
if (it == bufferMap_.end())
- return -1;
+ return 0;
return it->first;
}
-void Stream::setExternalBuffer(FrameBuffer *buffer)
+void Stream::setExportedBuffer(FrameBuffer *buffer)
{
- bufferMap_.emplace(ipa::RPi::MaskExternalBuffer | id_.get(), buffer);
-}
-
-void Stream::removeExternalBuffer(FrameBuffer *buffer)
-{
- int id = getBufferId(buffer);
-
- /* Ensure we have this buffer in the stream, and it is marked external. */
- ASSERT(id != -1 && (id & ipa::RPi::MaskExternalBuffer));
- bufferMap_.erase(id);
+ bufferEmplace(++id_, buffer);
}
int Stream::prepareBuffers(unsigned int count)
{
int ret;
- if (!importOnly_) {
- if (count) {
- /* Export some frame buffers for internal use. */
- ret = dev_->exportBuffers(count, &internalBuffers_);
- if (ret < 0)
- return ret;
-
- /* Add these exported buffers to the internal/external buffer list. */
- setExportedBuffers(&internalBuffers_);
-
- /* Add these buffers to the queue of internal usable buffers. */
- for (auto const &buffer : internalBuffers_)
- availableBuffers_.push(buffer.get());
- }
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
- /* We must import all internal/external exported buffers. */
- count = bufferMap_.size();
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
}
- /*
- * If this is an external stream, we must allocate slots for buffers that
- * might be externally allocated. We have no indication of how many buffers
- * may be used, so this might overallocate slots in the buffer cache.
- * Similarly, if this stream is only importing buffers, we do the same.
- *
- * \todo Find a better heuristic, or, even better, an exact solution to
- * this issue.
- */
- if (isExternal() || importOnly_)
- count = count * 2;
-
- return dev_->importBuffers(count);
+ return dev_->importBuffers(maxV4L2BufferCount);
}
int Stream::queueBuffer(FrameBuffer *buffer)
@@ -163,7 +165,7 @@ int Stream::queueBuffer(FrameBuffer *buffer)
void Stream::returnBuffer(FrameBuffer *buffer)
{
- if (!external_) {
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
/* For internal buffers, simply requeue back to the device. */
queueToDevice(buffer);
return;
@@ -172,9 +174,6 @@ void Stream::returnBuffer(FrameBuffer *buffer)
/* Push this buffer back into the queue to be used again. */
availableBuffers_.push(buffer);
- /* Allow the buffer id to be reused. */
- id_.release(getBufferId(buffer));
-
/*
* Do we have any Request buffers that are waiting to be queued?
* If so, do it now as availableBuffers_ will not be empty.
@@ -203,11 +202,32 @@ void Stream::returnBuffer(FrameBuffer *buffer)
}
}
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+ auto const &it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
+
int Stream::queueAllBuffers()
{
int ret;
- if (external_)
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
return 0;
while (!availableBuffers_.empty()) {
@@ -227,13 +247,23 @@ void Stream::releaseBuffers()
clearBuffers();
}
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+ if (flags_ & StreamFlag::RequiresMmap)
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, true));
+ else
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, false));
+}
+
void Stream::clearBuffers()
{
availableBuffers_ = std::queue<FrameBuffer *>{};
requestBuffers_ = std::queue<FrameBuffer *>{};
internalBuffers_.clear();
bufferMap_.clear();
- id_.reset();
+ id_ = 0;
}
int Stream::queueToDevice(FrameBuffer *buffer)
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+ * Indicates that the stream buffers need to be mmaped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+ /* Tracks a unique id key for the bufferMap_ */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+ /*
+ * List of buffers exported internally. These are kept here because the
+ * stream must maintain ownership of them.
+ */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
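+
+/*
+ * Example (illustrative) of how the pipeline handlers use this template:
+ *
+ *   enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+ *   Device<Isp, 4> isp;
+ *   isp[Isp::Input].dev()->setFormat(&format);
+ */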
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
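+ # e.g. 'rpi/vc4' splits into ['rpi', 'vc4']; subdir('vc4') is entered below.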
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+ # Note that the pipeline handler might override this value and
+ # not allocate any internal buffers if it knows they will never
+ # be used. For example if the RAW stream is marked as mandatory
+ # and there are no dropped frames signalled for algorithm
+ # convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
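+ #
+ # For example (illustrative), with the defaults of min_unicam_buffers = 2
+ # and min_total_unicam_buffers = 4, an application that allocates 1 raw
+ # buffer gives max(2, 4 - 1) = 3 internal buffers.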
+ #
+ # "min_total_unicam_buffers": 4,
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+ # Custom timeout value (in ms) for camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..9b37c2f0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..fd8d84b1
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+ /* Array of Unicam and ISP device streams and associated buffers/streams. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaBufAllocator dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop([[maybe_unused]] unsigned int index, const Rectangle &ispCrop) override
+ {
+ Rectangle crop = ispCrop;
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+ /*
+ * Loop over all Unicam instances, but return once a match is found.
+ * This ensures we correctly enumerate the camera when an instance of
+ * Unicam has registered with the media controller, but has not
+ * registered device nodes due to a sensor subdevice failure.
+ */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+ * The lifetimes of these buffers are shorter than those of the
+ * raw buffers, so allocating a fixed number will still suffice
+ * if the application requests a greater number of raw
+ * buffers, as these will be recycled quicker.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronously with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+ /* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+ /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+ * quality and is usually undesired ?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must carry the largest resolution, which the
+ * ordering above guarantees.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+ LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+ << *target;
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If ISP::Output1 stream has not been requested by the application, we
+ * set it up for internal use now. This second stream will be used for
+ * fast colour denoise, and must be a quarter resolution of the ISP::Output0
+ * stream. However, also limit the maximum size to 1200 pixels in the
+ * larger dimension, just to avoid being wasteful with buffer allocations
+ * and memory bandwidth.
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
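+
+ /*
+ * Example (illustrative): with a 1920x1080 Output0, the limit is
+ * 1200x675, so Output1 becomes (960x540).boundedTo(1200x675)
+ * .alignedDownTo(2, 2) = 960x540.
+ */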
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ Rectangle ispCrop = size.centeredTo(Rectangle(unicamFormat.size).center());
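+ /*
+ * Example (illustrative): a 1920x1080 Unicam format with a 4:3
+ * 1440x1080 output gives size = 1440x1080 and
+ * ispCrop = Rectangle(240, 0, 1440, 1080).
+ */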
+
+ platformSetIspCrop(0, ispCrop);
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ cropParams_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(ispCrop, testCrop.size(), 0));
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index = 0;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Look up the sensor controls used for this frame sequence from
+ * DelayedControl and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index = 0;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * The ISP statistics buffer must not be re-queued or sent back to the
+ * application until the IPA signals that it has finished with it.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+ buffer = unicam_[Unicam::Image].getBuffers().at(bayer & RPi::MaskID).buffer;
+ LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << (bayer & RPi::MaskID)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+ buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId & RPi::MaskID).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+ * Set the dequeue timeout to 5x the maximum frame length advertised by
+ * the IPA, subject to a minimum value of 1s.
+ */
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
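+ /* e.g. maxFrameLengthMs = 250 gives 1250ms, while 33 is clamped up to 1s. */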
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+ /* Take the first request from the queue and start processing it. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffers with a timestamp lower than the
+ * current bayer buffer will be removed and re-queued to the driver.
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+ /* No matching embedded data buffer was found; carry on without one. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
deleted file mode 100644
index 9cbc6ee3..00000000
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ /dev/null
@@ -1,399 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.cpp - Format converter for simple pipeline handler
- */
-
-#include "converter.h"
-
-#include <algorithm>
-#include <limits.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/framebuffer.h>
-#include <libcamera/geometry.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(SimplePipeline)
-
-/* -----------------------------------------------------------------------------
- * SimpleConverter::Stream
- */
-
-SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
- : converter_(converter), index_(index)
-{
- m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode_);
-
- m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
- m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
-
- int ret = m2m_->open();
- if (ret < 0)
- m2m_.reset();
-}
-
-int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
-{
- V4L2PixelFormat videoFormat =
- V4L2PixelFormat::fromPixelFormat(inputCfg.pixelFormat);
-
- V4L2DeviceFormat format;
- format.fourcc = videoFormat;
- format.size = inputCfg.size;
- format.planesCount = 1;
- format.planes[0].bpl = inputCfg.stride;
-
- int ret = m2m_->output()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set input format: " << strerror(-ret);
- return ret;
- }
-
- if (format.fourcc != videoFormat || format.size != inputCfg.size ||
- format.planes[0].bpl != inputCfg.stride) {
- LOG(SimplePipeline, Error)
- << "Input format not supported (requested "
- << inputCfg.size.toString() << "-" << videoFormat.toString()
- << ", got " << format.toString() << ")";
- return -EINVAL;
- }
-
- /* Set the pixel format and size on the output. */
- videoFormat = V4L2PixelFormat::fromPixelFormat(outputCfg.pixelFormat);
- format = {};
- format.fourcc = videoFormat;
- format.size = outputCfg.size;
-
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set output format: " << strerror(-ret);
- return ret;
- }
-
- if (format.fourcc != videoFormat || format.size != outputCfg.size) {
- LOG(SimplePipeline, Error)
- << "Output format not supported";
- return -EINVAL;
- }
-
- inputBufferCount_ = inputCfg.bufferCount;
- outputBufferCount_ = outputCfg.bufferCount;
-
- return 0;
-}
-
-int SimpleConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- return m2m_->capture()->exportBuffers(count, buffers);
-}
-
-int SimpleConverter::Stream::start()
-{
- int ret = m2m_->output()->importBuffers(inputBufferCount_);
- if (ret < 0)
- return ret;
-
- ret = m2m_->capture()->importBuffers(outputBufferCount_);
- if (ret < 0) {
- stop();
- return ret;
- }
-
- ret = m2m_->output()->streamOn();
- if (ret < 0) {
- stop();
- return ret;
- }
-
- ret = m2m_->capture()->streamOn();
- if (ret < 0) {
- stop();
- return ret;
- }
-
- return 0;
-}
-
-void SimpleConverter::Stream::stop()
-{
- m2m_->capture()->streamOff();
- m2m_->output()->streamOff();
- m2m_->capture()->releaseBuffers();
- m2m_->output()->releaseBuffers();
-}
-
-int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
- FrameBuffer *output)
-{
- int ret = m2m_->output()->queueBuffer(input);
- if (ret < 0)
- return ret;
-
- ret = m2m_->capture()->queueBuffer(output);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-std::string SimpleConverter::Stream::logPrefix() const
-{
- return "stream" + std::to_string(index_);
-}
-
-void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
-{
- auto it = converter_->queue_.find(buffer);
- if (it == converter_->queue_.end())
- return;
-
- if (!--it->second) {
- converter_->inputBufferReady.emit(buffer);
- converter_->queue_.erase(it);
- }
-}
-
-void SimpleConverter::Stream::captureBufferReady(FrameBuffer *buffer)
-{
- converter_->outputBufferReady.emit(buffer);
-}
-
-/* -----------------------------------------------------------------------------
- * SimpleConverter
- */
-
-SimpleConverter::SimpleConverter(MediaDevice *media)
-{
- /*
- * Locate the video node. There's no need to validate the pipeline
- * further, the caller guarantees that this is a V4L2 mem2mem device.
- */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto it = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->function() == MEDIA_ENT_F_IO_V4L;
- });
- if (it == entities.end())
- return;
-
- deviceNode_ = (*it)->deviceNode();
-
- m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode_);
- int ret = m2m_->open();
- if (ret < 0) {
- m2m_.reset();
- return;
- }
-}
-
-std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
-{
- if (!m2m_)
- return {};
-
- /*
- * Set the format on the input side (V4L2 output) of the converter to
- * enumerate the conversion capabilities on its output (V4L2 capture).
- */
- V4L2DeviceFormat v4l2Format;
- v4l2Format.fourcc = V4L2PixelFormat::fromPixelFormat(input);
- v4l2Format.size = { 1, 1 };
-
- int ret = m2m_->output()->setFormat(&v4l2Format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- std::vector<PixelFormat> pixelFormats;
-
- for (const auto &format : m2m_->capture()->formats()) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat)
- pixelFormats.push_back(pixelFormat);
- }
-
- return pixelFormats;
-}
-
-SizeRange SimpleConverter::sizes(const Size &input)
-{
- if (!m2m_)
- return {};
-
- /*
- * Set the size on the input side (V4L2 output) of the converter to
- * enumerate the scaling capabilities on its output (V4L2 capture).
- */
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat();
- format.size = input;
-
- int ret = m2m_->output()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- SizeRange sizes;
-
- format.size = { 1, 1 };
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- sizes.min = format.size;
-
- format.size = { UINT_MAX, UINT_MAX };
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- sizes.max = format.size;
-
- return sizes;
-}
-
-std::tuple<unsigned int, unsigned int>
-SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
- const Size &size)
-{
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(pixelFormat);
- format.size = size;
-
- int ret = m2m_->capture()->tryFormat(&format);
- if (ret < 0)
- return std::make_tuple(0, 0);
-
- return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
-}
-
-int SimpleConverter::configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
-{
- int ret = 0;
-
- streams_.clear();
- streams_.reserve(outputCfgs.size());
-
- for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
- Stream &stream = streams_.emplace_back(this, i);
-
- if (!stream.isValid()) {
- LOG(SimplePipeline, Error)
- << "Failed to create stream " << i;
- ret = -EINVAL;
- break;
- }
-
- ret = stream.configure(inputCfg, outputCfgs[i]);
- if (ret < 0)
- break;
- }
-
- if (ret < 0) {
- streams_.clear();
- return ret;
- }
-
- return 0;
-}
-
-int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- if (output >= streams_.size())
- return -EINVAL;
-
- return streams_[output].exportBuffers(count, buffers);
-}
-
-int SimpleConverter::start()
-{
- int ret;
-
- for (Stream &stream : streams_) {
- ret = stream.start();
- if (ret < 0) {
- stop();
- return ret;
- }
- }
-
- return 0;
-}
-
-void SimpleConverter::stop()
-{
- for (Stream &stream : utils::reverse(streams_))
- stream.stop();
-}
-
-int SimpleConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
-{
- unsigned int mask = 0;
- int ret;
-
- /*
- * Validate the outputs as a sanity check: at least one output is
- * required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
- */
- if (outputs.empty())
- return -EINVAL;
-
- for (auto [index, buffer] : outputs) {
- if (!buffer)
- return -EINVAL;
- if (index >= streams_.size())
- return -EINVAL;
- if (mask & (1 << index))
- return -EINVAL;
-
- mask |= 1 << index;
- }
-
- /* Queue the input and output buffers to all the streams. */
- for (auto [index, buffer] : outputs) {
- ret = streams_[index].queueBuffers(input, buffer);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Add the input buffer to the queue, with the number of streams as a
- * reference count. Completion of the input buffer will be signalled by
- * the stream that releases the last reference.
- */
- queue_.emplace(std::piecewise_construct,
- std::forward_as_tuple(input),
- std::forward_as_tuple(outputs.size()));
-
- return 0;
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
deleted file mode 100644
index f0ebe2e0..00000000
--- a/src/libcamera/pipeline/simple/converter.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.h - Format converter for simple pipeline handler
- */
-
-#pragma once
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include <libcamera/pixel_format.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-
-namespace libcamera {
-
-class FrameBuffer;
-class MediaDevice;
-class Size;
-class SizeRange;
-struct StreamConfiguration;
-class V4L2M2MDevice;
-
-class SimpleConverter
-{
-public:
- SimpleConverter(MediaDevice *media);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- std::vector<PixelFormat> formats(PixelFormat input);
- SizeRange sizes(const Size &input);
-
- std::tuple<unsigned int, unsigned int>
- strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
-
- int configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int ouput, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
-
- Signal<FrameBuffer *> inputBufferReady;
- Signal<FrameBuffer *> outputBufferReady;
-
-private:
- class Stream : protected Loggable
- {
- public:
- Stream(SimpleConverter *converter, unsigned int index);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- int configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input, FrameBuffer *output);
-
- protected:
- std::string logPrefix() const override;
-
- private:
- void captureBufferReady(FrameBuffer *buffer);
- void outputBufferReady(FrameBuffer *buffer);
-
- SimpleConverter *converter_;
- unsigned int index_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- unsigned int inputBufferCount_;
- unsigned int outputBufferCount_;
- };
-
- std::string deviceNode_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- std::vector<Stream> streams_;
- std::map<FrameBuffer *, unsigned int> queue_;
-};
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
index 9c99b32f..dda3de97 100644
--- a/src/libcamera/pipeline/simple/meson.build
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
- 'converter.cpp',
+libcamera_internal_sources += files([
'simple.cpp',
])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index ece821bf..8ac24e6e 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
- * simple.cpp - Pipeline handler for simple pipelines
+ * Pipeline handler for simple pipelines
*/
#include <algorithm>
@@ -13,8 +13,9 @@
#include <memory>
#include <queue>
#include <set>
-#include <string>
+#include <stdint.h>
#include <string.h>
+#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
@@ -30,14 +31,16 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter.h"
+#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
-#include "converter.h"
-
namespace libcamera {
LOG_DEFINE_CATEGORY(SimplePipeline)
@@ -95,21 +98,51 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
* valid pipeline configurations are found, a Camera is registered for the
* SimpleCameraData instance.
*
+ * Pipeline Traversal
+ * ------------------
+ *
+ * During the breadth-first search, the pipeline is traversed from entity to
+ * entity, by following media graph links from source to sink, starting at the
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers the default internal routes only and doesn't attempt to
+ * setup custom routes. This can be extended if needed.
+ *
+ * The shortest path between the camera sensor and a video node is stored in
+ * SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
+ * ordered along the data path from the camera sensor to the video node. The
+ * Entity structure stores a pointer to the MediaEntity, as well as information
+ * about how it is connected in that particular path for later usage when
+ * configuring the pipeline.
+ *
* Pipeline Configuration
* ----------------------
*
* The simple pipeline handler configures the pipeline by propagating V4L2
* subdev formats from the camera sensor to the video node. The format is first
- * set on the camera sensor's output, using the native camera sensor
- * resolution. Then, on every link in the pipeline, the format is retrieved on
- * the link source and set unmodified on the link sink.
+ * set on the camera sensor's output, picking a resolution supported by the
+ * sensor that best matches the needs of the requested streams. Then, on every
+ * link in the pipeline, the format is retrieved on the link source and set
+ * unmodified on the link sink.
*
- * When initializating the camera data, this above procedure is repeated for
- * every media bus format supported by the camera sensor. Upon reaching the
- * video node, the pixel formats compatible with the media bus format are
- * enumerated. Each of those pixel formats corresponds to one possible pipeline
- * configuration, stored as an instance of SimpleCameraData::Configuration in
- * the SimpleCameraData::formats_ map.
+ * The best sensor resolution is selected using a heuristic that tries to
+ * minimize the required bus and memory bandwidth, as the simple pipeline
+ * handler is typically used on smaller, less powerful systems. To avoid the
+ * need to upscale, the pipeline handler picks the smallest sensor resolution
+ * large enough to accommodate the needs of all streams. Resolutions that
+ * significantly restrict the field of view are ignored.
+ *
+ * When initializing the camera data, the above format propagation procedure
+ * is repeated for every media bus format and size supported by the camera
+ * sensor. Upon reaching the video node, the pixel formats compatible with the
+ * media bus format are enumerated. Each combination of the input media bus
+ * format, output pixel format and output size is recorded in an instance of
+ * the SimpleCameraData::Configuration structure, stored in the
+ * SimpleCameraData::configs_ vector.
*
* Format Conversion and Scaling
* -----------------------------
@@ -133,7 +166,7 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
* handler has no a priori knowledge of. The pipeline handler thus implements a
* heuristic to handle sharing of hardware resources in a generic fashion.
*
- * Two cameras are considered to be mutually exclusive if their share common
+ * Two cameras are considered to be mutually exclusive if they share common
* pads along the pipeline from the camera sensor to the video node. An entity
* can thus be used concurrently by multiple cameras, as long as pads are
* distinct.
@@ -155,14 +188,25 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Whether use of the Software ISP is enabled for this driver.
+ *
+ * The Software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "imx7-csi", { { "pxp", 1 } } },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
+ { "j721e-csi2rx", {}, true },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -180,8 +224,10 @@ public:
int init();
int setupLinks();
int setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence);
- void bufferReady(FrameBuffer *buffer);
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
+ void imageBufferReady(FrameBuffer *buffer);
+ void clearIncompleteRequests();
unsigned int streamIndex(const Stream *stream) const
{
@@ -192,6 +238,11 @@ public:
/* The media entity, always valid. */
MediaEntity *entity;
/*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
@@ -210,6 +261,7 @@ public:
struct Configuration {
uint32_t code;
+ Size sensorSize;
PixelFormat captureFormat;
Size captureSize;
std::vector<PixelFormat> outputFormats;
@@ -227,16 +279,30 @@ public:
V4L2VideoDevice *video_;
std::vector<Configuration> configs_;
- std::map<PixelFormat, const Configuration *> formats_;
+ std::map<PixelFormat, std::vector<const Configuration *>> formats_;
- std::unique_ptr<SimpleConverter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ struct RequestOutputs {
+ Request *request;
+ std::map<const Stream *, FrameBuffer *> outputs;
+ };
+ std::queue<RequestOutputs> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
+
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady(uint32_t frame, uint32_t bufferId);
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -252,6 +318,7 @@ public:
}
bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
private:
/*
@@ -264,6 +331,7 @@ private:
const SimpleCameraData::Configuration *pipeConfig_;
bool needConversion_;
+ Transform combinedTransform_;
};
class SimplePipelineHandler : public PipelineHandler
@@ -271,8 +339,8 @@ class SimplePipelineHandler : public PipelineHandler
public:
SimplePipelineHandler(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -286,6 +354,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -304,15 +373,16 @@ private:
return static_cast<SimpleCameraData *>(camera->_d());
}
- std::vector<MediaEntity *> locateSensors();
+ std::vector<MediaEntity *> locateSensors(MediaDevice *media);
+ static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
- MediaDevice *media_;
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -324,8 +394,6 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
MediaEntity *sensor)
: Camera::Private(pipe), streams_(numStreams)
{
- int ret;
-
/*
* Find the shortest path from the camera sensor to a video capture
* device using the breadth-first search algorithm. This heuristic will
@@ -359,17 +427,40 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
break;
}
- /* The actual breadth-first search algorithm. */
visited.insert(entity);
- for (MediaPad *pad : entity->pads()) {
- if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
- continue;
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
for (MediaLink *link : pad->links()) {
MediaEntity *next = link->sink()->entity();
if (visited.find(next) == visited.end()) {
queue.push({ next, link->sink() });
- parents.insert({ next, { entity, sinkPad, pad, link } });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
}
}
}
@@ -383,7 +474,7 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
* to the sensor. Store all the entities in the pipeline, from the
* camera sensor to the video node, in entities_.
*/
- entities_.push_front({ entity, sinkPad, nullptr, nullptr });
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
for (auto it = parents.find(entity); it != parents.end();
it = parents.find(entity)) {
@@ -393,12 +484,9 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
}
/* Finally also remember the sensor. */
- sensor_ = std::make_unique<CameraSensor>(sensor);
- ret = sensor_->init();
- if (ret) {
- sensor_.reset();
+ sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!sensor_)
return;
- }
LOG(SimplePipeline, Debug)
<< "Found pipeline: "
@@ -428,14 +516,45 @@ int SimpleCameraData::init()
/* Open the converter, if any. */
MediaDevice *converter = pipe->converter();
if (converter) {
- converter_ = std::make_unique<SimpleConverter>(converter);
- if (!converter_->isValid()) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
LOG(SimplePipeline, Warning)
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+	/*
+	 * Instantiate the Software ISP if it is enabled for the given driver
+	 * and no converter is in use.
+	 */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get(), &controlInfo_);
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
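+/*
+ * To illustrate the connection types discussed above (a sketch, not part of
+ * the change itself): connecting a signal to a member of a class that does
+ * not inherit from Object delivers synchronously in the emitting thread,
+ * while connecting it through an Object receiver queues the invocation to
+ * that receiver's thread.
+ *
+ *	swIsp_->inputBufferReady.connect(
+ *		this, &SimpleCameraData::conversionInputDone);
+ *		// direct: SimpleCameraData is not an Object, so this would
+ *		// run in the soft ISP thread
+ *	swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *b) {
+ *		this->conversionInputDone(b);
+ *	});	// queued: pipe is an Object bound to the pipeline handler
+ *		// thread, as done above
+ */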
@@ -451,57 +570,12 @@ int SimpleCameraData::init()
return ret;
/*
- * Enumerate the possible pipeline configurations. For each media bus
- * format supported by the sensor, propagate the formats through the
- * pipeline, and enumerate the corresponding possible V4L2 pixel
- * formats on the video node.
+ * Generate the list of possible pipeline configurations by trying each
+ * media bus format and size supported by the sensor.
*/
for (unsigned int code : sensor_->mbusCodes()) {
- V4L2SubdeviceFormat format{};
- format.mbus_code = code;
- format.size = sensor_->resolution();
-
- ret = setupFormats(&format, V4L2Subdevice::TryFormat);
- if (ret < 0) {
- LOG(SimplePipeline, Debug)
- << "Media bus code " << utils::hex(code, 4)
- << " not supported for this pipeline";
- /* Try next mbus_code supported by the sensor */
- continue;
- }
-
- V4L2VideoDevice::Formats videoFormats =
- video_->formats(format.mbus_code);
-
- LOG(SimplePipeline, Debug)
- << "Adding configuration for " << format.size.toString()
- << " in pixel formats [ "
- << utils::join(videoFormats, ", ",
- [](const auto &f) {
- return f.first.toString();
- })
- << " ]";
-
- for (const auto &videoFormat : videoFormats) {
- PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
- if (!pixelFormat)
- continue;
-
- Configuration config;
- config.code = code;
- config.captureFormat = pixelFormat;
- config.captureSize = format.size;
-
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
- config.outputFormats = converter_->formats(pixelFormat);
- config.outputSizes = converter_->sizes(format.size);
- }
-
- configs_.push_back(config);
- }
+ for (const Size &size : sensor_->sizes(code))
+ tryPipeline(code, size);
}
if (configs_.empty()) {
@@ -509,16 +583,12 @@ int SimpleCameraData::init()
return -EINVAL;
}
- /*
- * Map the pixel formats to configurations. Any previously stored value
- * is overwritten, as the pipeline handler currently doesn't care about
- * how a particular PixelFormat is achieved.
- */
+ /* Map the pixel formats to configurations. */
for (const Configuration &config : configs_) {
- formats_[config.captureFormat] = &config;
+ formats_[config.captureFormat].push_back(&config);
for (PixelFormat fmt : config.outputFormats)
- formats_[fmt] = &config;
+ formats_[fmt].push_back(&config);
}
properties_ = sensor_->properties();
@@ -526,6 +596,78 @@ int SimpleCameraData::init()
return 0;
}
+/*
+ * Generate a list of supported pipeline configurations for a sensor media bus
+ * code and size.
+ *
+ * First propagate the media bus code and size through the pipeline from the
+ * camera sensor to the video node. Then, query the video node for all supported
+ * pixel formats compatible with the media bus code. For each pixel format, store
+ * a full pipeline configuration in the configs_ vector.
+ */
+void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
+{
+ /*
+ * Propagate the format through the pipeline, and enumerate the
+ * corresponding possible V4L2 pixel formats on the video node.
+ */
+ V4L2SubdeviceFormat format{};
+ format.code = code;
+ format.size = size;
+
+ int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
+ if (ret < 0) {
+		/*
+		 * Pipeline configuration failed. Restore the format, which
+		 * setupFormats() may have modified, for the log message, and
+		 * skip this configuration.
+		 */
+ format.code = code;
+ format.size = size;
+ LOG(SimplePipeline, Debug)
+ << "Sensor format " << format
+ << " not supported for this pipeline";
+ return;
+ }
+
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
+
+ LOG(SimplePipeline, Debug)
+ << "Adding configuration for " << format.size
+ << " in pixel formats [ "
+ << utils::join(videoFormats, ", ",
+ [](const auto &f) {
+ return f.first.toString();
+ })
+ << " ]";
+
+ for (const auto &videoFormat : videoFormats) {
+ PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
+ if (!pixelFormat)
+ continue;
+
+ Configuration config;
+ config.code = code;
+ config.sensorSize = size;
+ config.captureFormat = pixelFormat;
+ config.captureSize = format.size;
+
+ if (converter_) {
+ config.outputFormats = converter_->formats(pixelFormat);
+ config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+			/* Do not use the software ISP for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+
+ configs_.push_back(config);
+ }
+}
+
int SimpleCameraData::setupLinks()
{
int ret;
@@ -535,15 +677,32 @@ int SimpleCameraData::setupLinks()
* multiple sink links to be enabled together, even on different sink
* pads. We must thus start by disabling all sink links (but the one we
* want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
*/
+ MediaLink *sinkLink = nullptr;
+
for (SimpleCameraData::Entity &e : entities_) {
- if (!e.sourceLink)
- break;
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
- MediaEntity *remote = e.sourceLink->sink()->entity();
- for (MediaPad *pad : remote->pads()) {
for (MediaLink *link : pad->links()) {
- if (link == e.sourceLink)
+ if (link == sinkLink)
continue;
if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
@@ -555,18 +714,21 @@ int SimpleCameraData::setupLinks()
}
}
- if (!(e.sourceLink->flags() & MEDIA_LNK_FL_ENABLED)) {
- ret = e.sourceLink->setEnabled(true);
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
if (ret < 0)
return ret;
}
+
+ sinkLink = e.sourceLink;
}
return 0;
}
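+/*
+ * Worked example of the walk above, assuming a hypothetical three-entity
+ * pipeline with entities_ = [ sensor (source link L1), csi (source link L2),
+ * video (no source link) ]: the first iteration only records L1 as the sink
+ * link. The second iteration, run in the context of the csi entity, disables
+ * all competing links on the csi pads, enables L1 and records L2. The third
+ * iteration does the same for the video node with L2.
+ */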
int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence)
+ V4L2Subdevice::Whence whence,
+ Transform transform)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
@@ -575,7 +737,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
- ret = sensor_->setFormat(format);
+ ret = sensor_->setFormat(format, transform);
if (ret < 0)
return ret;
@@ -602,31 +764,28 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
if (ret < 0)
return ret;
- if (format->mbus_code != sourceFormat.mbus_code ||
+ if (format->code != sourceFormat.code ||
format->size != sourceFormat.size) {
LOG(SimplePipeline, Debug)
<< "Source '" << source->entity()->name()
<< "':" << source->index()
- << " produces " << sourceFormat.toString()
+ << " produces " << sourceFormat
<< ", sink '" << sink->entity()->name()
<< "':" << sink->index()
- << " requires " << format->toString();
+ << " requires " << *format;
return -EINVAL;
}
}
LOG(SimplePipeline, Debug)
- << "Link '" << source->entity()->name()
- << "':" << source->index()
- << " -> '" << sink->entity()->name()
- << "':" << sink->index()
- << " configured with format " << format->toString();
+ << "Link " << *link << ": configured with format "
+ << *format;
}
return 0;
}
-void SimpleCameraData::bufferReady(FrameBuffer *buffer)
+void SimpleCameraData::imageBufferReady(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -636,7 +795,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -645,26 +804,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
- Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
- FrameBuffer *outputBuffer = item.second;
- request = outputBuffer->request();
- pipe->completeBuffer(request, outputBuffer);
- }
- converterQueue_.pop();
+ const RequestOutputs &outputs = conversionQueue_.front();
+ for (auto &[stream, buf] : outputs.outputs)
+ pipe->completeBuffer(outputs.request, buf);
+ pipe->completeRequest(outputs.request);
+ conversionQueue_.pop();
- if (request)
- pipe->completeRequest(request);
return;
}
@@ -678,9 +833,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
- const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ if (useConversion_ && !conversionQueue_.empty()) {
+ const std::map<const Stream *, FrameBuffer *> &outputs =
+ conversionQueue_.front().outputs;
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -693,18 +848,28 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+	 * Queue the captured buffer and the request's output buffers to the
+	 * converter or Software ISP if format conversion is needed. If there's
+	 * no queued request, just requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front().outputs);
+ else
+ /*
+			 * request->sequence() cannot be retrieved from `buffer'
+			 * inside queueBuffers(), as the unique_ptr bookkeeping
+			 * has already invalidated buffer->request() at that point.
+ */
+ swIsp_->queueBuffers(request->sequence(), buffer,
+ conversionQueue_.front().outputs);
+
+ conversionQueue_.pop();
return;
}
@@ -713,13 +878,21 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::clearIncompleteRequests()
+{
+ while (!conversionQueue_.empty()) {
+ pipe()->cancelRequest(conversionQueue_.front().request);
+ conversionQueue_.pop();
+ }
+}
+
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -729,6 +902,56 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId)
+{
+ swIsp_->processStats(frame, bufferId,
+ delayedCtrls_->get(frame));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+		if (!pad) {
+			LOG(SimplePipeline, Warning)
+				<< "Entity " << entity->name()
+				<< " has invalid route source pad "
+				<< route.source.pad;
+			/* Skip the invalid route instead of storing a null pad. */
+			continue;
+		}
+
+		pads.push_back(pad);
+ }
+
+ return pads;
+}
+
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
@@ -740,17 +963,45 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
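+/*
+ * Worked example with hypothetical values: for a supported range of
+ * [320x240, 1920x1080] with steps 16x8, a requested size of 1000x600 is
+ * first clamped to 1000x600, shrunk by the minimum to 680x360, aligned down
+ * to 672x360 and grown back to 992x600. Valid results are thus of the form
+ * min + k * step in each direction.
+ */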
+
+} /* namespace */
+
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
+ const CameraSensor *sensor = data_->sensor_.get();
Status status = Valid;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
/* Cap the number of entries to the available streams. */
if (config_.size() > data_->streams_.size()) {
@@ -758,23 +1009,71 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
status = Adjusted;
}
+ /* Find the largest stream size. */
+ Size maxStreamSize;
+ for (const StreamConfiguration &cfg : config_)
+ maxStreamSize.expandTo(cfg.size);
+
+ LOG(SimplePipeline, Debug)
+ << "Largest stream size is " << maxStreamSize;
+
/*
- * Pick a configuration for the pipeline based on the pixel format for
- * the streams (ordered from highest to lowest priority). Default to
- * the first pipeline configuration if no streams requests a supported
- * pixel format.
+ * Find the best configuration for the pipeline using a heuristic.
+ * First select the pixel format based on the streams (which are
+ * considered ordered from highest to lowest priority). Default to the
+ * first pipeline configuration if no streams request a supported pixel
+ * format.
*/
- pipeConfig_ = data_->formats_.begin()->second;
+ const std::vector<const SimpleCameraData::Configuration *> *configs =
+ &data_->formats_.begin()->second;
for (const StreamConfiguration &cfg : config_) {
auto it = data_->formats_.find(cfg.pixelFormat);
if (it != data_->formats_.end()) {
- pipeConfig_ = it->second;
+ configs = &it->second;
break;
}
}
/*
+ * \todo Pick the best sensor output media bus format when the
+ * requested pixel format can be produced from multiple sensor media
+ * bus formats.
+ */
+
+ /*
+	 * Then pick, among the possible configurations for the pixel format,
+ * the smallest sensor resolution that can accommodate all streams
+ * without upscaling.
+ */
+ const SimpleCameraData::Configuration *maxPipeConfig = nullptr;
+ pipeConfig_ = nullptr;
+
+ for (const SimpleCameraData::Configuration *pipeConfig : *configs) {
+ const Size &size = pipeConfig->captureSize;
+
+ if (size.width >= maxStreamSize.width &&
+ size.height >= maxStreamSize.height) {
+ if (!pipeConfig_ || size < pipeConfig_->captureSize)
+ pipeConfig_ = pipeConfig;
+ }
+
+ if (!maxPipeConfig || maxPipeConfig->captureSize < size)
+ maxPipeConfig = pipeConfig;
+ }
+
+ /* If no configuration was large enough, select the largest one. */
+ if (!pipeConfig_)
+ pipeConfig_ = maxPipeConfig;
+
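+	/*
+	 * Worked example with hypothetical candidates: with capture sizes of
+	 * 640x480, 1280x720 and 1920x1080 and a largest stream size of
+	 * 800x600, the 1280x720 configuration is picked as the smallest that
+	 * avoids upscaling. Were the largest stream 2560x1440, no candidate
+	 * would be large enough and the 1920x1080 fallback would be used.
+	 */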
+ LOG(SimplePipeline, Debug)
+ << "Picked "
+ << V4L2SubdeviceFormat{ pipeConfig_->code, pipeConfig_->sensorSize, {} }
+ << " -> " << pipeConfig_->captureSize
+ << "-" << pipeConfig_->captureFormat
+ << " for max stream size " << maxStreamSize;
+
+ /*
* Adjust the requested streams.
*
* Enable usage of the converter when producing multiple streams, as
@@ -807,10 +1106,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+ * the smaller valid output size closest to the requested.
+			 * the closest smaller valid output size to the requested one.
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
- << "Adjusting size from " << cfg.size.toString()
- << " to " << pipeConfig_->captureSize.toString();
- cfg.size = pipeConfig_->captureSize;
+ << "Adjusting size from " << cfg.size
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -822,13 +1130,16 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -839,7 +1150,7 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
cfg.frameSize = format.planes[0].size;
}
- cfg.bufferCount = 3;
+ cfg.bufferCount = 4;
}
return status;
@@ -854,25 +1165,44 @@ SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
{
}
-CameraConfiguration *SimplePipelineHandler::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
SimpleCameraData *data = cameraData(camera);
- CameraConfiguration *config =
- new SimpleCameraConfiguration(camera, data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
if (roles.empty())
return config;
/* Create the formats map. */
std::map<PixelFormat, std::vector<SizeRange>> formats;
- std::transform(data->formats_.begin(), data->formats_.end(),
- std::inserter(formats, formats.end()),
- [](const auto &format) -> decltype(formats)::value_type {
- const PixelFormat &pixelFormat = format.first;
- const Size &size = format.second->captureSize;
- return { pixelFormat, { size } };
- });
+
+ for (const SimpleCameraData::Configuration &cfg : data->configs_) {
+ for (PixelFormat format : cfg.outputFormats)
+ formats[format].push_back(cfg.outputSizes);
+ }
+
+ /* Sort the sizes and merge any consecutive overlapping ranges. */
+ for (auto &[format, sizes] : formats) {
+ std::sort(sizes.begin(), sizes.end(),
+			  [](const SizeRange &a, const SizeRange &b) {
+ return a.min < b.min;
+ });
+
+ auto cur = sizes.begin();
+ auto next = cur;
+
+ while (++next != sizes.end()) {
+ if (cur->max.width >= next->min.width &&
+ cur->max.height >= next->min.height)
+ cur->max = next->max;
+ else if (++cur != next)
+ *cur = *next;
+ }
+
+ sizes.erase(++cur, sizes.end());
+ }
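+	/*
+	 * Worked example with hypothetical ranges: [100x100-200x200],
+	 * [150x150-400x400] and [500x500-600x600] are already sorted, the
+	 * first two overlap and merge into [100x100-400x400], and the final
+	 * list is [100x100-400x400], [500x500-600x600].
+	 */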
/*
* Create the stream configurations. Take the first entry in the formats
@@ -911,15 +1241,16 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
V4L2SubdeviceFormat format{};
- format.mbus_code = pipeConfig->code;
- format.size = data->sensor_->resolution();
+ format.code = pipeConfig->code;
+ format.size = pipeConfig->sensorSize;
- ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat);
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
if (ret < 0)
return ret;
/* Configure the video node. */
- V4L2PixelFormat videoFormat = V4L2PixelFormat::fromPixelFormat(pipeConfig->captureFormat);
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = videoFormat;
@@ -939,34 +1270,51 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
captureFormat.size != pipeConfig->captureSize) {
LOG(SimplePipeline, Error)
<< "Unable to configure capture in "
- << pipeConfig->captureSize.toString() << "-"
- << videoFormat.toString();
+ << pipeConfig->captureSize << "-" << videoFormat
+ << " (got " << captureFormat << ")";
return -EINVAL;
}
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
if (outputCfgs.empty())
return 0;
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ data->video_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
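+	/*
+	 * For illustration (a sketch of the mechanism, not taken from this
+	 * change): with an exposure delay of 2, a value pushed to
+	 * delayedCtrls_ while frame N starts only takes effect on the sensor
+	 * around frame N+2, and delayedCtrls_->get(N+2) reports it so that
+	 * the statistics passed to the software ISP match the controls that
+	 * were actually applied.
+	 */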
StreamConfiguration inputCfg;
inputCfg.pixelFormat = pipeConfig->captureFormat;
inputCfg.size = pipeConfig->captureSize;
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ if (data->converter_) {
+ return data->converter_->configure(inputCfg, outputCfgs);
+ } else {
+ ipa::soft::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->sensor_->controls();
+ return data->swIsp_->configure(inputCfg, outputCfgs, configInfo);
+ }
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -979,9 +1327,10 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1000,13 +1349,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1017,7 +1366,7 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- video->bufferReady.connect(data, &SimpleCameraData::bufferReady);
+ video->bufferReady.connect(data, &SimpleCameraData::imageBufferReady);
ret = video->streamOn();
if (ret < 0) {
@@ -1025,15 +1374,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1045,15 +1400,20 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
- video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
+ video->bufferReady.disconnect(data, &SimpleCameraData::imageBufferReady);
- data->converterBuffers_.clear();
+ data->clearIncompleteRequests();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1063,7 +1423,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
SimpleCameraData *data = cameraData(camera);
int ret;
- std::map<unsigned int, FrameBuffer *> buffers;
+ std::map<const Stream *, FrameBuffer *> buffers;
for (auto &[stream, buffer] : request->buffers()) {
/*
@@ -1071,8 +1431,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
- buffers.emplace(data->streamIndex(stream), buffer);
+ if (data->useConversion_) {
+ buffers.emplace(stream, buffer);
} else {
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
@@ -1080,8 +1440,11 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_) {
+ data->conversionQueue_.push({ request, std::move(buffers) });
+ if (data->swIsp_)
+ data->swIsp_->queueRequest(request->sequence(), request->controls());
+ }
return 0;
}
@@ -1090,7 +1453,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* Match and Setup
*/
-std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
+std::vector<MediaEntity *>
+SimplePipelineHandler::locateSensors(MediaDevice *media)
{
std::vector<MediaEntity *> entities;
@@ -1098,7 +1462,7 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
* Gather all the camera sensor entities based on the function they
* expose.
*/
- for (MediaEntity *entity : media_->entities()) {
+ for (MediaEntity *entity : media->entities()) {
if (entity->function() == MEDIA_ENT_F_CAM_SENSOR)
entities.push_back(entity);
}
@@ -1151,21 +1515,53 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
return sensors;
}
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * If the routing table is empty we won't be able to meaningfully use
+ * the subdev.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
unsigned int numStreams = 1;
+ MediaDevice *media;
for (const SimplePipelineInfo &inf : supportedDevices) {
DeviceMatch dm(inf.driver);
- media_ = acquireMediaDevice(enumerator, dm);
- if (media_) {
+ media = acquireMediaDevice(enumerator, dm);
+ if (media) {
info = &inf;
break;
}
}
- if (!media_)
+ if (!media)
return false;
for (const auto &[name, streams] : info->converters) {
@@ -1177,13 +1573,17 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
- std::vector<MediaEntity *> sensors = locateSensors();
+ std::vector<MediaEntity *> sensors = locateSensors(media);
if (sensors.empty()) {
- LOG(SimplePipeline, Error) << "No sensor found";
+ LOG(SimplePipeline, Info) << "No sensor found for " << media->deviceNode();
return false;
}
+ LOG(SimplePipeline, Debug) << "Sensor found for " << media->deviceNode();
+
/*
* Create one camera data instance for each sensor and gather all
* entities in all pipelines.
@@ -1243,6 +1643,23 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
<< ": " << strerror(-ret);
return false;
}
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
break;
default:
@@ -1346,6 +1763,6 @@ void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
}
}
-REGISTER_PIPELINE_HANDLER(SimplePipelineHandler)
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/meson.build b/src/libcamera/pipeline/uvcvideo/meson.build
index a3c2efd4..a3a91074 100644
--- a/src/libcamera/pipeline/uvcvideo/meson.build
+++ b/src/libcamera/pipeline/uvcvideo/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'uvcvideo.cpp',
])
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index 40654a0b..8c2c6baf 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,17 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
+#include <cmath>
#include <fstream>
-#include <iomanip>
-#include <math.h>
+#include <map>
#include <memory>
-#include <tuple>
+#include <set>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
@@ -44,10 +47,19 @@ public:
int init(MediaDevice *media);
void addControl(uint32_t cid, const ControlInfo &v4l2info,
ControlInfoMap::Map *ctrls);
- void bufferReady(FrameBuffer *buffer);
+ void imageBufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
+
+ Mutex openLock_;
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
@@ -66,8 +78,8 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -81,12 +93,13 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
- std::string generateId(const UVCCameraData *data);
-
int processControl(ControlList *controls, unsigned int id,
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
+ bool acquireDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
UVCCameraData *cameraData(Camera *camera)
{
return static_cast<UVCCameraData *>(camera->_d());
@@ -105,8 +118,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -126,9 +139,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (iter == pixelFormats.end()) {
cfg.pixelFormat = pixelFormats.front();
LOG(UVC, Debug)
- << "Adjusting pixel format from "
- << pixelFormat.toString() << " to "
- << cfg.pixelFormat.toString();
+ << "Adjusting pixel format from " << pixelFormat
+ << " to " << cfg.pixelFormat;
status = Adjusted;
}
@@ -143,24 +155,48 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (cfg.size != size) {
LOG(UVC, Debug)
- << "Adjusting size from " << size.toString()
- << " to " << cfg.size.toString();
+ << "Adjusting size from " << size << " to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
- int ret = data_->video_->tryFormat(&format);
- if (ret)
- return Invalid;
+ /*
+	 * For power-consumption reasons, video_ is closed when the camera is not
+ * acquired. Open it here if necessary.
+ */
+ {
+ bool opened = false;
+
+ MutexLocker locker(data_->openLock_);
+
+ if (!data_->video_->isOpen()) {
+ int ret = data_->video_->open();
+ if (ret)
+ return Invalid;
+
+ opened = true;
+ }
+
+ int ret = data_->video_->tryFormat(&format);
+ if (opened)
+ data_->video_->close();
+ if (ret)
+ return Invalid;
+ }
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -169,24 +205,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- V4L2VideoDevice::Formats v4l2Formats = data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- for (const auto &format : v4l2Formats) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat.isValid())
- deviceFormats[pixelFormat] = format.second;
- }
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -207,7 +237,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
int ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -215,7 +245,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
cfg.setStream(&data->stream_);
@@ -290,14 +320,14 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
case V4L2_CID_BRIGHTNESS: {
float scale = std::max(max - def, def - min);
float fvalue = value.get<float>() * scale + def;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
case V4L2_CID_SATURATION: {
float scale = def - min;
float fvalue = value.get<float>() * scale + min;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
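+	/*
+	 * Worked arithmetic with a hypothetical V4L2 control range: for a
+	 * brightness control with min=0, max=255 and def=128, scale is
+	 * max(255 - 128, 128 - 0) = 128, so a libcamera value of 0.5 maps to
+	 * lround(0.5 * 128 + 128) = 192 and -1.0 maps to 0.
+	 */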
@@ -324,7 +354,7 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
}
float fvalue = (value.get<float>() - p) / m;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
@@ -342,12 +372,8 @@ int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
{
ControlList controls(data->video_->controls());
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
+ for (const auto &[id, value] : request->controls())
processControl(&controls, id, value);
- }
for (const auto &ctrl : controls)
LOG(UVC, Debug)
@@ -385,69 +411,6 @@ int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request)
return 0;
}
-std::string PipelineHandlerUVC::generateId(const UVCCameraData *data)
-{
- const std::string path = data->video_->devicePath();
-
- /* Create a controller ID from first device described in firmware. */
- std::string controllerId;
- std::string searchPath = path;
- while (true) {
- std::string::size_type pos = searchPath.rfind('/');
- if (pos <= 1) {
- LOG(UVC, Error) << "Can not find controller ID";
- return {};
- }
-
- searchPath = searchPath.substr(0, pos);
-
- controllerId = sysfs::firmwareNodePath(searchPath);
- if (!controllerId.empty())
- break;
- }
-
- /*
- * Create a USB ID from the device path which has the known format:
- *
- * path = bus, "-", ports, ":", config, ".", interface ;
- * bus = number ;
- * ports = port, [ ".", ports ] ;
- * port = number ;
- * config = number ;
- * interface = number ;
- *
- * Example: 3-2.4:1.0
- *
- * The bus is not guaranteed to be stable and needs to be stripped from
- * the USB ID. The final USB ID is built up of the ports, config and
- * interface properties.
- *
- * Example 2.4:1.0.
- */
- std::string usbId = utils::basename(path.c_str());
- usbId = usbId.substr(usbId.find('-') + 1);
-
- /* Creata a device ID from the USB devices vendor and product ID. */
- std::string deviceId;
- for (const char *name : { "idVendor", "idProduct" }) {
- std::ifstream file(path + "/../" + name);
-
- if (!file.is_open())
- return {};
-
- std::string value;
- std::getline(file, value);
- file.close();
-
- if (!deviceId.empty())
- deviceId += ":";
-
- deviceId += value;
- }
-
- return controllerId + "-" + usbId + "-" + deviceId;
-}
-
bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
{
MediaDevice *media;
@@ -463,12 +426,7 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return false;
/* Create and register the camera. */
- std::string id = generateId(data.get());
- if (id.empty()) {
- LOG(UVC, Error) << "Failed to generate camera ID";
- return false;
- }
-
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
@@ -480,6 +438,23 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return true;
}
+bool PipelineHandlerUVC::acquireDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+
+ return data->video_->open() == 0;
+}
+
+void PipelineHandlerUVC::releaseDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+ data->video_->close();
+}
+
int UVCCameraData::init(MediaDevice *media)
{
int ret;
@@ -501,28 +476,71 @@ int UVCCameraData::init(MediaDevice *media)
if (ret)
return ret;
- video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ video_->bufferReady.connect(this, &UVCCameraData::imageBufferReady);
- /*
- * \todo Find a way to tell internal and external UVC cameras apart.
- * Until then, treat all UVC cameras as external.
- */
- properties_.set(properties::Location, properties::CameraLocationExternal);
- properties_.set(properties::Model, utils::toAscii(media->model()));
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
/*
- * Get the current format in order to initialize the sensor array
- * properties.
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
*/
Size resolution;
- for (const auto &it : video_->formats()) {
- const std::vector<SizeRange> &sizeRanges = it.second;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
for (const SizeRange &sizeRange : sizeRanges) {
if (sizeRange.max > resolution)
resolution = sizeRange.max;
}
}
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
+ properties_.set(properties::Model, utils::toAscii(media->model()));
+
+ /*
+ * Derive the location from the device removable attribute in sysfs.
+ * Non-removable devices are assumed to be front as we lack detailed
+	 * location information, and removable devices are considered external.
+ *
+ * The sysfs removable attribute is derived from the ACPI _UPC attribute
+ * if available, or from the USB hub descriptors otherwise. ACPI data
+ * may not be very reliable, and the USB hub descriptors may not be
+ * accurate on DT-based platforms. A heuristic may need to be
+ * implemented later if too many devices end up being miscategorized.
+ *
+ * \todo Find a way to tell front and back devices apart. This could
+ * come from the ACPI _PLD, but that may be even more unreliable than
+ * the _UPC.
+ */
+ properties::LocationEnum location = properties::CameraLocationExternal;
+ std::ifstream file(video_->devicePath() + "/../removable");
+ if (file.is_open()) {
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (value == "fixed")
+ location = properties::CameraLocationFront;
+ }
+
+ properties_.set(properties::Location, location);
+
properties_.set(properties::PixelArraySize, resolution);
properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
@@ -538,9 +556,79 @@ int UVCCameraData::init(MediaDevice *media)
controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+ /*
+	 * Close the video device to allow the camera to enter runtime suspend;
+	 * video_ will be re-opened from acquireDevice() and validate().
+ */
+ video_->close();
+
return 0;
}
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+ /* Create a controller ID from first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+			LOG(UVC, Error) << "Cannot find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
+ break;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+	 * Example: 2.4:1.0.
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+	/* Create a device ID from the USB device's vendor and product ID. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
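+/*
+ * Worked example with hypothetical identifiers: for a device at USB path
+ * "3-2.4:1.0" whose controller resolves to the firmware node
+ * "pci0000:00/0000:00:14.0", and which reports idVendor 046d and idProduct
+ * 0825, the generated camera ID would be
+ * "pci0000:00/0000:00:14.0-2.4:1.0-046d:0825".
+ */
+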
void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ControlInfoMap::Map *ctrls)
{
@@ -659,7 +747,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ctrls->emplace(id, info);
}
-void UVCCameraData::bufferReady(FrameBuffer *buffer)
+void UVCCameraData::imageBufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
@@ -671,6 +759,6 @@ void UVCCameraData::bufferReady(FrameBuffer *buffer)
pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/meson.build b/src/libcamera/pipeline/vimc/meson.build
index 290eefb5..868e2546 100644
--- a/src/libcamera/pipeline/vimc/meson.build
+++ b/src/libcamera/pipeline/vimc/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'vimc.cpp',
])
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 29960fe3..07273bd2 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,18 +2,19 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
+#include <cmath>
#include <iomanip>
#include <map>
-#include <math.h>
#include <tuple>
#include <linux/media-bus-format.h>
#include <linux/version.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -21,6 +22,8 @@
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
@@ -32,6 +35,7 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -52,8 +56,8 @@ public:
int init();
int allocateMockIPABuffers();
- void bufferReady(FrameBuffer *buffer);
- void paramsFilled(unsigned int id);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
@@ -83,8 +87,8 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -113,6 +117,9 @@ static const std::map<PixelFormat, uint32_t> pixelformats{
{ formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
};
+static constexpr Size kMinSize{ 16, 16 };
+static constexpr Size kMaxSize{ 4096, 2160 };
+
} /* namespace */
VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data)
@@ -127,8 +134,8 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -152,25 +159,31 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
const Size size = cfg.size;
/*
- * The scaler hardcodes a x3 scale-up ratio, and the sensor output size
- * is aligned to two pixels in both directions. The output width and
- * height thus have to be multiples of 6.
+ * The sensor output size is aligned to two pixels in both directions.
+ * Additionally, prior to v5.16, the scaler hardcodes a x3 scale-up
+ * ratio, requiring the output width and height to be multiples of 6.
*/
- cfg.size.width = std::max(48U, std::min(4096U, cfg.size.width));
- cfg.size.height = std::max(48U, std::min(2160U, cfg.size.height));
- cfg.size.width -= cfg.size.width % 6;
- cfg.size.height -= cfg.size.height % 6;
+ Size minSize{ kMinSize };
+ unsigned int alignment = 2;
+
+ if (data_->media_->version() < KERNEL_VERSION(5, 16, 0)) {
+ minSize *= 3;
+ alignment *= 3;
+ }
+
+ cfg.size.expandTo(minSize).boundTo(kMaxSize)
+ .alignDownTo(alignment, alignment);
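+	/*
+	 * Worked example with a hypothetical request: on a pre-5.16 kernel,
+	 * minSize is 48x48 and alignment is 6, so a requested 1001x601 passes
+	 * through expandTo() and boundTo() unchanged and is aligned down to
+	 * 996x600. On newer kernels the same request becomes 1000x600.
+	 */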
if (cfg.size != size) {
LOG(VIMC, Debug)
- << "Adjusting size to " << cfg.size.toString();
+ << "Adjusting size to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -188,11 +201,13 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
VimcCameraData *data = cameraData(camera);
- CameraConfiguration *config = new VimcCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
@@ -208,15 +223,17 @@ CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
if (pixelformat.first != formats::BGR888) {
LOG(VIMC, Info)
<< "Skipping unsupported pixel format "
- << pixelformat.first.toString();
+ << pixelformat.first;
continue;
}
}
- /* The scaler hardcodes a x3 scale-up ratio. */
- std::vector<SizeRange> sizes{
- SizeRange{ { 48, 48 }, { 4096, 2160 } }
- };
+ /* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. */
+ Size minSize{ kMinSize };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ minSize *= 3;
+
+ std::vector<SizeRange> sizes{ { minSize, kMaxSize } };
formats[pixelformat.first] = sizes;
}
@@ -239,10 +256,18 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- /* The scaler hardcodes a x3 scale-up ratio. */
+ /*
+ * Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. For newer
+ * kernels, use a sensor resolution of 1920x1080 and let the scaler
+ * produce the requested stream size.
+ */
+ Size sensorSize{ 1920, 1080 };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ sensorSize = { cfg.size.width / 3, cfg.size.height / 3 };
+
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
- subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.size = sensorSize;
ret = data->sensor_->setFormat(&subformat);
if (ret)
@@ -252,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = pixelformats.find(cfg.pixelFormat)->second;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -274,7 +299,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -282,7 +307,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
/*
@@ -290,7 +315,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
* vimc driver will fail pipeline validation.
*/
format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8);
- format.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ format.size = sensorSize;
ret = data->raw_->setFormat(&format);
if (ret)
@@ -377,7 +402,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
unsigned int offset;
uint32_t cid;
@@ -395,7 +420,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
continue;
}
- int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@@ -432,7 +457,7 @@ int PipelineHandlerVimc::queueRequestDevice(Camera *camera, Request *request)
if (ret < 0)
return ret;
- data->ipa_->processControls(request->sequence(), request->controls());
+ data->ipa_->queueRequest(request->sequence(), request->controls());
return 0;
}
@@ -467,10 +492,18 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
return false;
}
- data->ipa_->paramsFilled.connect(data.get(), &VimcCameraData::paramsFilled);
+ data->ipa_->paramsComputed.connect(data.get(), &VimcCameraData::paramsComputed);
std::string conf = data->ipa_->configurationFile("vimc.conf");
- data->ipa_->init(IPASettings{ conf, data->sensor_->model() });
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
@@ -499,10 +532,9 @@ int VimcCameraData::init()
return ret;
/* Create and open the camera sensor, debayer, scaler and video device. */
- sensor_ = std::make_unique<CameraSensor>(media_->getEntityByName("Sensor B"));
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor B"));
+ if (!sensor_)
+ return -ENODEV;
debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
if (debayer_->open())
@@ -516,7 +548,7 @@ int VimcCameraData::init()
if (video_->open())
return -ENODEV;
- video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
+ video_->bufferReady.connect(this, &VimcCameraData::imageBufferReady);
raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
if (raw_->open())
@@ -564,7 +596,7 @@ int VimcCameraData::init()
return 0;
}
-void VimcCameraData::bufferReady(FrameBuffer *buffer)
+void VimcCameraData::imageBufferReady(FrameBuffer *buffer)
{
PipelineHandlerVimc *pipe =
static_cast<PipelineHandlerVimc *>(this->pipe());
@@ -574,7 +606,7 @@ void VimcCameraData::bufferReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
for (auto it : request->buffers()) {
FrameBuffer *b = it.second;
- b->cancel();
+ b->_d()->cancel();
pipe->completeBuffer(request, b);
}
@@ -589,7 +621,7 @@ void VimcCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeBuffer(request, buffer);
pipe->completeRequest(request);
- ipa_->fillParams(request->sequence(), mockIPABufs_[0]->cookie());
+ ipa_->computeParams(request->sequence(), mockIPABufs_[0]->cookie());
}
int VimcCameraData::allocateMockIPABuffers()
@@ -597,7 +629,7 @@ int VimcCameraData::allocateMockIPABuffers()
constexpr unsigned int kBufCount = 2;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::BGR888);
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
format.size = Size (160, 120);
int ret = video_->setFormat(&format);
@@ -607,10 +639,11 @@ int VimcCameraData::allocateMockIPABuffers()
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
-void VimcCameraData::paramsFilled([[maybe_unused]] unsigned int id)
+void VimcCameraData::paramsComputed([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/README.md b/src/libcamera/pipeline/virtual/README.md
new file mode 100644
index 00000000..a9f39c15
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/README.md
@@ -0,0 +1,65 @@
+# Virtual Pipeline Handler
+
+The virtual pipeline handler emulates one or more fake external cameras for testing.
+
+## Parsing the Config File and Registering Cameras
+
+- A sample config file is located at `src/libcamera/pipeline/virtual/data/virtual.yaml`.
+- When libcamera is installed, the config file is installed to
+  `share/libcamera/pipeline/virtual/virtual.yaml`.
+
+### Config File Format
+The config file describes the properties of the cameras to register. It is a
+YAML file whose top level is a dictionary mapping camera ids to their
+properties. Default values are applied when a property is omitted.
+
+Each camera block is a dictionary containing the following keys:
+- `supported_formats` (list of `VirtualCameraData::Resolution`, optional):
+  List of supported resolutions and frame rates of the emulated camera
+  - `width` (`unsigned int`, default=1920): Width of the resolution.
+    This needs to be even.
+  - `height` (`unsigned int`, default=1080): Height of the resolution.
+  - `frame_rates` (list of `int`, default=`[30, 60]`): Range of the frame
+    rate (per second). If the list contains one value, it is used as both
+    the lower and the upper bound. If the list contains two values, the
+    first is the lower bound and the second is the upper bound. No other
+    number of values is allowed.
+- `test_pattern` (`string`): Test pattern to use as frames. The supported
+  patterns are "bars" (color bars) and "lines" (diagonal lines). Cannot be
+  set together with `frames`.
+- `frames` (dictionary):
+  - `path` (`string`): Path to an image, or path to a directory containing a
+    series of images. Cannot be set together with `test_pattern`.
+    - The path to an image has a ".jpg" extension.
+    - The path to a directory ends with "/". The images in the directory are
+      named "{n}.jpg", where {n} is the frame sequence number starting at 0.
+- `location` (`string`, default="CameraLocationFront"): The location of the
+  camera. Supported values are "CameraLocationFront", "CameraLocationBack"
+  and "CameraLocationExternal".
+- `model` (`string`, default="Unknown"): The model name of the camera.
+
+See `data/virtual.yaml` for a sample config file.
+
+### Implementation
+
+The `ConfigParser` class provides methods to parse the config file and
+register cameras in the virtual pipeline handler. `parseConfigFile()` is the
+entry point used by the pipeline handler.
+
+The procedure of the `ConfigParser` class is as follows (a usage sketch
+follows the list):
+1. `parseConfigFile()` parses the config file into a `YamlObject` using
+   `YamlParser::parse()`.
+   - The top level of the config file, the camera ids, is parsed, and the
+     properties of each camera are inspected.
+2. For each camera, `parseCameraConfigData()` returns the camera data filled
+   with the configuration.
+   - The methods in the next step fill the data through a pointer to the
+     camera data object.
+   - If the config file contains an invalid configuration, this method
+     returns nullptr and the camera is skipped.
+3. Each property is parsed and registered in the data:
+   - `parseSupportedFormats()`: Parses `supported_formats` in the config,
+     which contains the resolutions and frame rates.
+   - `parseFrameGenerator()`: Parses `test_pattern` or `frames` in the config.
+   - `parseLocation()`: Parses `location` in the config.
+   - `parseModel()`: Parses `model` in the config.
+4. Control returns to `parseConfigFile()`, which appends the camera
+   configuration to the list.
+5. The list of camera configurations is returned.
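+
+As a usage sketch, a caller drives the parser as follows (this mirrors
+`PipelineHandlerVirtual::match()` later in this series; error handling is
+trimmed for brevity):
+
+```cpp
+#include "config_parser.h"
+
+/* Open the config file and hand it to the parser. */
+File file(configurationFile("virtual", "virtual.yaml"));
+if (!file.open(File::OpenModeFlag::ReadOnly))
+	return false;
+
+ConfigParser parser;
+/* One entry per successfully parsed camera; invalid entries are skipped. */
+std::vector<std::unique_ptr<VirtualCameraData>> cameras =
+	parser.parseConfigFile(file, this);
+```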
diff --git a/src/libcamera/pipeline/virtual/config_parser.cpp b/src/libcamera/pipeline/virtual/config_parser.cpp
new file mode 100644
index 00000000..0cbfe39b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.cpp
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#include "config_parser.h"
+
+#include <string.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::vector<std::unique_ptr<VirtualCameraData>>
+ConfigParser::parseConfigFile(File &file, PipelineHandler *pipe)
+{
+ std::vector<std::unique_ptr<VirtualCameraData>> configurations;
+
+ std::unique_ptr<YamlObject> cameras = YamlParser::parse(file);
+ if (!cameras) {
+ LOG(Virtual, Error) << "Failed to pass config file.";
+ return configurations;
+ }
+
+ if (!cameras->isDictionary()) {
+ LOG(Virtual, Error) << "Config file is not a dictionary at the top level.";
+ return configurations;
+ }
+
+ /* Look into the configuration of each camera */
+ for (const auto &[cameraId, cameraConfigData] : cameras->asDict()) {
+ std::unique_ptr<VirtualCameraData> data =
+ parseCameraConfigData(cameraConfigData, pipe);
+ /* Parse configData to data */
+ if (!data) {
+ /* Skip the camera if it has invalid config */
+ LOG(Virtual, Error) << "Failed to parse config of the camera: "
+ << cameraId;
+ continue;
+ }
+
+ data->config_.id = cameraId;
+ ControlInfoMap::Map controls;
+ /* \todo Check which resolution's frame rate should be reported */
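+ /* Frame duration limits in microseconds, derived from the frame rate bounds */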
+ controls[&controls::FrameDurationLimits] =
+ ControlInfo(1000000 / data->config_.resolutions[0].frameRates[1],
+ 1000000 / data->config_.resolutions[0].frameRates[0]);
+
+ std::vector<ControlValue> supportedFaceDetectModes{
+ static_cast<int32_t>(controls::draft::FaceDetectModeOff),
+ };
+ controls[&controls::draft::FaceDetectMode] = ControlInfo(supportedFaceDetectModes);
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+ configurations.push_back(std::move(data));
+ }
+
+ return configurations;
+}
+
+std::unique_ptr<VirtualCameraData>
+ConfigParser::parseCameraConfigData(const YamlObject &cameraConfigData,
+ PipelineHandler *pipe)
+{
+ std::vector<VirtualCameraData::Resolution> resolutions;
+ if (parseSupportedFormats(cameraConfigData, &resolutions))
+ return nullptr;
+
+ std::unique_ptr<VirtualCameraData> data =
+ std::make_unique<VirtualCameraData>(pipe, resolutions);
+
+ if (parseFrameGenerator(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseLocation(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseModel(cameraConfigData, data.get()))
+ return nullptr;
+
+ return data;
+}
+
+int ConfigParser::parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions)
+{
+ if (cameraConfigData.contains("supported_formats")) {
+ const YamlObject &supportedResolutions = cameraConfigData["supported_formats"];
+
+ for (const YamlObject &supportedResolution : supportedResolutions.asList()) {
+ unsigned int width = supportedResolution["width"].get<unsigned int>(1920);
+ unsigned int height = supportedResolution["height"].get<unsigned int>(1080);
+ if (width == 0 || height == 0) {
+ LOG(Virtual, Error) << "Invalid width or/and height";
+ return -EINVAL;
+ }
+ if (width % 2 != 0) {
+ LOG(Virtual, Error) << "Invalid width: width needs to be even";
+ return -EINVAL;
+ }
+
+ std::vector<int64_t> frameRates;
+ if (supportedResolution.contains("frame_rates")) {
+ auto frameRatesList =
+ supportedResolution["frame_rates"].getList<int>();
+ if (!frameRatesList || (frameRatesList->size() != 1 &&
+ frameRatesList->size() != 2)) {
+ LOG(Virtual, Error) << "Invalid frame_rates: either one or two values";
+ return -EINVAL;
+ }
+
+ if (frameRatesList->size() == 2 &&
+ frameRatesList.value()[0] > frameRatesList.value()[1]) {
+ LOG(Virtual, Error) << "frame_rates's first value(lower bound)"
+ << " is higher than the second value(upper bound)";
+ return -EINVAL;
+ }
+ /*
+ * Push the min and max framerates. A
+ * single rate is duplicated.
+ */
+ frameRates.push_back(frameRatesList.value().front());
+ frameRates.push_back(frameRatesList.value().back());
+ } else {
+ frameRates.push_back(30);
+ frameRates.push_back(60);
+ }
+
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ width, height },
+ frameRates });
+ }
+ } else {
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ 1920, 1080 },
+ { 30, 60 } });
+ }
+
+ return 0;
+}
+
+int ConfigParser::parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ const std::string testPatternKey = "test_pattern";
+ const std::string framesKey = "frames";
+ if (cameraConfigData.contains(testPatternKey)) {
+ if (cameraConfigData.contains(framesKey)) {
+ LOG(Virtual, Error) << "A camera should use either "
+ << testPatternKey << " or " << framesKey;
+ return -EINVAL;
+ }
+
+ auto testPattern = cameraConfigData[testPatternKey].get<std::string>("");
+
+ if (testPattern == "bars") {
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (testPattern == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Debug) << "Test pattern: " << testPattern
+ << " is not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ const YamlObject &frames = cameraConfigData[framesKey];
+
+ /* When no frames are provided in the config file, use the color bars test pattern */
+ if (!frames) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ auto path = frames["path"].get<std::string>();
+
+ if (!path) {
+ LOG(Virtual, Error) << "Test pattern or path should be specified.";
+ return -EINVAL;
+ }
+
+ std::vector<std::filesystem::path> files;
+
+ switch (std::filesystem::symlink_status(*path).type()) {
+ case std::filesystem::file_type::regular:
+ files.push_back(*path);
+ break;
+
+ case std::filesystem::file_type::directory:
+ for (const auto &dentry : std::filesystem::directory_iterator{ *path }) {
+ if (dentry.is_regular_file())
+ files.push_back(dentry.path());
+ }
+
+ std::sort(files.begin(), files.end(), [](const auto &a, const auto &b) {
+ return ::strverscmp(a.c_str(), b.c_str()) < 0;
+ });
+
+ if (files.empty()) {
+ LOG(Virtual, Error) << "Directory has no files: " << *path;
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ LOG(Virtual, Error) << "Frame: " << *path << " is not supported";
+ return -EINVAL;
+ }
+
+ data->config_.frame = ImageFrames{ std::move(files) };
+
+ return 0;
+}
+
+int ConfigParser::parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string location = cameraConfigData["location"].get<std::string>("CameraLocationFront");
+
+ /* Default value is properties::CameraLocationFront */
+ auto it = properties::LocationNameValueMap.find(location);
+ if (it == properties::LocationNameValueMap.end()) {
+ LOG(Virtual, Error)
+ << "location: " << location << " is not supported";
+ return -EINVAL;
+ }
+
+ data->properties_.set(properties::Location, it->second);
+
+ return 0;
+}
+
+int ConfigParser::parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string model = cameraConfigData["model"].get<std::string>("Unknown");
+
+ data->properties_.set(properties::Model, model);
+
+ return 0;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/config_parser.h b/src/libcamera/pipeline/virtual/config_parser.h
new file mode 100644
index 00000000..d2000de9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/file.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+class ConfigParser
+{
+public:
+ std::vector<std::unique_ptr<VirtualCameraData>>
+ parseConfigFile(File &file, PipelineHandler *pipe);
+
+private:
+ std::unique_ptr<VirtualCameraData>
+ parseCameraConfigData(const YamlObject &cameraConfigData, PipelineHandler *pipe);
+
+ int parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions);
+ int parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data);
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/data/virtual.yaml b/src/libcamera/pipeline/virtual/data/virtual.yaml
new file mode 100644
index 00000000..20471bb9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/data/virtual.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+"Virtual0":
+ supported_formats:
+ - width: 1920
+ height: 1080
+ frame_rates:
+ - 30
+ - 60
+ - width: 1680
+ height: 1050
+ frame_rates:
+ - 70
+ - 80
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device"
+"Virtual1":
+ supported_formats:
+ - width: 800
+ height: 600
+ frame_rates:
+ - 60
+ test_pattern: "bars"
+ location: "CameraLocationBack"
+ model: "Virtual Video Device1"
+"Virtual2":
+ supported_formats:
+ - width: 400
+ height: 300
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device2"
+"Virtual3":
+ test_pattern: "bars"
diff --git a/src/libcamera/pipeline/virtual/frame_generator.h b/src/libcamera/pipeline/virtual/frame_generator.h
new file mode 100644
index 00000000..a0658c45
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/frame_generator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to generate frames
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
+class FrameGenerator
+{
+public:
+ virtual ~FrameGenerator() = default;
+
+ virtual void configure(const Size &size) = 0;
+
+ virtual int generateFrame(const Size &size,
+ const FrameBuffer *buffer) = 0;
+
+protected:
+ FrameGenerator() {}
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
new file mode 100644
index 00000000..d1545b5d
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+/*
+ * Factory function to create an ImageFrameGenerator object.
+ * Read the images and convert them to buffers in NV12 format.
+ * Store the pointers to the buffers in a list (imageFrameDatas_)
+ */
+std::unique_ptr<ImageFrameGenerator>
+ImageFrameGenerator::create(ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each file in the directory, load the image,
+ * convert it to NV12, and store the pointer.
+ */
+ for (const auto &path : imageFrames.files) {
+ File file(path);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ auto fileSize = file.size();
+ auto buffer = std::make_unique<uint8_t[]>(fileSize);
+ if (file.read({ buffer.get(), static_cast<size_t>(fileSize) }) != fileSize) {
+ LOG(Virtual, Error) << "Failed to read file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(buffer.get(), fileSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(width * height / 2);
+ int ret = libyuv::MJPGToNV12(buffer.get(), fileSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+
+ ASSERT(!imageFrameGenerator->imageFrameDatas_.empty());
+
+ return imageFrameGenerator;
+}
+
+/*
+ * \var ImageFrameGenerator::frameRepeat
+ * \brief Number of frames to repeat before proceeding to the next frame
+ */
+
+/* Scale the buffers for image frames. */
+void ImageFrameGenerator::configure(const Size &size)
+{
+ /* Reset the scaled frames so that repeated configure() calls start clean */
+ scaledFrameDatas_.clear();
+ frameIndex_ = 0;
+ parameter_ = 0;
+
+ for (unsigned int i = 0; i < imageFrameDatas_.size(); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ unsigned int halfSizeWidth = (size.width + 1) / 2;
+ unsigned int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+ /*
+ * \todo Some platforms might enforce stride due to GPU.
+ * The width needs to be a multiple of the stride to work
+ * properly for now.
+ */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buffer)
+{
+ ASSERT(!scaledFrameDatas_.empty());
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ /* Wrap around to stay within the number of available images */
+ frameIndex_ %= imageFrameDatas_.size();
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameIndex_].Y.get(), size.width,
+ scaledFrameDatas_[frameIndex_].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+ /* Proceed to the next image every frameRepeat frames */
+ /* \todo Consider making frameRepeat configurable in the config file */
+ parameter_++;
+ if (parameter_ % frameRepeat == 0)
+ frameIndex_++;
+
+ return 0;
+}
+
+/*
+ * \var ImageFrameGenerator::imageFrameDatas_
+ * \brief List of pointers to the unscaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::scaledFrameDatas_
+ * \brief List of pointers to the scaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::imageFrames_
+ * \brief Pointer to the ImageFrames configuration in VirtualCameraData
+ */
+
+/*
+ * \var ImageFrameGenerator::parameter_
+ * \brief Frame counter; the generator advances to the next image every frameRepeat frames
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.h b/src/libcamera/pipeline/virtual/image_frame_generator.h
new file mode 100644
index 00000000..42a077ba
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <memory>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::vector<std::filesystem::path> files;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ static constexpr unsigned int frameRepeat = 4;
+
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ void configure(const Size &size) override;
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+ std::vector<ImageFrameData> imageFrameDatas_;
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ ImageFrames *imageFrames_;
+ unsigned int frameIndex_;
+ unsigned int parameter_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/meson.build b/src/libcamera/pipeline/virtual/meson.build
new file mode 100644
index 00000000..4786fe2e
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'config_parser.cpp',
+ 'image_frame_generator.cpp',
+ 'test_pattern_generator.cpp',
+ 'virtual.cpp',
+])
+
+libjpeg = dependency('libjpeg', required : true)
+
+libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg]
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
new file mode 100644
index 00000000..745be83b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#include "test_pattern_generator.h"
+
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <libyuv/convert_from_argb.h>
+
+namespace {
+
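+/*
+ * Rotate the image left by one sample column, wrapping the leftmost column
+ * around to the right. generateFrame() calls this on the template to scroll
+ * the test pattern between frames.
+ */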
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+ if (size.width < 2)
+ return;
+
+ const size_t stride = size.width * SampleSize;
+ uint8_t first[SampleSize];
+
+ for (size_t i = 0; i < size.height; i++, image += stride) {
+ memcpy(first, &image[0], SampleSize);
+ memmove(&image[0], &image[SampleSize], stride - SampleSize);
+ memcpy(&image[stride - SampleSize], first, SampleSize);
+ }
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+static const unsigned int kARGBSize = 4;
+
+int TestPatternGenerator::generateFrame(const Size &size,
+ const FrameBuffer *buffer)
+{
+ MappedFrameBuffer mappedFrameBuffer(buffer,
+ MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ rotateLeft1Column<kARGBSize>(size, template_.get());
+
+ /* Convert the template_ to the frame buffer */
+ int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
+ planes[0].begin(), size.width,
+ planes[1].begin(), size.width,
+ size.width, size.height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "ARGBToNV12() failed with " << ret;
+
+ return ret;
+}
+
+void ColorBarsGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[8][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0xff, 0xff, 0x00 }, /* Yellow */
+ { 0x00, 0xff, 0xff }, /* Cyan */
+ { 0x00, 0xff, 0x00 }, /* Green */
+ { 0xff, 0x00, 0xff }, /* Magenta */
+ { 0xff, 0x00, 0x00 }, /* Red */
+ { 0x00, 0x00, 0xff }, /* Blue */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int colorBarWidth = size.width / std::size(kColorBar);
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+ /* Repeat the pattern when the width is exceeded */
+ unsigned int index = (w / colorBarWidth) % std::size(kColorBar);
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+void DiagonalLinesGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[2][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int lineWidth = size.width / 10;
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+ /* Repeat the pattern when the width is exceeded */
+ int index = ((w + h) / lineWidth) % 2;
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h
new file mode 100644
index 00000000..2a51bd31
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class TestPattern : char {
+ ColorBars = 0,
+ DiagonalLines = 1,
+};
+
+class TestPatternGenerator : public FrameGenerator
+{
+public:
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+protected:
+ /* Buffer of test pattern template */
+ std::unique_ptr<uint8_t[]> template_;
+};
+
+class ColorBarsGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the color bar test pattern. */
+ void configure(const Size &size) override;
+};
+
+class DiagonalLinesGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the diagonal lines test pattern. */
+ void configure(const Size &size) override;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp
new file mode 100644
index 00000000..e692a543
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.cpp
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#include "virtual.h"
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <errno.h>
+#include <map>
+#include <memory>
+#include <ostream>
+#include <set>
+#include <stdint.h>
+#include <string>
+#include <time.h>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "pipeline/virtual/config_parser.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Virtual)
+
+namespace {
+
+uint64_t currentTimestamp()
+{
+ const auto now = std::chrono::steady_clock::now();
+ auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ now.time_since_epoch());
+
+ return nsecs.count();
+}
+
+} /* namespace */
+
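+/*
+ * Helper to build an overload set from lambdas, used with std::visit() to
+ * dispatch on the type held by the VirtualFrame variant.
+ */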
+template<class... Ts>
+struct overloaded : Ts... {
+ using Ts::operator()...;
+};
+template<class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
+
+class VirtualCameraConfiguration : public CameraConfiguration
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ VirtualCameraConfiguration(VirtualCameraData *data);
+
+ Status validate() override;
+
+private:
+ const VirtualCameraData *data_;
+};
+
+class PipelineHandlerVirtual : public PipelineHandler
+{
+public:
+ PipelineHandlerVirtual(CameraManager *manager);
+ ~PipelineHandlerVirtual();
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ static bool created_;
+
+ VirtualCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VirtualCameraData *>(camera->_d());
+ }
+
+ bool initFrameGenerator(Camera *camera);
+
+ DmaBufAllocator dmaBufAllocator_;
+
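+ /*
+ * The virtual pipeline handler registers its cameras only once; created_
+ * tracks this across instances, and resetCreated_ marks the instance
+ * responsible for clearing the flag on destruction.
+ */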
+ bool resetCreated_ = false;
+};
+
+VirtualCameraData::VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions)
+ : Camera::Private(pipe)
+{
+ config_.resolutions = supportedResolutions;
+ for (const auto &resolution : config_.resolutions) {
+ if (config_.minResolutionSize.isNull() || config_.minResolutionSize > resolution.size)
+ config_.minResolutionSize = resolution.size;
+
+ config_.maxResolutionSize = std::max(config_.maxResolutionSize, resolution.size);
+ }
+
+ properties_.set(properties::PixelArrayActiveAreas,
+ { Rectangle(config_.maxResolutionSize) });
+
+ /* \todo Support multiple streams and pass multi_stream_test */
+ streamConfigs_.resize(kMaxStream);
+}
+
+VirtualCameraConfiguration::VirtualCameraConfiguration(VirtualCameraData *data)
+ : CameraConfiguration(), data_(data)
+{
+}
+
+CameraConfiguration::Status VirtualCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty()) {
+ LOG(Virtual, Error) << "Empty config";
+ return Invalid;
+ }
+
+ /* Only one stream is supported */
+ if (config_.size() > VirtualCameraData::kMaxStream) {
+ config_.resize(VirtualCameraData::kMaxStream);
+ status = Adjusted;
+ }
+
+ for (StreamConfiguration &cfg : config_) {
+ bool adjusted = false;
+ bool found = false;
+ for (const auto &resolution : data_->config_.resolutions) {
+ if (resolution.size.width == cfg.size.width &&
+ resolution.size.height == cfg.size.height) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * \todo It's a pipeline's decision to choose a
+ * resolution when the exact one is not supported.
+ * Defining the default logic in PipelineHandler to
+ * find the closest resolution would be nice.
+ */
+ cfg.size = data_->config_.maxResolutionSize;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (cfg.pixelFormat != formats::NV12) {
+ cfg.pixelFormat = formats::NV12;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (adjusted)
+ LOG(Virtual, Info)
+ << "Stream configuration adjusted to " << cfg.toString();
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0, 1);
+ cfg.frameSize = info.frameSize(cfg.size, 1);
+
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+ }
+
+ return status;
+}
+
+/* static */
+bool PipelineHandlerVirtual::created_ = false;
+
+PipelineHandlerVirtual::PipelineHandlerVirtual(CameraManager *manager)
+ : PipelineHandler(manager),
+ dmaBufAllocator_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+}
+
+PipelineHandlerVirtual::~PipelineHandlerVirtual()
+{
+ if (resetCreated_)
+ created_ = false;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVirtual::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ VirtualCameraData *data = cameraData(camera);
+ auto config = std::make_unique<VirtualCameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::VideoRecording:
+ case StreamRole::Viewfinder:
+ break;
+
+ case StreamRole::Raw:
+ default:
+ LOG(Virtual, Error)
+ << "Requested stream role not supported: " << role;
+ config.reset();
+ return config;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ PixelFormat pixelFormat = formats::NV12;
+ streamFormats[pixelFormat] = { { data->config_.minResolutionSize,
+ data->config_.maxResolutionSize } };
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = data->config_.maxResolutionSize;
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+
+ config->addConfiguration(cfg);
+ }
+
+ ASSERT(config->validate() != CameraConfiguration::Invalid);
+
+ return config;
+}
+
+int PipelineHandlerVirtual::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ VirtualCameraData *data = cameraData(camera);
+ for (auto [i, c] : utils::enumerate(*config)) {
+ c.setStream(&data->streamConfigs_[i].stream);
+ /* Start reading the images/generating test patterns */
+ data->streamConfigs_[i].frameGenerator->configure(c.size);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera,
+ Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ if (!dmaBufAllocator_.isValid())
+ return -ENOBUFS;
+
+ const StreamConfiguration &config = stream->configuration();
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+
+ std::vector<unsigned int> planeSizes;
+ for (size_t i = 0; i < info.numPlanes(); ++i)
+ planeSizes.push_back(info.planeSize(config.size, i));
+
+ return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers);
+}
+
+int PipelineHandlerVirtual::start([[maybe_unused]] Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ return 0;
+}
+
+void PipelineHandlerVirtual::stopDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+int PipelineHandlerVirtual::queueRequestDevice([[maybe_unused]] Camera *camera,
+ Request *request)
+{
+ VirtualCameraData *data = cameraData(camera);
+
+ for (auto const &[stream, buffer] : request->buffers()) {
+ bool found = false;
+ /* Map the buffer and fill it with a generated frame */
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (stream == &streamConfig.stream) {
+ found = true;
+ if (streamConfig.frameGenerator->generateFrame(
+ stream->configuration().size, buffer))
+ buffer->_d()->cancel();
+
+ completeBuffer(request, buffer);
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+
+ request->metadata().set(controls::SensorTimestamp, currentTimestamp());
+ completeRequest(request);
+
+ return 0;
+}
+
+bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator)
+{
+ if (created_)
+ return false;
+
+ created_ = true;
+
+ File file(configurationFile("virtual", "virtual.yaml"));
+ bool isOpen = file.open(File::OpenModeFlag::ReadOnly);
+ if (!isOpen) {
+ LOG(Virtual, Error) << "Failed to open config file: " << file.fileName();
+ return false;
+ }
+
+ ConfigParser parser;
+ auto configData = parser.parseConfigFile(file, this);
+ if (configData.size() == 0) {
+ LOG(Virtual, Error) << "Failed to parse any cameras from the config file: "
+ << file.fileName();
+ return false;
+ }
+
+ /* Configure and register cameras with configData */
+ for (auto &data : configData) {
+ std::set<Stream *> streams;
+ for (auto &streamConfig : data->streamConfigs_)
+ streams.insert(&streamConfig.stream);
+ std::string id = data->config_.id;
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+
+ if (!initFrameGenerator(camera.get())) {
+ LOG(Virtual, Error) << "Failed to initialize frame "
+ << "generator for camera: " << id;
+ continue;
+ }
+
+ registerCamera(std::move(camera));
+ }
+
+ resetCreated_ = true;
+
+ return true;
+}
+
+bool PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
+{
+ auto data = cameraData(camera);
+ auto &frame = data->config_.frame;
+ std::visit(overloaded{
+ [&](TestPattern &testPattern) {
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (testPattern == TestPattern::DiagonalLines)
+ streamConfig.frameGenerator = std::make_unique<DiagonalLinesGenerator>();
+ else
+ streamConfig.frameGenerator = std::make_unique<ColorBarsGenerator>();
+ }
+ },
+ [&](ImageFrames &imageFrames) {
+ for (auto &streamConfig : data->streamConfigs_)
+ streamConfig.frameGenerator = ImageFrameGenerator::create(imageFrames);
+ } },
+ frame);
+
+ for (auto &streamConfig : data->streamConfigs_)
+ if (!streamConfig.frameGenerator)
+ return false;
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/virtual.h b/src/libcamera/pipeline/virtual/virtual.h
new file mode 100644
index 00000000..92ad7d4a
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#pragma once
+
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+#include "frame_generator.h"
+#include "image_frame_generator.h"
+#include "test_pattern_generator.h"
+
+namespace libcamera {
+
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
+class VirtualCameraData : public Camera::Private
+{
+public:
+ const static unsigned int kMaxStream = 3;
+
+ struct Resolution {
+ Size size;
+ std::vector<int64_t> frameRates;
+ };
+ struct StreamConfig {
+ Stream stream;
+ std::unique_ptr<FrameGenerator> frameGenerator;
+ };
+ /* The config file is parsed to the Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+
+ Size maxResolutionSize;
+ Size minResolutionSize;
+ };
+
+ VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions);
+
+ ~VirtualCameraData() = default;
+
+ Configuration config_;
+
+ std::vector<StreamConfig> streamConfigs_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vivid/meson.build b/src/libcamera/pipeline/vivid/meson.build
new file mode 100644
index 00000000..513de9af
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vivid.cpp',
+])
diff --git a/src/libcamera/pipeline/vivid/vivid.cpp b/src/libcamera/pipeline/vivid/vivid.cpp
new file mode 100644
index 00000000..0340a500
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/vivid.cpp
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Pipeline handler for the vivid capture device
+ */
+
+#include <math.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/*
+ * Explicitly disable the unused-parameter warning in this pipeline handler.
+ *
+ * Parameters are left unused while they are introduced incrementally, so for
+ * documentation purposes only we disable this warning so that we can compile
+ * each commit independently without breaking the flow of the development
+ * additions.
+ *
+ * This is not recommended practice within libcamera, please listen to your
+ * compiler warnings.
+ */
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
+#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
+#define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0)
+#define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+#define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2)
+#define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3)
+#define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4)
+#define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5)
+#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
+#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
+#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(VIVID)
+
+class VividCameraData : public Camera::Private
+{
+public:
+ VividCameraData(PipelineHandler *pipe, MediaDevice *media)
+ : Camera::Private(pipe), media_(media), video_(nullptr)
+ {
+ }
+
+ ~VividCameraData()
+ {
+ delete video_;
+ }
+
+ int init();
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *media_;
+ V4L2VideoDevice *video_;
+ Stream stream_;
+};
+
+class VividCameraConfiguration : public CameraConfiguration
+{
+public:
+ VividCameraConfiguration();
+
+ Status validate() override;
+};
+
+class PipelineHandlerVivid : public PipelineHandler
+{
+public:
+ PipelineHandlerVivid(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ int processControls(VividCameraData *data, Request *request);
+
+ VividCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VividCameraData *>(camera->_d());
+ }
+};
+
+VividCameraConfiguration::VividCameraConfiguration()
+ : CameraConfiguration()
+{
+}
+
+CameraConfiguration::Status VividCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > 1) {
+ config_.resize(1);
+ status = Adjusted;
+ }
+
+ StreamConfiguration &cfg = config_[0];
+
+ /* Adjust the pixel format. */
+ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+ cfg.pixelFormat = cfg.formats().pixelformats()[0];
+ LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
+ status = Adjusted;
+ }
+
+ cfg.bufferCount = 4;
+
+ return status;
+}
+
+PipelineHandlerVivid::PipelineHandlerVivid(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVivid::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VividCameraConfiguration>();
+ VividCameraData *data = cameraData(camera);
+
+ if (roles.empty())
+ return config;
+
+ std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
+ data->video_->formats();
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ std::transform(v4l2Formats.begin(), v4l2Formats.end(),
+ std::inserter(deviceFormats, deviceFormats.begin()),
+ [&](const decltype(v4l2Formats)::value_type &format) {
+ return decltype(deviceFormats)::value_type{
+ format.first.toPixelFormat(),
+ format.second
+ };
+ });
+
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+
+ cfg.pixelFormat = formats::BGR888;
+ cfg.size = { 1280, 720 };
+ cfg.bufferCount = 4;
+
+ config->addConfiguration(cfg);
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerVivid::configure(Camera *camera, CameraConfiguration *config)
+{
+ VividCameraData *data = cameraData(camera);
+ StreamConfiguration &cfg = config->at(0);
+ int ret;
+
+ V4L2DeviceFormat format = {};
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ ret = data->video_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ if (format.size != cfg.size ||
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat)) {
+ LOG(VIVID, Error)
+ << "Requested " << cfg.toString() << ", got "
+ << format.size.toString() << "-"
+ << format.fourcc.toString();
+ return -EINVAL;
+ }
+
+ /* Set initial controls specific to VIVID */
+ ControlList controls(data->video_->controls());
+ controls.set(VIVID_CID_TEST_PATTERN, 0); /* Vertical Colour Bars */
+ controls.set(VIVID_CID_OSD_TEXT_MODE, 0); /* Display all OSD */
+
+ /* Ensure clear colours are configured. */
+ controls.set(V4L2_CID_BRIGHTNESS, 128);
+ controls.set(V4L2_CID_CONTRAST, 128);
+ controls.set(V4L2_CID_SATURATION, 128);
+
+ /* Enable movement to visualise buffer updates. */
+ controls.set(VIVID_CID_HOR_MOVEMENT, 5);
+
+ ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ cfg.setStream(&data->stream_);
+ cfg.stride = format.planes[0].bpl;
+
+ return 0;
+}
+
+int PipelineHandlerVivid::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return data->video_->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerVivid::start(Camera *camera, const ControlList *controls)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = data->stream_.configuration().bufferCount;
+
+ int ret = data->video_->importBuffers(count);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->streamOn();
+ if (ret < 0) {
+ data->video_->releaseBuffers();
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerVivid::stopDevice(Camera *camera)
+{
+ VividCameraData *data = cameraData(camera);
+ data->video_->streamOff();
+ data->video_->releaseBuffers();
+}
+
+int PipelineHandlerVivid::processControls(VividCameraData *data, Request *request)
+{
+ ControlList controls(data->video_->controls());
+
+ for (const auto &it : request->controls()) {
+ unsigned int id = it.first;
+ unsigned int offset;
+ uint32_t cid;
+
+ if (id == controls::Brightness) {
+ cid = V4L2_CID_BRIGHTNESS;
+ offset = 128;
+ } else if (id == controls::Contrast) {
+ cid = V4L2_CID_CONTRAST;
+ offset = 0;
+ } else if (id == controls::Saturation) {
+ cid = V4L2_CID_SATURATION;
+ offset = 0;
+ } else {
+ continue;
+ }
+
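+ /* Map the float control value into the driver's [0, 255] integer range. */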
+ int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ controls.set(cid, std::clamp(value, 0, 255));
+ }
+
+ for (const auto &ctrl : controls)
+ LOG(VIVID, Debug)
+ << "Setting control " << utils::hex(ctrl.first)
+ << " to " << ctrl.second.toString();
+
+ int ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ return ret;
+}
+
+int PipelineHandlerVivid::queueRequestDevice(Camera *camera, Request *request)
+{
+ VividCameraData *data = cameraData(camera);
+ FrameBuffer *buffer = request->findBuffer(&data->stream_);
+ if (!buffer) {
+ LOG(VIVID, Error)
+ << "Attempt to queue request with invalid stream";
+
+ return -ENOENT;
+ }
+
+ int ret = processControls(data, request);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->queueBuffer(buffer);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+bool PipelineHandlerVivid::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("vivid");
+ dm.add("vivid-000-vid-cap");
+
+ MediaDevice *media = acquireMediaDevice(enumerator, dm);
+ if (!media)
+ return false;
+
+ std::unique_ptr<VividCameraData> data = std::make_unique<VividCameraData>(this, media);
+
+ /* Locate and open the capture video node. */
+ if (data->init())
+ return false;
+
+ /* Create and register the camera. */
+ std::set<Stream *> streams{ &data->stream_ };
+ const std::string id = data->video_->deviceName();
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+int VividCameraData::init()
+{
+ video_ = new V4L2VideoDevice(media_->getEntityByName("vivid-000-vid-cap"));
+ if (video_->open())
+ return -ENODEV;
+
+ video_->bufferReady.connect(this, &VividCameraData::bufferReady);
+
+ /* Initialise the supported controls and properties. */
+ const ControlInfoMap &controls = video_->controls();
+ ControlInfoMap::Map ctrls;
+
+ for (const auto &ctrl : controls) {
+ const ControlId *id;
+ ControlInfo info;
+
+ switch (ctrl.first->id()) {
+ case V4L2_CID_BRIGHTNESS:
+ id = &controls::Brightness;
+ info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
+ break;
+ case V4L2_CID_CONTRAST:
+ id = &controls::Contrast;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ case V4L2_CID_SATURATION:
+ id = &controls::Saturation;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ default:
+ continue;
+ }
+
+ ctrls.emplace(id, info);
+ }
+
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+
+ properties_.set(properties::Location, properties::CameraLocationExternal);
+ properties_.set(properties::Model, "Virtual Video Device");
+
+ return 0;
+}
+
+void VividCameraData::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe()->completeBuffer(request, buffer);
+ pipe()->completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 7ebd76ad..caa5c20e 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#include "libcamera/internal/pipeline_handler.h"
#include <chrono>
+#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <libcamera/base/log.h>
@@ -15,12 +16,12 @@
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/request.h"
#include "libcamera/internal/tracepoints.h"
@@ -64,17 +65,16 @@ LOG_DEFINE_CATEGORY(Pipeline)
*
* In order to honour the std::enable_shared_from_this<> contract,
* PipelineHandler instances shall never be constructed manually, but always
- * through the PipelineHandlerFactory::create() function implemented by the
- * respective factories.
+ * through the PipelineHandlerFactoryBase::create() function.
*/
PipelineHandler::PipelineHandler(CameraManager *manager)
- : manager_(manager), lockOwner_(false)
+ : manager_(manager), useCount_(0)
{
}
PipelineHandler::~PipelineHandler()
{
- for (std::shared_ptr<MediaDevice> media : mediaDevices_)
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->release();
}
@@ -143,58 +143,122 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
}
/**
- * \brief Lock all media devices acquired by the pipeline
+ * \brief Acquire exclusive access to the pipeline handler for the process
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * This function locks all the media devices used by the pipeline to ensure
+ * that no other process can access them concurrently.
*
- * \context This function is \threadsafe.
+ * Access to a pipeline handler may be acquired recursively from within the
+ * same process. Every successful acquire() call shall be matched with a
+ * release() call. This allows concurrent access to the same pipeline handler
+ * from different cameras within the same process.
*
- * \return True if the devices could be locked, false otherwise
- * \sa unlock()
- * \sa MediaDevice::lock()
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \return True if the pipeline handler was acquired, false if another process
+ * has already acquired it
+ * \sa release()
*/
-bool PipelineHandler::lock()
+bool PipelineHandler::acquire(Camera *camera)
{
- MutexLocker locker(lock_);
-
- /* Do not allow nested locking in the same libcamera instance. */
- if (lockOwner_)
- return false;
-
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
- if (!media->lock()) {
- unlock();
- return false;
+ if (useCount_ == 0) {
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ if (!media->lock()) {
+ unlockMediaDevices();
+ return false;
+ }
}
}
- lockOwner_ = true;
+ if (!acquireDevice(camera)) {
+ if (useCount_ == 0)
+ unlockMediaDevices();
+ return false;
+ }
+
+ ++useCount_;
return true;
}
/**
- * \brief Unlock all media devices acquired by the pipeline
+ * \brief Release exclusive access to the pipeline handler
+ * \param[in] camera The camera for which to release data
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * This function releases access to the pipeline handler previously acquired by
+ * a call to acquire(). Every release() call shall match a previous successful
+ * acquire() call. Calling this function on a pipeline handler that hasn't been
+ * acquired results in undefined behaviour.
*
- * \context This function is \threadsafe.
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
*
- * \sa lock()
+ * \context This function is called from the CameraManager thread.
+ *
+ * \sa acquire()
*/
-void PipelineHandler::unlock()
+void PipelineHandler::release(Camera *camera)
{
- MutexLocker locker(lock_);
+ ASSERT(useCount_);
- if (!lockOwner_)
- return;
+ releaseDevice(camera);
+
+ if (useCount_ == 1)
+ unlockMediaDevices();
+
+ --useCount_;
+}
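
A sketch of the reference counting that acquire() and release() implement, assuming two hypothetical cameras camA and camB backed by the same pipeline handler in one process:

    camA->acquire();   /* useCount_ 0 -> 1, media devices locked   */
    camB->acquire();   /* useCount_ 1 -> 2, already locked         */
    camB->release();   /* useCount_ 2 -> 1                         */
    camA->release();   /* useCount_ 1 -> 0, media devices unlocked */
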
+
+/**
+ * \brief Acquire resources associated with this camera
+ * \param[in] camera The camera for which to acquire resources
+ *
+ * Pipeline handlers may override this function to acquire resources, such as
+ * opening devices or allocating buffers, when a camera is acquired.
+ *
+ * This is used by the uvcvideo pipeline handler to delay opening /dev/video#
+ * until the camera is acquired to avoid excess power consumption. The delayed
+ * opening of /dev/video# is a special case because the kernel uvcvideo driver
+ * powers on the USB device as soon as /dev/video# is opened. This behavior
+ * should *not* be copied by other pipeline handlers.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \return True on success, false on failure
+ * \sa releaseDevice()
+ */
+bool PipelineHandler::acquireDevice([[maybe_unused]] Camera *camera)
+{
+ return true;
+}
+
+/**
+ * \brief Release resources associated with this camera
+ * \param[in] camera The camera for which to release resources
+ *
+ * Pipeline handlers may override this in order to perform cleanup operations
+ * when a camera is released, such as freeing memory.
+ *
+ * This is called once for every camera that is released. If resources are
+ * shared by multiple cameras, the pipeline handler must take care not to
+ * release them until releaseDevice() has been called for all previously
+ * acquired cameras.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \sa acquireDevice()
+ */
+void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
+{
+}
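
A minimal sketch of an override along the lines described above; the class and data members are illustrative, not the actual uvcvideo implementation:

    bool PipelineHandlerExample::acquireDevice(Camera *camera)
    {
            /* Defer opening the video node until the camera is acquired. */
            return cameraData(camera)->video_->open() == 0;
    }

    void PipelineHandlerExample::releaseDevice(Camera *camera)
    {
            cameraData(camera)->video_->close();
    }
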
+void PipelineHandler::unlockMediaDevices()
+{
for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->unlock();
-
- lockOwner_ = false;
}
/**
@@ -217,8 +281,7 @@ void PipelineHandler::unlock()
* handler.
*
* \return A valid CameraConfiguration if the requested roles can be satisfied,
- * or a null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * or a null pointer otherwise.
*/
/**
@@ -304,14 +367,14 @@ void PipelineHandler::stop(Camera *camera)
while (!waitingRequests_.empty()) {
Request *request = waitingRequests_.front();
waitingRequests_.pop();
-
- request->_d()->cancel();
- completeRequest(request);
+ cancelRequest(request);
}
/* Make sure no requests are pending. */
Camera::Private *data = camera->_d();
ASSERT(data->queuedRequests_.empty());
+
+ data->requestSequence_ = 0;
}
/**
@@ -405,10 +468,8 @@ void PipelineHandler::doQueueRequest(Request *request)
}
int ret = queueRequestDevice(camera, request);
- if (ret) {
- request->_d()->cancel();
- completeRequest(request);
- }
+ if (ret)
+ cancelRequest(request);
}
/**
@@ -504,6 +565,75 @@ void PipelineHandler::completeRequest(Request *request)
}
/**
+ * \brief Cancel request and signal its completion
+ * \param[in] request The request to cancel
+ *
+ * This function cancels and completes the request. The same rules as for
+ * completeRequest() apply.
+ */
+void PipelineHandler::cancelRequest(Request *request)
+{
+ request->_d()->cancel();
+ completeRequest(request);
+}
+
+/**
+ * \brief Retrieve the absolute path to a platform configuration file
+ * \param[in] subdir The pipeline handler specific subdirectory name
+ * \param[in] name The configuration file name
+ *
+ * This function locates a named platform configuration file and returns
+ * its absolute path to the pipeline handler. It searches the following
+ * directories, in order:
+ *
+ * - If libcamera is not installed, the src/libcamera/pipeline/\<subdir\>/data/
+ * directory within the source tree ; otherwise
+ * - The system data (share/libcamera/pipeline/\<subdir\>) directory.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * \return The full path to the pipeline handler configuration file, or an empty
+ * string if no configuration file can be found
+ */
+std::string PipelineHandler::configurationFile(const std::string &subdir,
+ const std::string &name) const
+{
+ std::string confPath;
+ struct stat statbuf;
+ int ret;
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding pipeline handler.
+ */
+ std::string confDir = root + "src/libcamera/pipeline/";
+ confPath = confDir + subdir + "/data/" + name;
+
+ LOG(Pipeline, Info)
+ << "libcamera is not installed. Loading platform configuration file from '"
+ << confPath << "'";
+ } else {
+ /* Else look in the system locations. */
+ confPath = std::string(LIBCAMERA_DATA_DIR)
+ + "/pipeline/" + subdir + '/' + name;
+ }
+
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+
+ return std::string();
+}
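
For example (subdirectory and file names hypothetical):

    std::string path = configurationFile("example", "timings.yaml");
    if (path.empty())
            return -ENOENT;    /* no platform configuration available */
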
+
+/**
* \brief Register a camera to the camera manager and pipeline handler
* \param[in] camera The camera to be added
*
@@ -516,15 +646,20 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
{
cameras_.push_back(camera);
- if (mediaDevices_.empty())
- LOG(Pipeline, Fatal)
- << "Registering camera with no media devices!";
+ if (mediaDevices_.empty()) {
+ /*
+ * For virtual devices with no MediaDevice, there are no system
+ * devices to register.
+ */
+ manager_->_d()->addCamera(std::move(camera));
+ return;
+ }
/*
* Walk the entity list and map the devnums of all capture video nodes
* to the camera.
*/
- std::vector<dev_t> devnums;
+ std::vector<int64_t> devnums;
for (const std::shared_ptr<MediaDevice> &media : mediaDevices_) {
for (const MediaEntity *entity : media->entities()) {
if (entity->pads().size() == 1 &&
@@ -536,7 +671,14 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
}
}
- manager_->addCamera(std::move(camera), devnums);
+ /*
+ * Store the associated devices as a property of the camera to allow
+ * systems to identify which devices are managed by libcamera.
+ */
+ Camera::Private *data = camera->_d();
+ data->properties_.set(properties::SystemDevices, devnums);
+
+ manager_->_d()->addCamera(std::move(camera));
}
/**
@@ -553,7 +695,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
*/
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
- media->disconnected.connect(this, [=]() { mediaDeviceDisconnected(media); });
+ media->disconnected.connect(this, [this, media] { mediaDeviceDisconnected(media); });
}
/**
@@ -597,13 +739,13 @@ void PipelineHandler::disconnect()
*/
std::vector<std::weak_ptr<Camera>> cameras{ std::move(cameras_) };
- for (std::weak_ptr<Camera> ptr : cameras) {
+ for (const std::weak_ptr<Camera> &ptr : cameras) {
std::shared_ptr<Camera> camera = ptr.lock();
if (!camera)
continue;
camera->disconnect();
- manager_->removeCamera(camera);
+ manager_->_d()->removeCamera(camera);
}
}
@@ -624,27 +766,32 @@ void PipelineHandler::disconnect()
*/
/**
- * \class PipelineHandlerFactory
- * \brief Registration of PipelineHandler classes and creation of instances
+ * \fn PipelineHandler::cameraManager() const
+ * \brief Retrieve the CameraManager that this pipeline handler belongs to
+ * \context This function is \threadsafe.
+ * \return The CameraManager for this pipeline handler
+ */
+
+/**
+ * \class PipelineHandlerFactoryBase
+ * \brief Base class for pipeline handler factories
*
- * To facilitate discovery and instantiation of PipelineHandler classes, the
- * PipelineHandlerFactory class maintains a registry of pipeline handler
- * classes. Each PipelineHandler subclass shall register itself using the
- * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
- * instance of a PipelineHandlerFactory subclass and register it with the
- * static list of factories.
+ * The PipelineHandlerFactoryBase class is the base of all specializations of
+ * the PipelineHandlerFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a pipeline handler factory
+ * \brief Construct a pipeline handler factory base
* \param[in] name Name of the pipeline handler class
*
- * Creating an instance of the factory registers is with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
* The factory \a name is used for debug purpose and shall be unique.
*/
-PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+PipelineHandlerFactoryBase::PipelineHandlerFactoryBase(const char *name)
: name_(name)
{
registerType(this);
@@ -657,15 +804,15 @@ PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
* \return A shared pointer to a new instance of the PipelineHandler subclass
* corresponding to the factory
*/
-std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
+std::shared_ptr<PipelineHandler> PipelineHandlerFactoryBase::create(CameraManager *manager) const
{
- PipelineHandler *handler = createInstance(manager);
+ std::unique_ptr<PipelineHandler> handler = createInstance(manager);
handler->name_ = name_.c_str();
- return std::shared_ptr<PipelineHandler>(handler);
+ return std::shared_ptr<PipelineHandler>(std::move(handler));
}
/**
- * \fn PipelineHandlerFactory::name()
+ * \fn PipelineHandlerFactoryBase::name()
* \brief Retrieve the factory name
* \return The factory name
*/
@@ -677,9 +824,10 @@ std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *m
* The caller is responsible to guarantee the uniqueness of the pipeline handler
* name.
*/
-void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
+void PipelineHandlerFactoryBase::registerType(PipelineHandlerFactoryBase *factory)
{
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
+ std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
factories.push_back(factory);
}
@@ -688,34 +836,77 @@ void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
* \brief Retrieve the list of all pipeline handler factories
* \return the list of pipeline handler factories
*/
-std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
+std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories()
{
/*
* The static factories map is defined inside the function to ensure
* it gets initialized on first use, without any dependency on
* link order.
*/
- static std::vector<PipelineHandlerFactory *> factories;
+ static std::vector<PipelineHandlerFactoryBase *> factories;
return factories;
}
/**
- * \fn PipelineHandlerFactory::createInstance()
- * \brief Create an instance of the PipelineHandler corresponding to the factory
- * \param[in] manager The camera manager
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
+ * \return The factory of the pipeline with name \a name, or nullptr if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
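
A usage sketch, assuming a CameraManager pointer named manager:

    std::shared_ptr<PipelineHandler> handler;
    const PipelineHandlerFactoryBase *factory =
            PipelineHandlerFactoryBase::getFactoryByName("vivid");
    if (factory)
            handler = factory->create(manager);
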
+
+/**
+ * \class PipelineHandlerFactory
+ * \brief Registration of PipelineHandler classes and creation of instances
+ * \tparam _PipelineHandler The pipeline handler class type for this factory
*
- * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
- * macro. It creates a pipeline handler instance associated with the camera
- * \a manager.
+ * To facilitate discovery and instantiation of PipelineHandler classes, the
+ * PipelineHandlerFactory class implements auto-registration of pipeline
+ * handlers. Each PipelineHandler subclass shall register itself using the
+ * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
+ * instance of a PipelineHandlerFactory and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+ * \brief Construct a pipeline handler factory
+ * \param[in] name Name of the pipeline handler class
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the factories() function.
*
- * \return a pointer to a newly constructed instance of the PipelineHandler
- * subclass corresponding to the factory
+ * The factory \a name is used for debug purposes and shall be unique.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::createInstance() const
+ * \brief Create an instance of the PipelineHandler corresponding to the factory
+ * \param[in] manager The camera manager
+ * \return A unique pointer to a newly constructed instance of the
+ * PipelineHandler subclass corresponding to the factory
*/
/**
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
diff --git a/src/libcamera/pixel_format.cpp b/src/libcamera/pixel_format.cpp
index 997aeb8a..314179a8 100644
--- a/src/libcamera/pixel_format.cpp
+++ b/src/libcamera/pixel_format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.cpp - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#include <libcamera/formats.h>
@@ -140,4 +140,16 @@ PixelFormat PixelFormat::fromString(const std::string &name)
return PixelFormatInfo::info(name).format;
}
+/**
+ * \brief Insert a text representation of a PixelFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const PixelFormat &f)
+{
+ out << f.toString();
+ return out;
+}
+
} /* namespace libcamera */
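
With the inserter, a PixelFormat can be streamed directly:

    std::cout << formats::NV12 << std::endl;    /* prints "NV12" */
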
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 0e6b4e1d..bc9833f4 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
#include "libcamera/internal/process.h"
@@ -10,7 +10,6 @@
#include <algorithm>
#include <dirent.h>
#include <fcntl.h>
-#include <iostream>
#include <list>
#include <signal.h>
#include <string.h>
@@ -189,7 +188,6 @@ const struct sigaction &ProcessManager::oldsa() const
return oldsa_;
}
-
/**
* \class Process
* \brief Process object
@@ -263,14 +261,16 @@ int Process::start(const std::string &path,
closeAllFdsExcept(fds);
- unsetenv("LIBCAMERA_LOG_FILE");
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (file && strcmp(file, "syslog"))
+ unsetenv("LIBCAMERA_LOG_FILE");
const char **argv = new const char *[args.size() + 2];
unsigned int len = args.size();
argv[0] = path.c_str();
for (unsigned int i = 0; i < len; i++)
- argv[i+1] = args[i].c_str();
- argv[len+1] = nullptr;
+ argv[i + 1] = args[i].c_str();
+ argv[len + 1] = nullptr;
execv(path.c_str(), (char **)argv);
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
deleted file mode 100644
index f917e334..00000000
--- a/src/libcamera/property_ids.cpp.in
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * property_ids.cpp : Property ID list
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-/**
- * \file property_ids.h
- * \brief Camera property identifiers
- */
-
-namespace libcamera {
-
-/**
- * \brief Namespace for libcamera properties
- */
-namespace properties {
-
-${controls_doc}
-
-/**
- * \brief Namespace for libcamera draft properties
- */
-namespace draft {
-
-${draft_controls_doc}
-
-} /* namespace draft */
-
-#ifndef __DOXYGEN__
-/*
- * Keep the properties definitions hidden from doxygen as it incorrectly parses
- * them as functions.
- */
-${controls_def}
-
-namespace draft {
-
-${draft_controls_def}
-
-} /* namespace draft */
-#endif
-
-/**
- * \brief List of all supported libcamera properties
- */
-extern const ControlIdMap properties {
-${controls_map}
-};
-
-} /* namespace properties */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids_core.yaml
index 12ecbce5..834454a4 100644
--- a/src/libcamera/property_ids.yaml
+++ b/src/libcamera/property_ids_core.yaml
@@ -2,8 +2,9 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
+vendor: libcamera
controls:
- Location:
type: int32_t
@@ -29,10 +30,10 @@ controls:
- Rotation:
type: int32_t
description: |
- The camera rotation is expressed as the angular difference in degrees
- between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected
- on the image sensor pixel array.
+ The camera physical mounting rotation. It is expressed as the angular
+ difference in degrees between two reference systems, one relative to the
+ camera module, and one defined on the external world scene to be
+ captured when projected on the image sensor pixel array.
A camera sensor has a 2-dimensional reference system 'Rc' defined by
its pixel array read-out order. The origin is set to the first pixel
@@ -678,37 +679,26 @@ controls:
\todo Turn this property into a "maximum control value" for the
ScalerCrop control once "dynamic" controls have been implemented.
- # ----------------------------------------------------------------------------
- # Draft properties section
+ - SensorSensitivity:
+ type: float
+ description: |
+ The relative sensitivity of the chosen sensor mode.
- - ColorFilterArrangement:
- type: int32_t
- draft: true
+ Some sensors have readout modes with different sensitivities. For example,
+ a binned camera mode might, with the same exposure and gains, produce
+ twice the signal level of the full resolution readout. This would be
+ signalled by the binned mode, when it is chosen, indicating a value here
+ that is twice that of the full resolution mode. This value will be valid
+ after the configure method has returned successfully.
+
+ - SystemDevices:
+ type: int64_t
+ size: [n]
description: |
- The arrangement of color filters on sensor; represents the colors in the
- top-left 2x2 section of the sensor, in reading order. Currently
- identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
- enum:
- - name: RGGB
- value: 0
- description: RGGB Bayer pattern
- - name: GRBG
- value: 1
- description: GRBG Bayer pattern
- - name: GBRG
- value: 2
- description: GBRG Bayer pattern
- - name: BGGR
- value: 3
- description: BGGR Bayer pattern
- - name: RGB
- value: 4
- description: |
- Sensor is not Bayer; output has 3 16-bit values for each pixel,
- instead of just 1 16-bit value per pixel.
- - name: MONO
- value: 5
- description: |
- Sensor is not Bayer; output consists of a single colour channel.
+ A list of integer values of type dev_t denoting the major and minor
+ device numbers of the underlying devices used in the operation of this
+ camera.
+
+ Different cameras may report identical devices.
...
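
A sketch of how an application could consume the new SystemDevices property, assuming the std::optional-returning ControlList::get() and <sys/sysmacros.h> for major()/minor():

    const auto devices = camera->properties().get(properties::SystemDevices);
    if (devices) {
            for (int64_t dev : *devices)
                    std::cout << major(dev) << ":" << minor(dev) << "\n";
    }
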
diff --git a/src/libcamera/property_ids_draft.yaml b/src/libcamera/property_ids_draft.yaml
new file mode 100644
index 00000000..62f0e242
--- /dev/null
+++ b/src/libcamera/property_ids_draft.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: draft
+controls:
+ - ColorFilterArrangement:
+ type: int32_t
+ vendor: draft
+ description: |
+ The arrangement of color filters on sensor; represents the colors in the
+ top-left 2x2 section of the sensor, in reading order. Currently
+ identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+ enum:
+ - name: RGGB
+ value: 0
+ description: RGGB Bayer pattern
+ - name: GRBG
+ value: 1
+ description: GRBG Bayer pattern
+ - name: GBRG
+ value: 2
+ description: GBRG Bayer pattern
+ - name: BGGR
+ value: 3
+ description: BGGR Bayer pattern
+ - name: RGB
+ value: 4
+ description: |
+ Sensor is not Bayer; output has 3 16-bit values for each pixel,
+ instead of just 1 16-bit value per pixel.
+ - name: MONO
+ value: 5
+ description: |
+ Sensor is not Bayer; output consists of a single colour channel.
+
+...
diff --git a/src/libcamera/proxy/meson.build b/src/libcamera/proxy/meson.build
index 00ae5a8f..8bd1b135 100644
--- a/src/libcamera/proxy/meson.build
+++ b/src/libcamera/proxy/meson.build
@@ -13,7 +13,8 @@ foreach mojom : ipa_mojoms
'--libcamera_generate_proxy_cpp',
'--libcamera_output_path=@OUTPUT@',
'./' + '@INPUT@'
- ])
+ ],
+ env : py_build_env)
- libcamera_sources += proxy
+ libcamera_internal_sources += proxy
endforeach
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index 70c8760a..8c54a2e2 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-proxy_install_dir = get_option('libexecdir') / 'libcamera'
+proxy_install_dir = libcamera_libexecdir
# generate {pipeline}_ipa_proxy_worker.cpp
foreach mojom : ipa_mojoms
@@ -15,10 +15,10 @@ foreach mojom : ipa_mojoms
'--libcamera_generate_proxy_worker',
'--libcamera_output_path=@OUTPUT@',
'./' + '@INPUT@'
- ])
+ ],
+ env : py_build_env)
- proxy = executable(mojom['name'] + '_ipa_proxy',
- [worker, libcamera_generated_ipa_headers],
+ proxy = executable(mojom['name'] + '_ipa_proxy', worker,
install : true,
install_dir : proxy_install_dir,
dependencies : libcamera_private)
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
index 9bb08fda..f1d73a5c 100644
--- a/src/libcamera/pub_key.cpp
+++ b/src/libcamera/pub_key.cpp
@@ -2,12 +2,17 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.cpp - Public key signature verification
+ * Public key signature verification
*/
#include "libcamera/internal/pub_key.h"
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+#include <openssl/evp.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+#include <openssl/x509.h>
+#elif HAVE_GNUTLS
#include <gnutls/abstract.h>
#endif
@@ -33,7 +38,14 @@ namespace libcamera {
PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
: valid_(false)
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ const uint8_t *data = key.data();
+ pubkey_ = d2i_PUBKEY(nullptr, &data, key.size());
+ if (!pubkey_)
+ return;
+
+ valid_ = true;
+#elif HAVE_GNUTLS
int ret = gnutls_pubkey_init(&pubkey_);
if (ret < 0)
return;
@@ -52,7 +64,9 @@ PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
PubKey::~PubKey()
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ EVP_PKEY_free(pubkey_);
+#elif HAVE_GNUTLS
gnutls_pubkey_deinit(pubkey_);
#endif
}
@@ -76,7 +90,35 @@ PubKey::~PubKey()
bool PubKey::verify([[maybe_unused]] Span<const uint8_t> data,
[[maybe_unused]] Span<const uint8_t> sig) const
{
-#if HAVE_GNUTLS
+ if (!valid_)
+ return false;
+
+#if HAVE_CRYPTO
+ /*
+ * Create and initialize a public key algorithm context for signature
+ * verification.
+ */
+ EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pubkey_, nullptr);
+ if (!ctx)
+ return false;
+
+ if (EVP_PKEY_verify_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0 ||
+ EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
+ return false;
+ }
+
+ /* Calculate the SHA256 digest of the data. */
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+ SHA256(data.data(), data.size(), digest);
+
+ /* Decrypt the signature and verify it matches the digest. */
+ int ret = EVP_PKEY_verify(ctx, sig.data(), sig.size(), digest,
+ SHA256_DIGEST_LENGTH);
+ EVP_PKEY_CTX_free(ctx);
+ return ret == 1;
+#elif HAVE_GNUTLS
const gnutls_datum_t gnuTlsData{
const_cast<unsigned char *>(data.data()),
static_cast<unsigned int>(data.size())
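
A usage sketch of the resulting class under either backend (caller and buffer names hypothetical); the signature is expected to be a PKCS#1 v1.5 RSA signature over the SHA-256 digest of the data:

    PubKey key(publicKeyDer);    /* DER-encoded RSA public key */
    if (!key.isValid() || !key.verify(payload, signature))
            return false;        /* reject the payload */
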
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index 9c8da6ca..8c56ed30 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
#include "libcamera/internal/request.h"
@@ -28,10 +28,17 @@
* \brief Describes a frame capture request to be processed by a camera
*/
+/**
+ * \internal
+ * \file libcamera/internal/request.h
+ * \brief Internal support for request handling
+ */
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Request)
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class Request::Private
* \brief Request private data
@@ -129,7 +136,7 @@ void Request::Private::doCancelRequest()
Request *request = _o<Request>();
for (FrameBuffer *buffer : pending_) {
- buffer->cancel();
+ buffer->_d()->cancel();
camera_->bufferCompleted.emit(request, buffer);
}
@@ -158,9 +165,12 @@ void Request::Private::cancel()
}
/**
- * \copydoc Request::reuse()
+ * \brief Reset the request internal data to default values
+ *
+ * After calling this function, all request internal data will have default
+ * values as if the Request::Private instance had just been constructed.
*/
-void Request::Private::reuse()
+void Request::Private::reset()
{
sequence_ = 0;
cancelled_ = false;
@@ -297,6 +307,7 @@ void Request::Private::timeout()
emitPrepareCompleted();
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \enum Request::Status
@@ -349,7 +360,7 @@ Request::Request(Camera *camera, uint64_t cookie)
camera->_d()->validator());
/**
- * \todo: Add a validator for metadata controls.
+ * \todo Add a validator for metadata controls.
*/
metadata_ = new ControlList(controls::controls);
@@ -380,7 +391,7 @@ void Request::reuse(ReuseFlag flags)
{
LIBCAMERA_TRACEPOINT(request_reuse, this);
- _d()->reuse();
+ _d()->reset();
if (flags & ReuseBuffers) {
for (auto pair : bufferMap_) {
@@ -526,8 +537,8 @@ FrameBuffer *Request::findBuffer(const Stream *stream) const
*
* When requests are queued, they are given a sequential number to track the
* order in which requests are queued to a camera. This number counts all
- * requests given to a camera through its lifetime, and is not reset to zero
- * between camera stop/start sequences.
+ * requests given to a camera and is reset to zero between camera stop/start
+ * sequences.
*
* It can be used to support debugging and identifying the flow of requests
* through a pipeline, but does not guarantee to represent the sequence number
@@ -582,16 +593,28 @@ bool Request::hasPendingBuffers() const
std::string Request::toString() const
{
std::stringstream ss;
+ ss << *this;
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a Request into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The Request
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Request &r)
+{
/* Pending, Completed, Cancelled(X). */
static const char *statuses = "PCX";
/* Example Output: Request(55:P:1/2:6523524) */
- ss << "Request(" << sequence() << ":" << statuses[status_] << ":"
- << _d()->pending_.size() << "/" << bufferMap_.size() << ":"
- << cookie_ << ")";
+ out << "Request(" << r.sequence() << ":" << statuses[r.status()] << ":"
+ << r._d()->pending_.size() << "/" << r.buffers().size() << ":"
+ << r.cookie() << ")";
- return ss.str();
+ return out;
}
} /* namespace libcamera */
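
The inserter makes requests directly loggable:

    LOG(Request, Debug) << "completed " << *request;
    /* e.g. "completed Request(55:P:1/2:6523524)" */
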
diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
new file mode 100644
index 00000000..d19b5e2e
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * A camera sensor
+ */
+
+#include "libcamera/internal/camera_sensor.h"
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_object.h"
+
+/**
+ * \file camera_sensor.h
+ * \brief A camera sensor
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensor)
+
+/**
+ * \class CameraSensor
+ * \brief An abstract camera sensor
+ *
+ * The CameraSensor class eases handling of sensors for pipeline handlers by
+ * hiding the details of the kernel API and caching sensor information.
+ */
+
+/**
+ * \brief Destroy a CameraSensor
+ */
+CameraSensor::~CameraSensor() = default;
+
+/**
+ * \fn CameraSensor::model()
+ * \brief Retrieve the sensor model name
+ *
+ * The sensor model name is a free-formed string that uniquely identifies the
+ * sensor model.
+ *
+ * \return The sensor model name
+ */
+
+/**
+ * \fn CameraSensor::id()
+ * \brief Retrieve the sensor ID
+ *
+ * The sensor ID is a free-form string that uniquely identifies the sensor in
+ * the system. The ID satisfies the requirements to be used as a camera ID.
+ *
+ * \return The sensor ID
+ */
+
+/**
+ * \fn CameraSensor::entity()
+ * \brief Retrieve the sensor media entity
+ * \return The sensor media entity
+ */
+
+/**
+ * \fn CameraSensor::device()
+ * \brief Retrieve the camera sensor device
+ * \todo Remove this function by integrating DelayedControl with CameraSensor
+ * \return The camera sensor device
+ */
+
+/**
+ * \fn CameraSensor::focusLens()
+ * \brief Retrieve the focus lens controller
+ *
+ * \return The focus lens controller, or nullptr if no focus lens controller
+ * is connected to the sensor
+ */
+
+/**
+ * \fn CameraSensor::mbusCodes()
+ * \brief Retrieve the media bus codes supported by the camera sensor
+ *
+ * Any Bayer formats are listed using the sensor's native Bayer order,
+ * that is, with the effect of V4L2_CID_HFLIP and V4L2_CID_VFLIP undone
+ * (where these controls exist).
+ *
+ * \return The supported media bus codes sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::sizes()
+ * \brief Retrieve the supported frame sizes for a media bus code
+ * \param[in] mbusCode The media bus code for which sizes are requested
+ *
+ * \return The supported frame sizes for \a mbusCode sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::resolution()
+ * \brief Retrieve the camera sensor resolution
+ *
+ * The camera sensor resolution is the active pixel area size, clamped to the
+ * maximum frame size the sensor can produce if it is smaller than the active
+ * pixel area.
+ *
+ * \todo Consider whether it is desirable to distinguish between the maximum
+ * resolution the sensor can produce (also including upscaled ones) and the
+ * actual pixel array size by splitting this function in two.
+ *
+ * \return The camera sensor resolution in pixels
+ */
+
+/**
+ * \fn CameraSensor::getFormat()
+ * \brief Retrieve the best sensor format for a desired output
+ * \param[in] mbusCodes The list of acceptable media bus codes
+ * \param[in] size The desired size
+ * \param[in] maxSize The maximum size
+ *
+ * Media bus codes are selected from \a mbusCodes, which lists all acceptable
+ * codes in decreasing order of preference. Media bus codes supported by the
+ * sensor but not listed in \a mbusCodes are ignored. If none of the desired
+ * codes is supported, this function returns an error.
+ *
+ * \a size indicates the desired size at the output of the sensor. This function
+ * selects the best media bus code and size supported by the sensor according
+ * to the following criteria.
+ *
+ * - The desired \a size shall fit in the sensor output size to avoid the need
+ * to up-scale.
+ * - The sensor output size shall match the desired aspect ratio to avoid the
+ * need to crop the field of view.
+ * - The sensor output size shall be as small as possible to lower the required
+ * bandwidth.
+ * - The desired \a size shall be supported by one of the media bus codes listed
+ * in \a mbusCodes.
+ * - The desired \a size shall fit into the maximum size \a maxSize if it is not
+ * null.
+ *
+ * When multiple media bus codes can produce the same size, the code at the
+ * lowest position in \a mbusCodes is selected.
+ *
+ * The use of this function is optional, as the above criteria may not match the
+ * needs of all pipeline handlers. Pipeline handlers may implement custom
+ * sensor format selection when needed.
+ *
+ * The returned sensor output format is guaranteed to be acceptable by the
+ * setFormat() function without any modification.
+ *
+ * \return The best sensor output format matching the desired media bus codes
+ * and size on success, or an empty format otherwise.
+ */
+
+/**
+ * \fn CameraSensor::setFormat()
+ * \brief Set the sensor output format
+ * \param[in] format The desired sensor output format
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity.
+ *
+ * If flips are writable they are configured according to the desired Transform.
+ * Transform::Identity always corresponds to H/V flip being disabled if the
+ * controls are writable. Flips are set before the new format is applied as
+ * they can effectively change the Bayer pattern ordering.
+ *
+ * The ranges of any controls associated with the sensor are also updated.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::tryFormat()
+ * \brief Try the sensor output format
+ * \param[in] format The desired sensor output format
+ *
+ * The ranges of any controls associated with the sensor are not updated.
+ *
+ * \todo Add support for Transform by changing the format's Bayer ordering
+ * before calling subdev_->setFormat().
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::applyConfiguration()
+ * \brief Apply a sensor configuration to the camera sensor
+ * \param[in] config The sensor configuration
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity
+ * \param[out] sensorFormat Format applied to the sensor (optional)
+ *
+ * Apply to the camera sensor the configuration \a config.
+ *
+ * \todo The configuration shall be fully populated and if any of the fields
+ * specified cannot be applied exactly, an error code is returned.
+ *
+ * \return 0 if \a config is applied correctly to the camera sensor, a negative
+ * error code otherwise
+ */
+
+/**
+ * \brief Retrieve the image source stream
+ *
+ * Sensors that produce multiple streams do not guarantee that the image stream
+ * is always assigned number 0. This function allows callers to retrieve the
+ * image stream on the sensor's source pad, in order to configure the receiving
+ * side accordingly.
+ *
+ * \return The image source stream
+ */
+V4L2Subdevice::Stream CameraSensor::imageStream() const
+{
+ return { 0, 0 };
+}
+
+/**
+ * \brief Retrieve the embedded data source stream
+ *
+ * Some sensors produce embedded data in a stream separate from the image
+ * stream. This function indicates if the sensor supports this feature by
+ * returning the embedded data stream on the sensor's source pad if available,
+ * or an empty std::optional<> otherwise.
+ *
+ * \return The embedded data source stream
+ */
+std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const
+{
+ return {};
+}
+
+/**
+ * \brief Retrieve the format on the embedded data stream
+ *
+ * When an embedded data stream is available, this function returns the
+ * corresponding format on the sensor's source pad. The format may vary with
+ * the image stream format, and should therefore be retrieved after configuring
+ * the image stream.
+ *
+ * If the sensor doesn't support embedded data, this function returns a
+ * default-constructed format.
+ *
+ * \return The format on the embedded data stream
+ */
+V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const
+{
+ return {};
+}
+
+/**
+ * \brief Enable or disable the embedded data stream
+ * \param[in] enable True to enable the embedded data stream, false to disable it
+ *
+ * For sensors that support embedded data, this function enables or disables
+ * generation of embedded data. Some such sensors always produce embedded
+ * data, in which case this function returns -EISCONN if the caller attempts
+ * to disable embedded data.
+ *
+ * If the sensor doesn't support embedded data, this function returns 0 when \a
+ * enable is false, and -ENOSTR otherwise.
+ *
+ * \return 0 on success, or a negative error code otherwise
+ */
+int CameraSensor::setEmbeddedDataEnabled(bool enable)
+{
+ return enable ? -ENOSTR : 0;
+}
+
+/**
+ * \fn CameraSensor::properties()
+ * \brief Retrieve the camera sensor properties
+ * \return The list of camera sensor properties
+ */
+
+/**
+ * \fn CameraSensor::sensorInfo()
+ * \brief Assemble and return the camera sensor info
+ * \param[out] info The camera sensor info
+ *
+ * This function fills \a info with information that describes the camera sensor
+ * and its current configuration. The information combines static data (such as
+ * the sensor model or active pixel array size) and data specific to the
+ * current sensor configuration (such as the line length and pixel rate).
+ *
+ * Sensor information is only available for raw sensors. When called for a YUV
+ * sensor, this function returns -EINVAL.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::computeTransform()
+ * \brief Compute the Transform that gives the requested \a orientation
+ * \param[inout] orientation The desired image orientation
+ *
+ * This function computes the Transform that the pipeline handler should apply
+ * to the CameraSensor to obtain the requested \a orientation.
+ *
+ * The intended caller of this function is the validate() implementation of
+ * pipeline handlers, that pass in the application requested
+ * CameraConfiguration::orientation and obtain a Transform to apply to the
+ * camera sensor, likely at configure() time.
+ *
+ * If the requested \a orientation cannot be obtained, the \a orientation
+ * parameter is adjusted to report the current image orientation and
+ * Transform::Identity is returned.
+ *
+ * If the requested \a orientation can be obtained, the function computes a
+ * Transform and does not adjust \a orientation.
+ *
+ * Pipeline handlers are expected to verify if \a orientation has been
+ * adjusted by this function and set the CameraConfiguration::status to
+ * Adjusted accordingly.
+ *
+ * \return A Transform instance that applied to the CameraSensor produces images
+ * with \a orientation
+ */
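
A sketch of the intended use from a CameraConfiguration::validate() implementation, with names assumed from the typical pipeline handler pattern:

    Orientation requested = orientation;
    Transform transform = sensor->computeTransform(&requested);

    if (requested != orientation) {
            orientation = requested;
            status = Adjusted;    /* report the adjustment to the application */
    }
    /* apply 'transform' to the sensor at configure() time */
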
+
+/**
+ * \fn CameraSensor::bayerOrder()
+ * \brief Compute the Bayer order that results from the given Transform
+ * \param[in] t The Transform to apply to the sensor
+ *
+ * Some sensors change their Bayer order when they are h-flipped or v-flipped.
+ * This function computes and returns the Bayer order that would result from the
+ * given transform applied to the sensor.
+ *
+ * This function is valid only when the sensor produces raw Bayer formats.
+ *
+ * \return The Bayer order produced by the sensor when the Transform is applied
+ */
+
+/**
+ * \fn CameraSensor::controls()
+ * \brief Retrieve the supported V4L2 controls and their information
+ *
+ * Control information is updated automatically to reflect the current sensor
+ * configuration when the setFormat() function is called, without invalidating
+ * any iterator on the ControlInfoMap.
+ *
+ * \return A map of the V4L2 controls supported by the sensor
+ */
+
+/**
+ * \fn CameraSensor::getControls()
+ * \brief Read V4L2 controls from the sensor
+ * \param[in] ids The list of controls to read, specified by their ID
+ *
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList. The control identifiers are defined by
+ * the V4L2 specification (V4L2_CID_*).
+ *
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
+ *
+ * \sa V4L2Device::getControls()
+ *
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
+ */
+
+/**
+ * \fn CameraSensor::setControls()
+ * \brief Write V4L2 controls to the sensor
+ * \param[in] ctrls The list of controls to write
+ *
+ * This function writes the value of all controls contained in \a ctrls, and
+ * stores the values actually applied to the device in the corresponding \a
+ * ctrls entry. The control identifiers are defined by the V4L2 specification
+ * (V4L2_CID_*).
+ *
+ * If any control in \a ctrls is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
+ *
+ * If an error occurs while writing the controls, the index of the first
+ * control that couldn't be written is returned. All controls below that index
+ * are written and their values are updated in \a ctrls, while all other
+ * controls are not written and their values are not changed.
+ *
+ * \sa V4L2Device::setControls()
+ *
+ * \return 0 on success or an error code otherwise
+ * \retval -EINVAL One of the controls is not supported or not accessible
+ * \retval i The index of the control that failed
+ */
+
+/**
+ * \fn CameraSensor::testPatternModes()
+ * \brief Retrieve all the supported test pattern modes of the camera sensor
+ *
+ * The test pattern mode values correspond to the controls::TestPattern control.
+ *
+ * \return The list of test pattern modes
+ */
+
+/**
+ * \fn CameraSensor::setTestPatternMode()
+ * \brief Set the test pattern mode for the camera sensor
+ * \param[in] mode The test pattern mode
+ *
+ * The new \a mode is applied to the sensor if it differs from the active test
+ * pattern mode. Otherwise, this function is a no-op. Setting the same test
+ * pattern mode for every frame thus incurs no performance penalty.
+ */
+
+/**
+ * \fn CameraSensor::sensorDelays()
+ * \brief Fetch the sensor delay values
+ *
+ * This function retrieves the delays that the sensor applies to controls. If
+ * the static properties database doesn't specify control delay values for the
+ * sensor, default delays that may be suitable are returned and a warning is
+ * logged.
+ *
+ * \return The sensor delay values
+ */
+
+/**
+ * \class CameraSensorFactoryBase
+ * \brief Base class for camera sensor factories
+ *
+ * The CameraSensorFactoryBase class is the base of all specializations of
+ * the CameraSensorFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
+
+/**
+ * \brief Construct a camera sensor factory base
+ * \param[in] name The camera sensor factory name
+ * \param[in] priority Priority order for factory selection
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ */
+CameraSensorFactoryBase::CameraSensorFactoryBase(const char *name, int priority)
+ : name_(name), priority_(priority)
+{
+ registerFactory(this);
+}
+
+/**
+ * \brief Create an instance of the CameraSensor corresponding to a media entity
+ * \param[in] entity The media entity on the source end of the sensor
+ *
+ * When multiple factories match the same \a entity, this function selects the
+ * matching factory with the highest priority as specified to the
+ * REGISTER_CAMERA_SENSOR() macro at factory registration time. If multiple
+ * matching factories have the same highest priority value, which factory gets
+ * selected is undefined and may vary between runs.
+ *
+ * \return A unique pointer to a new instance of the CameraSensor subclass
+ * matching the entity, or a null pointer if no such factory exists
+ */
+std::unique_ptr<CameraSensor> CameraSensorFactoryBase::create(MediaEntity *entity)
+{
+ const std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ for (const CameraSensorFactoryBase *factory : factories) {
+ std::variant<std::unique_ptr<CameraSensor>, int> result =
+ factory->match(entity);
+
+ if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result)) {
+ LOG(CameraSensor, Debug)
+ << "Entity '" << entity->name() << "' matched by "
+ << factory->name();
+ return std::get<std::unique_ptr<CameraSensor>>(std::move(result));
+ }
+
+ if (std::get<int>(result)) {
+ LOG(CameraSensor, Error)
+ << "Failed to create sensor for '"
+				<< entity->name() << "': " << std::get<int>(result);
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+/**
+ * \fn CameraSensorFactoryBase::name()
+ * \brief Retrieve the camera sensor factory name
+ * \return The name of the factory
+ */
+
+/**
+ * \fn CameraSensorFactoryBase::priority()
+ * \brief Retrieve the priority value for the factory
+ * \return The priority value for the factory
+ */
+
+/**
+ * \brief Retrieve the list of all camera sensor factories
+ *
+ * The factories are sorted in decreasing priority order.
+ *
+ * \return The list of camera sensor factories
+ */
+std::vector<CameraSensorFactoryBase *> &CameraSensorFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \brief Add a camera sensor class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor
+ */
+void CameraSensorFactoryBase::registerFactory(CameraSensorFactoryBase *factory)
+{
+ std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ auto pos = std::upper_bound(factories.begin(), factories.end(), factory,
+ [](const CameraSensorFactoryBase *value,
+ const CameraSensorFactoryBase *elem) {
+ return value->priority() > elem->priority();
+ });
+ factories.insert(pos, factory);
+}
+
+/**
+ * \class CameraSensorFactory
+ * \brief Registration of CameraSensorFactory classes and creation of instances
+ * \tparam _CameraSensor The camera sensor class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensor classes, the
+ * CameraSensorFactory class implements auto-registration of camera sensors.
+ * Each CameraSensor subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR() macro, which will create a corresponding instance
+ * of a CameraSensorFactory subclass and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn CameraSensorFactory::CameraSensorFactory()
+ * \brief Construct a camera sensor factory
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorFactoryBase::factories()
+ * function.
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR(sensor, priority)
+ * \brief Register a camera sensor type to the sensor factory
+ * \param[in] sensor Class name of the CameraSensor derived class to register
+ * \param[in] priority Priority order for factory selection
+ *
+ * Register a CameraSensor subclass with the factory and make it available to
+ * try and match sensors. The subclass needs to implement a static match
+ * function:
+ *
+ * \code{.cpp}
+ * static std::variant<std::unique_ptr<CameraSensor>, int> match(MediaEntity *entity);
+ * \endcode
+ *
+ * The function tests if the sensor class supports the camera sensor identified
+ * by a MediaEntity. If so, it creates a new instance of the sensor class. The
+ * return value is a variant that contains
+ *
+ * - A new instance of the camera sensor class if the entity matched and
+ * creation succeeded ;
+ * - A non-zero error code if the entity matched and the creation failed ; or
+ * - A zero error code if the entity didn't match.
+ *
+ * When multiple factories can support the same MediaEntity (as in the match()
+ * function of multiple factories returning true for the same entity), the \a
+ * priority argument selects which factory will be used. See
+ * CameraSensorFactoryBase::create() for more information.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor_legacy.cpp
index 64f7f12c..32989c19 100644
--- a/src/libcamera/camera_sensor.cpp
+++ b/src/libcamera/sensor/camera_sensor_legacy.cpp
@@ -2,79 +2,177 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.cpp - A camera sensor
+ * camera_sensor_legacy.cpp - A V4L2-backed camera sensor
*/
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/media_device.h"
-
#include <algorithm>
+#include <cmath>
#include <float.h>
#include <iomanip>
#include <limits.h>
-#include <math.h>
+#include <map>
+#include <memory>
#include <string.h>
+#include <string>
+#include <vector>
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
-#include <libcamera/base/utils.h>
+#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
#include "libcamera/internal/sysfs.h"
-
-/**
- * \file camera_sensor.h
- * \brief A camera sensor
- */
+#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
-LOG_DEFINE_CATEGORY(CameraSensor)
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorLegacy : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorLegacy(const MediaEntity *entity);
+ ~CameraSensorLegacy();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorLegacy)
+
+ int init();
+ int generateId();
+ int validateSensorDriver();
+ void initVimcDefaultProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int initProperties();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+ int discoverAncillaryDevices();
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+ unsigned int pad_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ const BayerFormat *bayerFormat_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
/**
- * \class CameraSensor
+ * \class CameraSensorLegacy
* \brief A camera sensor based on V4L2 subdevices
*
- * The CameraSensor class eases handling of sensors for pipeline handlers by
- * hiding the details of the V4L2 subdevice kernel API and caching sensor
- * information.
- *
* The implementation is currently limited to sensors that expose a single V4L2
* subdevice with a single pad. It will be extended to support more complex
* devices as the needs arise.
*/
-/**
- * \brief Construct a CameraSensor
- * \param[in] entity The media entity backing the camera sensor
- *
- * Once constructed the instance must be initialized with init().
- */
-CameraSensor::CameraSensor(const MediaEntity *entity)
+CameraSensorLegacy::CameraSensorLegacy(const MediaEntity *entity)
: entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
- bayerFormat_(nullptr), properties_(properties::properties)
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
{
}
-/**
- * \brief Destroy a CameraSensor
- */
-CameraSensor::~CameraSensor()
+CameraSensorLegacy::~CameraSensorLegacy() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorLegacy::match(MediaEntity *entity)
{
+ std::unique_ptr<CameraSensorLegacy> sensor =
+ std::make_unique<CameraSensorLegacy>(entity);
+
+ int ret = sensor->init();
+ if (ret)
+ return { ret };
+
+ return { std::move(sensor) };
}
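+/*
+ * Editor's sketch, not part of this patch: one way a hypothetical caller
+ * could consume the std::variant returned by match(). Success carries the
+ * sensor instance; the int alternative carries a negative error code for
+ * fatal probe failures (or 0 where a backend declines to match). Assumes
+ * <variant> is available in the including context.
+ */
+#if 0 /* illustration only */
+static std::unique_ptr<CameraSensor> probeSensor(MediaEntity *entity)
+{
+	auto result = CameraSensorLegacy::match(entity);
+
+	if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result))
+		return std::get<std::unique_ptr<CameraSensor>>(std::move(result));
+
+	int err = std::get<int>(result);
+	if (err)
+		LOG(CameraSensor, Error)
+			<< "Sensor probe failed: " << strerror(-err);
+
+	return nullptr;
+}
+#endif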
-/**
- * \brief Initialize the camera sensor instance
- *
- * This function performs the initialisation steps of the CameraSensor that may
- * fail. It shall be called once and only once after constructing the instance.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::init()
+int CameraSensorLegacy::init()
{
for (const MediaPad *pad : entity_->pads()) {
if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
@@ -152,7 +250,12 @@ int CameraSensor::init()
*/
if (entity_->device()->driver() == "vimc") {
initVimcDefaultProperties();
- return initProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
}
/* Get the color filter array pattern (only for RAW sensors). */
@@ -172,10 +275,53 @@ int CameraSensor::init()
if (ret)
return ret;
+ ret = discoverAncillaryDevices();
+ if (ret)
+ return ret;
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
}
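+/*
+ * Editor's note, worked example: with HBLANK forced to its minimum, the
+ * sensor starts with a well-defined minimum line length. For a
+ * hypothetical mode of width 1920 with an HBLANK minimum of 80 and a
+ * pixel rate of 200 MHz, an IPA that never modifies HBLANK may assume:
+ *
+ *   lineLength   = 1920 + 80 = 2000 pixels
+ *   lineDuration = 2000 / 200000000 Hz = 10 us
+ */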
-int CameraSensor::validateSensorDriver()
+int CameraSensorLegacy::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+	 * Virtual sensors are not described in firmware.
+	 *
+	 * Verify that this is a platform device, and construct the ID from
+	 * the device path and the sensor model.
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
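+/*
+ * Editor's note, worked example: for a hypothetical sensor at
+ * devPath = "/sys/devices/platform/soc/fe801000.csi/i2c-0/0-0010" with
+ * model "imx477" and no firmware node, the generated ID becomes
+ * "platform/soc/fe801000.csi/i2c-0/0-0010 imx477".
+ */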
+
+int CameraSensorLegacy::validateSensorDriver()
{
int err = 0;
@@ -213,6 +359,26 @@ int CameraSensor::validateSensorDriver()
}
/*
+	 * Verify whether the sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
* Make sure the required selection targets are supported.
*
* Failures in reading any of the targets are not deemed to be fatal,
@@ -234,7 +400,7 @@ int CameraSensor::validateSensorDriver()
LOG(CameraSensor, Warning)
<< "The PixelArraySize property has been defaulted to "
- << pixelArraySize_.toString();
+ << pixelArraySize_;
err = -EINVAL;
} else {
pixelArraySize_ = rect.size();
@@ -245,7 +411,7 @@ int CameraSensor::validateSensorDriver()
activeArea_ = Rectangle(pixelArraySize_);
LOG(CameraSensor, Warning)
<< "The PixelArrayActiveAreas property has been defaulted to "
- << activeArea_.toString();
+ << activeArea_;
err = -EINVAL;
}
@@ -271,6 +437,7 @@ int CameraSensor::validateSensorDriver()
* required by the CameraSensor class.
*/
static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
V4L2_CID_EXPOSURE,
V4L2_CID_HBLANK,
V4L2_CID_PIXEL_RATE,
@@ -298,18 +465,14 @@ int CameraSensor::validateSensorDriver()
return 0;
}
-/*
- * \brief Initialize properties that cannot be intialized by the
- * regular initProperties() function for VIMC
- */
-void CameraSensor::initVimcDefaultProperties()
+void CameraSensorLegacy::initVimcDefaultProperties()
{
/* Use the largest supported size. */
pixelArraySize_ = sizes_.back();
activeArea_ = Rectangle(pixelArraySize_);
}
-void CameraSensor::initStaticProperties()
+void CameraSensorLegacy::initStaticProperties()
{
staticProps_ = CameraSensorProperties::get(model_);
if (!staticProps_)
@@ -321,7 +484,31 @@ void CameraSensor::initStaticProperties()
initTestPatternModes();
}
-void CameraSensor::initTestPatternModes()
+const CameraSensorProperties::SensorDelays &CameraSensorLegacy::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
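+/*
+ * Editor's sketch, not part of this patch: a pipeline handler could feed
+ * these delays to DelayedControls roughly as follows. The control set and
+ * priorityWrite values are illustrative assumptions.
+ */
+#if 0 /* illustration only */
+	const CameraSensorProperties::SensorDelays &delays = sensor->sensorDelays();
+
+	std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+		{ V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+		{ V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+		{ V4L2_CID_VBLANK, { delays.vblankDelay, true } },
+	};
+
+	auto delayedControls =
+		std::make_unique<DelayedControls>(sensor->device(), params);
+#endif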
+
+void CameraSensorLegacy::initTestPatternModes()
{
const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
if (v4l2TestPattern == controls().end()) {
@@ -365,7 +552,7 @@ void CameraSensor::initTestPatternModes()
}
}
-int CameraSensor::initProperties()
+int CameraSensorLegacy::initProperties()
{
model_ = subdev_->model();
properties_.set(properties::Model, utils::toAscii(model_));
@@ -380,18 +567,18 @@ int CameraSensor::initProperties()
/* Retrieve and register properties from the kernel interface. */
const ControlInfoMap &controls = subdev_->controls();
- int32_t propertyValue;
const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
if (orientation != controls.end()) {
int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
switch (v4l2Orientation) {
default:
LOG(CameraSensor, Warning)
<< "Unsupported camera location "
<< v4l2Orientation << ", setting to External";
- /* Fall-through */
+ [[fallthrough]];
case V4L2_CAMERA_ORIENTATION_EXTERNAL:
propertyValue = properties::CameraLocationExternal;
break;
@@ -409,8 +596,27 @@ int CameraSensor::initProperties()
const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
if (rotationControl != controls.end()) {
- propertyValue = rotationControl->second.def().get<int32_t>();
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
}
properties_.set(properties::PixelArraySize, pixelArraySize_);
@@ -443,50 +649,34 @@ int CameraSensor::initProperties()
return 0;
}
-/**
- * \fn CameraSensor::model()
- * \brief Retrieve the sensor model name
- *
- * The sensor model name is a free-formed string that uniquely identifies the
- * sensor model.
- *
- * \return The sensor model name
- */
-
-/**
- * \fn CameraSensor::id()
- * \brief Retrieve the sensor ID
- *
- * The sensor ID is a free-form string that uniquely identifies the sensor in
- * the system. The ID satisfies the requirements to be used as a camera ID.
- *
- * \return The sensor ID
- */
+int CameraSensorLegacy::discoverAncillaryDevices()
+{
+ int ret;
+
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
-/**
- * \fn CameraSensor::entity()
- * \brief Retrieve the sensor media entity
- * \return The sensor media entity
- */
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
-/**
- * \fn CameraSensor::mbusCodes()
- * \brief Retrieve the media bus codes supported by the camera sensor
- *
- * Any Bayer formats are listed using the sensor's native Bayer order,
- * that is, with the effect of V4L2_CID_HFLIP and V4L2_CID_VFLIP undone
- * (where these controls exist).
- *
- * \return The supported media bus codes sorted in increasing order
- */
+ return 0;
+}
-/**
- * \brief Retrieve the supported frame sizes for a media bus code
- * \param[in] mbusCode The media bus code for which sizes are requested
- *
- * \return The supported frame sizes for \a mbusCode sorted in increasing order
- */
-const std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const
+std::vector<Size> CameraSensorLegacy::sizes(unsigned int mbusCode) const
{
std::vector<Size> sizes;
@@ -503,117 +693,14 @@ const std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const
return sizes;
}
-/**
- * \brief Retrieve the camera sensor resolution
- *
- * The camera sensor resolution is the active pixel area size, clamped to the
- * maximum frame size the sensor can produce if it is smaller than the active
- * pixel area.
- *
- * \todo Consider if it desirable to distinguish between the maximum resolution
- * the sensor can produce (also including upscaled ones) and the actual pixel
- * array size by splitting this function in two.
- *
- * \return The camera sensor resolution in pixels
- */
-Size CameraSensor::resolution() const
+Size CameraSensorLegacy::resolution() const
{
return std::min(sizes_.back(), activeArea_.size());
}
-/**
- * \fn CameraSensor::testPatternModes()
- * \brief Retrieve all the supported test pattern modes of the camera sensor
- * The test pattern mode values correspond to the controls::TestPattern control.
- *
- * \return The list of test pattern modes
- */
-
-/**
- * \brief Set the test pattern mode for the camera sensor
- * \param[in] mode The test pattern mode
- *
- * The new \a mode is applied to the sensor if it differs from the active test
- * pattern mode. Otherwise, this function is a no-op. Setting the same test
- * pattern mode for every frame thus incurs no performance penalty.
- */
-int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternMode_ == mode)
- return 0;
-
- return applyTestPatternMode(mode);
-}
-
-int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternModes_.empty()) {
- LOG(CameraSensor, Error)
- << "Camera sensor does not support test pattern modes.";
- return 0;
- }
-
- auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
- mode);
- if (it == testPatternModes_.end()) {
- LOG(CameraSensor, Error) << "Unsupported test pattern mode "
- << mode;
- return -EINVAL;
- }
-
- LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
-
- int32_t index = staticProps_->testPatternModes.at(mode);
- ControlList ctrls{ controls() };
- ctrls.set(V4L2_CID_TEST_PATTERN, index);
-
- int ret = setControls(&ctrls);
- if (ret)
- return ret;
-
- testPatternMode_ = mode;
-
- return 0;
-}
-
-/**
- * \brief Retrieve the best sensor format for a desired output
- * \param[in] mbusCodes The list of acceptable media bus codes
- * \param[in] size The desired size
- *
- * Media bus codes are selected from \a mbusCodes, which lists all acceptable
- * codes in decreasing order of preference. Media bus codes supported by the
- * sensor but not listed in \a mbusCodes are ignored. If none of the desired
- * codes is supported, it returns an error.
- *
- * \a size indicates the desired size at the output of the sensor. This function
- * selects the best media bus code and size supported by the sensor according
- * to the following criteria.
- *
- * - The desired \a size shall fit in the sensor output size to avoid the need
- * to up-scale.
- * - The sensor output size shall match the desired aspect ratio to avoid the
- * need to crop the field of view.
- * - The sensor output size shall be as small as possible to lower the required
- * bandwidth.
- * - The desired \a size shall be supported by one of the media bus code listed
- * in \a mbusCodes.
- *
- * When multiple media bus codes can produce the same size, the code at the
- * lowest position in \a mbusCodes is selected.
- *
- * The use of this function is optional, as the above criteria may not match the
- * needs of all pipeline handlers. Pipeline handlers may implement custom
- * sensor format selection when needed.
- *
- * The returned sensor output format is guaranteed to be acceptable by the
- * setFormat() function without any modification.
- *
- * \return The best sensor output format matching the desired media bus codes
- * and size on success, or an empty format otherwise.
- */
-V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const
+V4L2SubdeviceFormat
+CameraSensorLegacy::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
{
unsigned int desiredArea = size.width * size.height;
unsigned int bestArea = UINT_MAX;
@@ -630,11 +717,15 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
for (const SizeRange &range : formats->second) {
const Size &sz = range.max;
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
if (sz.width < size.width || sz.height < size.height)
continue;
float ratio = static_cast<float>(sz.width) / sz.height;
- float ratioDiff = fabsf(ratio - desiredRatio);
+ float ratioDiff = std::abs(ratio - desiredRatio);
unsigned int area = sz.width * sz.height;
unsigned int areaDiff = area - desiredArea;
@@ -656,7 +747,7 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
}
V4L2SubdeviceFormat format{
- .mbus_code = bestCode,
+ .code = bestCode,
.size = *bestSize,
.colorSpace = ColorSpace::Raw,
};
@@ -664,125 +755,107 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
return format;
}
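+/*
+ * Editor's sketch, not part of this patch: callers list acceptable media
+ * bus codes in decreasing order of preference, and may bound the selected
+ * sensor output with maxSize. The codes and sizes here are illustrative.
+ */
+#if 0 /* illustration only */
+	const std::vector<unsigned int> codes{
+		MEDIA_BUS_FMT_SRGGB12_1X12,
+		MEDIA_BUS_FMT_SRGGB10_1X10,
+	};
+
+	V4L2SubdeviceFormat format =
+		sensor->getFormat(codes, Size(1920, 1080), Size(4056, 3040));
+	if (!format.code) {
+		/* No supported code produces an acceptable size. */
+	}
+#endif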
-/**
- * \brief Set the sensor output format
- * \param[in] format The desired sensor output format
- *
- * The ranges of any controls associated with the sensor are also updated.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format)
+int CameraSensorLegacy::setFormat(V4L2SubdeviceFormat *format, Transform transform)
{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
int ret = subdev_->setFormat(pad_, format);
if (ret)
return ret;
- updateControlInfo();
+ subdev_->updateControlInfo();
return 0;
}
-/**
- * \brief Retrieve the supported V4L2 controls and their information
- *
- * Control information is updated automatically to reflect the current sensor
- * configuration when the setFormat() function is called, without invalidating
- * any iterator on the ControlInfoMap. A manual update can also be forced by
- * calling the updateControlInfo() function for pipeline handlers that change
- * the sensor configuration wihtout using setFormat().
- *
- * \return A map of the V4L2 controls supported by the sensor
- */
-const ControlInfoMap &CameraSensor::controls() const
+int CameraSensorLegacy::tryFormat(V4L2SubdeviceFormat *format) const
{
- return subdev_->controls();
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
}
-/**
- * \brief Read V4L2 controls from the sensor
- * \param[in] ids The list of controls to read, specified by their ID
- *
- * This function reads the value of all controls contained in \a ids, and
- * returns their values as a ControlList. The control identifiers are defined by
- * the V4L2 specification (V4L2_CID_*).
- *
- * If any control in \a ids is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
- * during validation of the requested controls, no control is read and this
- * function returns an empty control list.
- *
- * \sa V4L2Device::getControls()
- *
- * \return The control values in a ControlList on success, or an empty list on
- * error
- */
-ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
+int CameraSensorLegacy::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
{
- return subdev_->getControls(ids);
-}
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
-/**
- * \brief Write V4L2 controls to the sensor
- * \param[in] ctrls The list of controls to write
- *
- * This function writes the value of all controls contained in \a ctrls, and
- * stores the values actually applied to the device in the corresponding \a
- * ctrls entry. The control identifiers are defined by the V4L2 specification
- * (V4L2_CID_*).
- *
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
- * error occurs during validation of the requested controls, no control is
- * written and this function returns -EINVAL.
- *
- * If an error occurs while writing the controls, the index of the first
- * control that couldn't be written is returned. All controls below that index
- * are written and their values are updated in \a ctrls, while all other
- * controls are not written and their values are not changed.
- *
- * \sa V4L2Device::setControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
- */
-int CameraSensor::setControls(ControlList *ctrls)
-{
- return subdev_->setControls(ctrls);
-}
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			       return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
-/**
- * \fn CameraSensor::device()
- * \brief Retrieve the camera sensor device
- * \todo Remove this function by integrating DelayedControl with CameraSensor
- * \return The camera sensor device
- */
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+	 * \todo The actual size computation is for now ignored and only the
+	 * output size is considered. This implies that resolutions obtained
+	 * with different cropping/subsampling settings look identical, and
+	 * only the first one found is considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
-/**
- * \fn CameraSensor::properties()
- * \brief Retrieve the camera sensor properties
- * \return The list of camera sensor properties
- */
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
-/**
- * \brief Assemble and return the camera sensor info
- * \param[out] info The camera sensor info
- *
- * This function fills \a info with information that describes the camera sensor
- * and its current configuration. The information combines static data (such as
- * the the sensor model or active pixel array size) and data specific to the
- * current sensor configuration (such as the line length and pixel rate).
- *
- * Sensor information is only available for raw sensors. When called for a YUV
- * sensor, this function returns -EINVAL.
- *
- * Pipeline handlers that do not change the sensor format using the setFormat()
- * function may need to call updateControlInfo() beforehand, to ensure all the
- * control ranges are up to date.
- *
- * \return 0 on success, a negative error code otherwise
- */
-int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+	 * This is relevant if the transform has changed the Bayer pattern
+	 * order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+int CameraSensorLegacy::sensorInfo(IPACameraSensorInfo *info) const
{
if (!bayerFormat_)
return -EINVAL;
@@ -823,9 +896,12 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
ret = subdev_->getFormat(pad_, &format);
if (ret)
return ret;
- info->bitsPerPixel = format.bitsPerPixel();
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
info->outputSize = format.size;
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
/*
* Retrieve the pixel rate, line length and minimum/maximum frame
* duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
@@ -840,10 +916,12 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
return -EINVAL;
}
- int32_t hblank = ctrls.get(V4L2_CID_HBLANK).get<int32_t>();
- info->lineLength = info->outputSize.width + hblank;
info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
@@ -851,51 +929,117 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
return 0;
}
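+/*
+ * Editor's note, worked example: an IPA can derive frame timings from
+ * these fields. For a hypothetical 1920x1080 mode with pixelRate =
+ * 200 MHz, minLineLength = 2000 and minFrameLength = 1100:
+ *
+ *   lineDuration     = minLineLength / pixelRate = 10 us
+ *   minFrameDuration = minLineLength * minFrameLength / pixelRate = 11 ms
+ */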
-/**
- * \fn void CameraSensor::updateControlInfo()
- * \brief Update the sensor's ControlInfoMap in case they have changed
- * \sa V4L2Device::updateControlInfo()
- */
-void CameraSensor::updateControlInfo()
+Transform CameraSensorLegacy::computeTransform(Orientation *orientation) const
{
- subdev_->updateControlInfo();
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+	 * If the transform contains a Transpose, the flips cannot implement
+	 * it. Adjust 'orientation' to report the native image orientation,
+	 * and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
}
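+/*
+ * Editor's note, worked example: with mountingOrientation_ = Rotate180
+ * and a requested *orientation of Rotate0, the computed transform is
+ * Rotate0 / Rotate180 = Transform::Rot180 (HFlip + VFlip), which the
+ * sensor flips can implement. A requested Rotate90 would require a
+ * Transpose, so the function instead reports Rotate180 and returns
+ * Identity.
+ */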
-/**
- * \fn CameraSensor::focusLens()
- * \brief Retrieve the focus lens controller
- *
- * \return The focus lens controller. nullptr if no focus lens controller is
- * connected to the sensor
- */
+BayerFormat::Order CameraSensorLegacy::bayerOrder(Transform t) const
+{
+	/* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
-std::string CameraSensor::logPrefix() const
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ return bayerFormat_->transform(t).order;
+}
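+/*
+ * Editor's note, worked example: for a native RGGB sensor whose flips
+ * alter the Bayer order, Transform::HFlip mirrors each CFA row
+ * (RG/GB -> GR/BG), so bayerOrder(Transform::HFlip) returns GRBG, and
+ * bayerOrder(Transform::Rot180) returns BGGR.
+ */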
+
+const ControlInfoMap &CameraSensorLegacy::controls() const
{
- return "'" + entity_->name() + "'";
+ return subdev_->controls();
}
-int CameraSensor::generateId()
+ControlList CameraSensorLegacy::getControls(const std::vector<uint32_t> &ids)
{
- const std::string devPath = subdev_->devicePath();
+ return subdev_->getControls(ids);
+}
- /* Try to get ID from firmware description. */
- id_ = sysfs::firmwareNodePath(devPath);
- if (!id_.empty())
+int CameraSensorLegacy::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorLegacy::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
return 0;
- /*
- * Virtual sensors not described in firmware
- *
- * Verify it's a platform device and construct ID from the device path
- * and model of sensor.
- */
- if (devPath.find("/sys/devices/platform/", 0) == 0) {
- id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorLegacy::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
}
- LOG(CameraSensor, Error) << "Can't generate sensor ID";
- return -EINVAL;
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
}
+std::string CameraSensorLegacy::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100)
+
} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
new file mode 100644
index 00000000..e2f518f9
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Database of camera sensor properties
+ */
+
+#include "libcamera/internal/camera_sensor_properties.h"
+
+#include <map>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file camera_sensor_properties.h
+ * \brief Database of camera sensor properties
+ *
+ * The database of camera sensor properties collects static information about
+ * camera sensors that is not possible or desirable to retrieve from the device
+ * at run time.
+ *
+ * The database is indexed using the camera sensor model, as reported by the
+ * properties::Model property, and for each supported sensor it contains a
+ * list of properties.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensorProperties)
+
+/**
+ * \struct CameraSensorProperties
+ * \brief Database of camera sensor properties
+ *
+ * \var CameraSensorProperties::unitCellSize
+ * \brief The physical size of a pixel, including pixel edges, in nanometres.
+ *
+ * \var CameraSensorProperties::testPatternModes
+ * \brief Map that associates the TestPattern control value with the indexes of
+ * the corresponding sensor test pattern modes as returned by
+ * V4L2_CID_TEST_PATTERN.
+ *
+ * \var CameraSensorProperties::sensorDelays
+ * \brief Sensor control application delays
+ *
+ * This structure may be defined as empty if the actual sensor delays are not
+ * available or have not been measured.
+ */
+
+/**
+ * \struct CameraSensorProperties::SensorDelays
+ * \brief Sensor control application delay values
+ *
+ * This structure holds delay values, expressed in number of frames, between the
+ * time a control value is applied to the sensor and the time that value is
+ * reflected in the output. For example "2 frames delay" means that parameters
+ * set during frame N will take effect for frame N+2 (and by extension a delay
+ * of 0 would mean the parameter is applied immediately to the current frame).
+ *
+ * \var CameraSensorProperties::SensorDelays::exposureDelay
+ * \brief Number of frames between application of exposure control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::gainDelay
+ * \brief Number of frames between application of analogue gain control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::vblankDelay
+ * \brief Number of frames between application of vblank control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::hblankDelay
+ * \brief Number of frames between application of hblank control and effect
+ */
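+/*
+ * Editor's note, worked example: with exposureDelay = 2 and gainDelay = 1,
+ * an exposure value queued while frame N is exposing takes effect in
+ * frame N+2, while an analogue gain queued at the same time already
+ * applies to frame N+1.
+ */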
+
+/**
+ * \brief Retrieve the properties associated with a sensor
+ * \param sensor The sensor model name as reported by properties::Model
+ * \return A pointer to the CameraSensorProperties instance associated with a sensor
+ * or nullptr if the sensor is not supported
+ */
+const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
+{
+ static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0144", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ar0521", {
+ .unitCellSize = { 2200, 2200 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = { },
+ } },
+ { "gc05a2", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "gc08a3", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "hi846", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * No corresponding test pattern mode for:
+ * 5: "Gradient Horizontal"
+ * 6: "Gradient Vertical"
+ * 7: "Check Board"
+ * 8: "Slant Pattern"
+ * 9: "Resolution Pattern"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "imx214", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx219", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx258", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx290", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx296", {
+ .unitCellSize = { 3450, 3450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx327", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx462", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx477", {
+ .unitCellSize = { 1550, 1550 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx519", {
+ .unitCellSize = { 1220, 1220 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * The driver reports ColorBars and ColorBarsFadeToGray as well but
+ * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx708", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "ov2685", {
+ .unitCellSize = { 1750, 1750 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 3: "Random Data"
+ * 4: "Black White Square"
+ * 5: "Color Square"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov2740", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov4689", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+				{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test patterns in
+ * MIPI CCS specification for sensor's
+ * colorBarType2 and colorBarType3.
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5640", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5647", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {},
+ /*
+ * We run this sensor in a mode where the gain delay is
+ * bumped up to 2. It seems to be the only way to make
+ * the delays "predictable".
+ *
+ * \todo Verify these delays properly, as the upstream
+ * driver appears to configure _no_ delay.
+ */
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov5670", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5675", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5693", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for
+ * 1: "Random data" and 3: "Colour Bars with
+ * Rolling Bar".
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov64a40", {
+ .unitCellSize = { 1008, 1008 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov7251", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov8858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov8865", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 1: "Random data"
+ * 3: "Color bars with rolling bar"
+ * 4: "Color squares"
+ * 5: "Color squares with rolling bar"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov9281", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov13858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ };
+
+ const auto it = sensorProps.find(sensor);
+ if (it == sensorProps.end()) {
+ LOG(CameraSensorProperties, Warning)
+ << "No static properties available for '" << sensor << "'";
+ LOG(CameraSensorProperties, Warning)
+ << "Please consider updating the camera sensor properties database";
+ return nullptr;
+ }
+
+ return &it->second;
+}
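+/*
+ * Editor's sketch, not part of this patch: a typical lookup, keyed on the
+ * model name reported by the kernel driver.
+ */
+#if 0 /* illustration only */
+	const CameraSensorProperties *props =
+		CameraSensorProperties::get("imx219");
+	if (props) {
+		/* From the table above: unitCellSize is { 1120, 1120 }. */
+		Size cellSize = props->unitCellSize;
+	}
+#endif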
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp
new file mode 100644
index 00000000..ab75b1f8
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_raw.cpp
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy.
+ *
+ * camera_sensor_raw.cpp - A raw camera sensor using the V4L2 streams API
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorRaw : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorRaw(const MediaEntity *entity);
+ ~CameraSensorRaw();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ V4L2Subdevice::Stream imageStream() const override;
+ std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override;
+ V4L2SubdeviceFormat embeddedDataFormat() const override;
+ int setEmbeddedDataEnabled(bool enable) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorRaw)
+
+ std::optional<int> init();
+ int initProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+
+ struct Streams {
+ V4L2Subdevice::Stream sink;
+ V4L2Subdevice::Stream source;
+ };
+
+ struct {
+ Streams image;
+ std::optional<Streams> edata;
+ } streams_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ BayerFormat::Order cfaPattern_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorRaw
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * This class supports single-subdev sensors with a single source pad and one
+ * or two internal sink pads (for the image and embedded data streams).
+ */
+
+CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity)
+ : entity_(entity), staticProps_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorRaw::~CameraSensorRaw() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorRaw::match(MediaEntity *entity)
+{
+ /* Check the entity type. */
+ if (entity->type() != MediaEntity::Type::V4L2Subdevice ||
+ entity->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported entity type ("
+ << utils::to_underlying(entity->type())
+ << ") or function (" << utils::hex(entity->function()) << ")";
+ return { 0 };
+ }
+
+ /* Count and check the number of pads. */
+ static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_INTERNAL;
+ unsigned int numSinks = 0;
+ unsigned int numSources = 0;
+
+ for (const MediaPad *pad : entity->pads()) {
+ switch (pad->flags() & kPadFlagsMask) {
+ case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL:
+ numSinks++;
+ break;
+
+ case MEDIA_PAD_FL_SOURCE:
+ numSources++;
+ break;
+
+ default:
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported pad " << pad->index()
+ << " type " << utils::hex(pad->flags());
+ return { 0 };
+ }
+ }
+
+ if (numSinks < 1 || numSinks > 2 || numSources != 1) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported number of sinks ("
+ << numSinks << ") or sources (" << numSources << ")";
+ return { 0 };
+ }
+
+ /*
+ * The entity matches. Create the camera sensor and initialize it. The
+ * init() function will perform further match checks.
+ */
+ std::unique_ptr<CameraSensorRaw> sensor =
+ std::make_unique<CameraSensorRaw>(entity);
+
+ std::optional<int> err = sensor->init();
+ if (err)
+ return { *err };
+
+ return { std::move(sensor) };
+}
+
+std::optional<int> CameraSensorRaw::init()
+{
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret)
+ return { ret };
+
+ /*
+ * 1. Identify the pads.
+ */
+
+ /*
+ * First locate the source pad. The match() function guarantees there
+ * is one and only one source pad.
+ */
+ unsigned int sourcePad = UINT_MAX;
+
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ sourcePad = pad->index();
+ break;
+ }
+ }
+
+ /*
+ * Iterate over the routes to identify the streams on the source pad,
+ * and the internal sink pads.
+ */
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return { ret };
+
+ bool imageStreamFound = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source.pad != sourcePad) {
+ LOG(CameraSensor, Error) << "Invalid route " << route;
+ return { -EINVAL };
+ }
+
+ /* Identify the stream type based on the supported formats. */
+ V4L2Subdevice::Formats formats = subdev_->formats(route.source);
+
+ std::optional<MediaBusFormatInfo::Type> type;
+
+ for (const auto &[code, sizes] : formats) {
+ const MediaBusFormatInfo &info =
+ MediaBusFormatInfo::info(code);
+ if (info.isValid()) {
+ type = info.type;
+ break;
+ }
+ }
+
+ if (!type) {
+ LOG(CameraSensor, Warning)
+ << "No known format on pad " << route.source;
+ continue;
+ }
+
+ switch (*type) {
+ case MediaBusFormatInfo::Type::Image:
+ if (imageStreamFound) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal image streams ("
+ << streams_.image.sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ imageStreamFound = true;
+ streams_.image.sink = route.sink;
+ streams_.image.source = route.source;
+ break;
+
+ case MediaBusFormatInfo::Type::Metadata:
+ /*
+ * Skip metadata streams that are not sensor embedded
+ * data. The source stream reports a generic metadata
+			 * format; check the sink stream for the exact format.
+ */
+ formats = subdev_->formats(route.sink);
+ if (formats.size() != 1)
+ continue;
+
+ if (MediaBusFormatInfo::info(formats.cbegin()->first).type !=
+ MediaBusFormatInfo::Type::EmbeddedData)
+ continue;
+
+ if (streams_.edata) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal embedded data streams ("
+ << streams_.edata->sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ streams_.edata = { route.sink, route.source };
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!imageStreamFound) {
+ LOG(CameraSensor, Error) << "No image stream found";
+ return { -EINVAL };
+ }
+
+ LOG(CameraSensor, Debug)
+ << "Found image stream " << streams_.image.sink
+ << " -> " << streams_.image.source;
+
+ if (streams_.edata)
+ LOG(CameraSensor, Debug)
+ << "Found embedded data stream " << streams_.edata->sink
+ << " -> " << streams_.edata->source;
+
+ /*
+ * 2. Enumerate and cache the media bus codes, sizes and colour filter
+ * array order for the image stream.
+ */
+
+ /*
+ * Get the native sensor CFA pattern. It is simpler to retrieve it from
+ * the internal image sink pad as it is guaranteed to expose a single
+ * format, and is not affected by flips.
+ */
+ V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink);
+ if (formats.size() != 1) {
+ LOG(CameraSensor, Error)
+ << "Image pad has " << formats.size()
+ << " formats, expected 1";
+ return { -EINVAL };
+ }
+
+ uint32_t nativeFormat = formats.cbegin()->first;
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat);
+ if (!bayerFormat.isValid()) {
+ LOG(CameraSensor, Error)
+ << "Invalid native format " << nativeFormat;
+ return { 0 };
+ }
+
+ cfaPattern_ = bayerFormat.order;
+
+ /*
+ * Retrieve and cache the media bus codes and sizes on the source image
+ * stream.
+ */
+ formats_ = subdev_->formats(streams_.image.source);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return { -EINVAL };
+ }
+
+ /* Populate and sort the media bus codes and the sizes. */
+ for (const auto &[code, ranges] : formats_) {
+ /* Drop non-raw formats (in case we have a hybrid sensor). */
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ mbusCodes_.push_back(code);
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ if (mbusCodes_.empty()) {
+ LOG(CameraSensor, Debug) << "No raw image formats found";
+ return { 0 };
+ }
+
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /*
+ * Remove duplicate sizes. There are no duplicate media bus codes as
+ * they are the keys in the formats map.
+ */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+ * 3. Query selection rectangles. Retrieve properties, and verify that
+ * all the expected selection rectangles are supported.
+ */
+
+ Rectangle rect;
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop bounds";
+ return { ret };
+ }
+
+ pixelArraySize_ = rect.size();
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT,
+ &activeArea_);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop default";
+ return { ret };
+ }
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop rectangle";
+ return { ret };
+ }
+
+ /*
+ * 4. Verify that all required controls are present.
+ */
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ ret = 0;
+
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return { ret };
+ }
+
+ /*
+	 * Verify whether the sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * 5. Discover ancillary devices.
+ *
+ * \todo This code may be shared by different V4L2 sensor classes.
+ */
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ /*
+ * 6. Initialize properties.
+ */
+
+ ret = initProperties();
+ if (ret)
+ return { ret };
+
+ /*
+ * 7. Initialize controls.
+ */
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return { ret };
+
+ return {};
+}
+
+int CameraSensorRaw::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ id_ = sysfs::firmwareNodePath(subdev_->devicePath());
+ if (id_.empty()) {
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+ }
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern. */
+ uint32_t cfa;
+
+ switch (cfaPattern_) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ default:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+
+ return 0;
+}
+
+void CameraSensorRaw::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorRaw::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+ * The camera sensor supports test patterns but we don't know
+ * how to map them so this should be fixed.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorRaw::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
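+	/*
+	 * Best-fit selection: prefer the candidate size whose aspect ratio is
+	 * closest to the requested one and, among candidates with an equally
+	 * close ratio, the smallest area that still covers the request.
+	 */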
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(streams_.image.source, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(streams_.image.source, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+		     [&config](unsigned int mbusCode) {
+			     BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			     return bayer.bitDepth == config.bitDepth;
+		     });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+	 * with two different cropping/subsampling settings will look identical and
+ * only the first found one will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+V4L2Subdevice::Stream CameraSensorRaw::imageStream() const
+{
+ return streams_.image.source;
+}
+
+std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const
+{
+ if (!streams_.edata)
+ return {};
+
+ return { streams_.edata->source };
+}
+
+V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const
+{
+ if (!streams_.edata)
+ return {};
+
+ V4L2SubdeviceFormat format;
+ int ret = subdev_->getFormat(streams_.edata->source, &format);
+ if (ret)
+ return {};
+
+ return format;
+}
+
+int CameraSensorRaw::setEmbeddedDataEnabled(bool enable)
+{
+ if (!streams_.edata)
+ return enable ? -ENOSTR : 0;
+
+ V4L2Subdevice::Routing routing{ 2 };
+
+ routing[0].sink = streams_.image.sink;
+ routing[0].source = streams_.image.source;
+ routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+
+ routing[1].sink = streams_.edata->sink;
+ routing[1].source = streams_.edata->source;
+ routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0;
+
+ int ret = subdev_->setRouting(&routing);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the embedded data stream has been enabled or disabled
+ * correctly. Assume at least one route will match the embedded data
+ * source stream, as there would be something seriously wrong
+ * otherwise.
+ */
+ bool enabled = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source != streams_.edata->source)
+ continue;
+
+ enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+ break;
+ }
+
+ if (enabled != enable)
+ return enabled ? -EISCONN : -ENOSTR;
+
+ return 0;
+}
+
+int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const
+{
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &info->analogCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+	 *
+	 * Compensate by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(streams_.image.source, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorRaw::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
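+	/*
+	 * For example, a sensor mounted upside-down (Rotate180) that is asked
+	 * for Rotate0 yields a 180-degree transform, i.e. HFlip + VFlip.
+	 */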
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+	 * If the transform contains a Transpose it cannot be implemented with
+	 * flips alone, so adjust 'orientation' to report the image's native
+	 * orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const
+{
+ if (!flipsAlterBayerOrder_)
+ return cfaPattern_;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None };
+ return format.transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorRaw::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorRaw::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorRaw::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
new file mode 100644
index 00000000..dce74ed6
--- /dev/null
+++ b/src/libcamera/sensor/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'camera_sensor.cpp',
+ 'camera_sensor_legacy.cpp',
+ 'camera_sensor_properties.cpp',
+ 'camera_sensor_raw.cpp',
+])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..d9b61d37
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <libcamera/base/memfd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can also be checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem instance. If a large number
+ * of objects needs to be allocated in shared memory, they should be grouped
+ * together and share the memory allocated by a single SharedMem object when
+ * possible, to minimize the number of memfds created.
+ */
+
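+/*
+ * A minimal usage sketch (illustrative only, the buffer name and size are
+ * arbitrary):
+ *
+ *	SharedMem shm("stats", 4096);
+ *	if (!shm)
+ *		return;
+ *
+ *	Span<uint8_t> data = shm.mem();
+ *	std::fill(data.begin(), data.end(), 0);
+ */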
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+ UniqueFD memfd = MemFd::create(name.c_str(), size,
+ MemFd::Seal::Shrink | MemFd::Seal::Grow);
+ if (!memfd.isValid())
+ return;
+
+ fd_ = SharedFD(std::move(memfd));
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+	/* Release any existing mapping before taking over rhs. */
+	if (!mem_.empty())
+		munmap(mem_.data(), mem_.size_bytes());
+	this->fd_ = std::move(rhs.fd_);
+	this->mem_ = rhs.mem_;
+	rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to fit an object of
+ * type \a T. The bool() operator should be used to check that the allocation
+ * was successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
+ */
+
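+/*
+ * A minimal usage sketch (illustrative only, the FrameStats type and its
+ * member are hypothetical):
+ *
+ *	struct FrameStats {
+ *		uint32_t frameCount;
+ *	};
+ *
+ *	SharedMemObject<FrameStats> stats("stats");
+ *	if (!stats)
+ *		return;
+ *
+ *	stats->frameCount++;
+ */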
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMemObject
+ * instances can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..a50db668
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,208 @@
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretical problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
+
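+A rough sketch of the ring-buffer idea (the buffer count and member names are
+hypothetical, and statsReady would have to carry the index, e.g. as a
+Signal<unsigned int>):
+
+	static constexpr unsigned int kNumStatsBuffers = 4;
+	std::array<SwIspStats, kNumStatsBuffers> statsBuffers_;
+	unsigned int statsIndex_ = 0;
+
+	void SwStatsCpu::finishFrame()
+	{
+		statsBuffers_[statsIndex_] = stats_;
+		statsReady.emit(statsIndex_);
+		statsIndex_ = (statsIndex_ + 1) % kNumStatsBuffers;
+	}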
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+But better, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal ?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't mark it as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now, I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me think we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more then debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the let's wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+8. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+9. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+13. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..f0b83261
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kRGBLookupSize
+ * \brief Size of a color lookup table
+ */
+
+/**
+ * \typedef DebayerParams::ColorLookupTable
+ * \brief Type of the lookup tables for red, green, blue values
+ */
+
+/**
+ * \var DebayerParams::red
+ * \brief Lookup table for red color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::green
+ * \brief Lookup table for green color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::blue
+ * \brief Lookup table for blue color, mapping input values to output values
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ *
+ * \return 0 on success, a negative errno on failure
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats
+ * \param[in] inputFormat The input format
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats
+ * \param[in] inputFormat The input format
+ *
+ * \return All supported output formats or an empty vector if there are none
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config
+ */
+
+/**
+ * \fn void Debayer::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format
+ * \param[in] frame The frame number
+ * \param[in] input The input buffer
+ * \param[in] output The output buffer
+ * \param[in] params The parameters to be used in debayering
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
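+/*
+ * For instance, a caller running the CPU implementation in a worker thread
+ * may queue the call as follows (a sketch, assuming a DebayerCpu instance
+ * named debayer_):
+ *
+ *	debayer_->invokeMethod(&DebayerCpu::process, ConnectionTypeQueued,
+ *			       frame, input, output, params);
+ */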
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input size
+ *
+ * \return The valid size ranges or an empty range if there are none
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..d7ca060d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..31ab96ab
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+
+#include <linux/dma-buf.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats))
+{
+ /*
+	 * Reading from uncached buffers may be very slow. In such a case, it's
+	 * better to copy the input buffer data to normal memory. But in the
+	 * case of cached buffers, copying the data is unnecessary overhead.
+	 * enableInputMemcpy_ makes this behavior configurable. At the moment,
+	 * we always set it to true as the safer choice, but this should be
+	 * changed in the future.
+ */
+ enableInputMemcpy_ = true;
+
+	/* Initialize the color lookup tables to the identity mapping */
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++)
+ red_[i] = green_[i] = blue_[i] = i;
+}
+
+DebayerCpu::~DebayerCpu() = default;
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
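+/*
+ * Each macro above emits one output pixel: the native colour channel is
+ * passed through its lookup table, the two missing channels are interpolated
+ * from the 3x3 neighbourhood diagrammed above the macro, and x advances by
+ * one column. The p and n arguments are the previous/next column offsets,
+ * and div scales the input values down to 8 bpp.
+ */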
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+ * For the first pixel getting a pixel from the previous column uses
+ * x - 2 to skip the 5th byte with least-significant bits for 4 pixels.
+ * Same for last pixel (uses x + 2) and looking at the next column.
+ */
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+		/* Odd pixel GBRG -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Setup the Debayer object according to the passed in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 ||
+ outputFormat == formats::XBGR8888 || outputFormat == formats::ABGR8888) {
+ config.bpp = 32;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+ bool addAlphaByte = false;
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+		LOG(Debayer, Error) << "Unsupported input/output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::XRGB8888:
+ case formats::ARGB8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::RGB888:
+ break;
+ case formats::XBGR8888:
+ case formats::ABGR8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer8_BGBG_BGR888<true> : &DebayerCpu::debayer8_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer8_GRGR_BGR888<true> : &DebayerCpu::debayer8_GRGR_BGR888<false>;
+ break;
+ case 10:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10_BGBG_BGR888<true> : &DebayerCpu::debayer10_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10_GRGR_BGR888<true> : &DebayerCpu::debayer10_GRGR_BGR888<false>;
+ break;
+ case 12:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer12_BGBG_BGR888<true> : &DebayerCpu::debayer12_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer12_GRGR_BGR888<true> : &DebayerCpu::debayer12_GRGR_BGR888<false>;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
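+	/*
+	 * Center the crop window on the input frame and round the offsets
+	 * down to a multiple of the pattern size; the pattern dimensions are
+	 * powers of two, so the masking is exact.
+	 */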
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+	/* Pad with patternSize.width on both the left and right sides */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+
+ if (enableInputMemcpy_) {
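+		/*
+		 * Allocate one spare buffer so the ring in memcpyNextLine()
+		 * can copy a new line while patternSize.height lines are in
+		 * use.
+		 */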
+ for (unsigned int i = 0; i <= inputConfig_.patternSize.height; i++)
+ lineBuffers_[i].resize(lineBufferLength_);
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+	/* Round up to a multiple of 8 for 64-bit alignment */
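+	/* E.g. RGB888 (bpp = 24) at a width of 1022 pixels: 3066 bytes, rounded up to 3072. */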
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i].data(),
+ linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i].data() + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_].data(),
+ linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_].data() + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
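+
+/*
+ * Sketch of the rotation implemented above, assuming a 2x2 pattern
+ * (patternHeight == 2, so 3 line buffers in use): setupInputMemcpy()
+ * fills buffers 0 and 1 and leaves lineBufferIndex_ at 2, successive
+ * memcpyNextLine() calls then use buffers 2, 0, 1, 2, ... so the buffer
+ * being overwritten is always the oldest one.
+ */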
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+		/* window_.y == 0, use the next line as the previous line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+		/* The next line may point outside of src, use the previous line instead */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+namespace {
+
+inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
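+
+/*
+ * For example (illustrative values only), with before = { 1 s, 900000000 ns }
+ * and after = { 2 s, 100000000 ns }, timeDiff() returns
+ * 1 * 1000000000 + 100000000 - 900000000 = 200000000 ns.
+ */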
+
+} /* namespace */
+
+void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ std::vector<DmaSyncer> dmaSyncers;
+ for (const FrameBuffer::Plane &plane : input->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read);
+
+ for (const FrameBuffer::Plane &plane : output->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write);
+
+ green_ = params.green;
+ red_ = swapRedBlueGains_ ? params.blue : params.red;
+ blue_ = swapRedBlueGains_ ? params.red : params.blue;
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ dmaSyncers.clear();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ /*
+	 * Buffer ids are currently not used, so pass zero as the bufferId
+	 * parameter.
+ *
+ * \todo Pass real bufferId once stats buffer passing is changed.
+ */
+ stats_->finishFrame(frame, 0);
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+	 * For debayer interpolation a border is kept around the entire image
+	 * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
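+
+/*
+ * Worked example of the range above (illustrative values): for a 1280x720
+ * input with a 2x2 pattern the border height is 0, so the output can range
+ * from 2x2 up to (1280 - 4) & ~1 = 1276 by (720 - 0) & ~1 = 720, in steps
+ * of 2x2.
+ */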
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..2c47e7c6
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+	 * \return The file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+	 * \brief Called to debayer one line of Bayer input data to the output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+	 * colors. To allow copying the src lines only once, 3 temporary buffers,
+	 * each holding a single line, are used; the oldest buffer is re-used for
+	 * the next line and the pointers are swizzled so that:
+	 * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+	 * Similarly, for Bayer patterns which repeat every 4 lines, 5 src
+	 * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up,
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ template<bool addAlphaByte>
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ DebayerParams::ColorLookupTable red_;
+ DebayerParams::ColorLookupTable green_;
+ DebayerParams::ColorLookupTable blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ std::vector<uint8_t> lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+	/* Skip 30 frames for things to stabilize, then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..aac7eda7
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_internal_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..2bea64d9
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <cmath>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Construct a SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ * \param[out] ipaControls The IPA controls to update
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
+ ControlInfoMap *ipaControls)
+ : dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+ /*
+ * debayerParams_ must be initialized because the initial value is used for
+ * the first two frames, i.e. until stats processing starts providing its
+ * own parameters.
+ *
+ * \todo This should be handled in the same place as the related
+ * operations, in the IPA module.
+ */
+ std::array<uint8_t, 256> gammaTable;
+ for (unsigned int i = 0; i < 256; i++)
+ gammaTable[i] = UINT8_MAX * std::pow(i / 256.0, 0.5);
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ debayerParams_.red[i] = gammaTable[i];
+ debayerParams_.green[i] = gammaTable[i];
+ debayerParams_.blue[i] = gammaTable[i];
+ }
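+
+	/*
+	 * As an illustration of the table above: gammaTable[64] is
+	 * 255 * pow(64 / 256.0, 0.5) = 127.5, stored as 127, so mid-dark
+	 * input values are brightened considerably.
+	 */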
+
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaBufAllocator object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+	 * The IPA tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls(),
+ ipaControls);
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+	/* Make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns success.
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(frame, bufferId, sensorControls);
+}
+
+/**
+ * \brief Check the validity of the Software ISP object
+ * \return True if the Software ISP is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given
+ * output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ipa::soft::IPAConfigInfo &configInfo)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(configInfo);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] stream Output stream exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+	/* Single output for now */
+ if (stream == nullptr)
+ return -EINVAL;
+
+ return dmaHeap_.exportBuffers(count, { debayer_->frameSize() }, buffers);
+}
+
+/**
+ * \brief Queue a request and process the control list from the application
+ * \param[in] frame The number of the frame which will be processed next
+ * \param[in] controls The controls for the \a frame
+ */
+void SoftwareIsp::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ ipa_->queueRequest(frame, controls);
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::queueBuffers(uint32_t frame, FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ /*
+	 * Validate the outputs as a sanity check: at least one output is
+	 * required, and all outputs must reference a valid frame buffer.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+	if (outputs.size() != 1) /* only single stream atm */
+		return -EINVAL;
+
+	for (auto [stream, buffer] : outputs) {
+		if (!buffer)
+			return -EINVAL;
+	}
+
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++)
+ process(frame, input, iter->second);
+
+ return 0;
+}
+
+/**
+ * \brief Start the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stop the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
+
+/**
+ * \brief Pass the input framebuffer to the ISP worker to process
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output)
+{
+ ipa_->computeParams(frame);
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, frame, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady(uint32_t frame, uint32_t bufferId)
+{
+ ispStatsReady.emit(frame, bufferId);
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..c520c806
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function plus functions to gather statistics
+ * on a line-by-line basis. This allows CPU based software debayering to
+ * interleave debayering and statistics gathering on a line-by-line basis
+ * while the input data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
 * \brief Get whether the statistics object is valid
+ *
+ * \return True if it's valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done multiple lines
+ * and/or columns at a time. Get the width and height at which the (Bayer)
+ * pattern repeats. Window values are rounded down to a multiple of this and
+ * the height also indicates if processLine2() should be called or not.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with
+ * patternSize height == 1.
+ * It will process lines 0 and 1 for input formats with patternSize height >= 2.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process line 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with
+ * patternSize height == 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
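+
+/*
+ * Bin selection example for the histogram above (illustrative only):
+ * kRedYMul + kGreenYMul + kBlueYMul = 256, so for 8-bit r = g = b = 255
+ * and div = 1, yVal = 255 * 256 = 65280 and the bin index is
+ * 65280 * SwIspStats::kYHistogramSize / 65536, i.e. the last bin.
+ */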
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+		/* Divide Y by 4 for a 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+		/* Divide Y by 16 for a 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+	/* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+	/* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit(frame, bufferId);
+}
+
+/**
+ * \brief Setup SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if the order is a standard Bayer order and set up xShift_ and
+ * swapLines_ so that a single BGGR stats function can be used for all 4
+ * standard orders.
+ *
+ * \return 0 on success, -EINVAL if \a order is not a standard Bayer order
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+	ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
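+
+/*
+ * Sketch of the normalization above (editorial illustration, no extra
+ * logic): GRBG lines are GRGR... / BGBG..., so swapping the line pointers
+ * (swapLines_) makes the data look like BGGR to the stats functions; GBRG
+ * starts one pixel into a BGGR pattern, which xShift_ = 1 compensates for.
+ * With ySkipMask_ = 0x02, lines with bit 1 set in y (the 3rd and 4th line
+ * of every group of 4) are skipped.
+ */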
+
+/**
+ * \brief Configure the statistics object for the passed in input format
+ * \param[in] inputCfg The input format
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+		/* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window object
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+	/* Reduce width by xShift_ to make sure the window fits */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
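+
+/*
+ * Alignment example for setWindow() (illustrative values): with a 2x2
+ * pattern and xShift_ = 1, a window of { x = 101, y = 50, 640x480 }
+ * becomes x = (101 & ~1) + 1 = 101, y = 50, width = (640 - 1) & ~1 = 638,
+ * height = 480.
+ */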
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..26a2f462
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame(uint32_t frame, uint32_t bufferId);
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<uint32_t, uint32_t> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
index 19689585..1af5386a 100644
--- a/src/libcamera/source_paths.cpp
+++ b/src/libcamera/source_paths.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.cpp - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#include "libcamera/internal/source_paths.h"
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index 686e693b..978d7275 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,22 +2,22 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
#include <algorithm>
#include <array>
-#include <iomanip>
#include <limits.h>
-#include <sstream>
-
-#include <libcamera/request.h>
+#include <ostream>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/request.h>
/**
* \file stream.h
@@ -311,7 +311,8 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* The stride value reports the number of bytes between the beginning of
* successive lines in an image buffer for this stream. The value is
* valid after successfully validating the configuration with a call to
- * CameraConfiguration::validate().
+ * CameraConfiguration::validate(). For compressed formats (such as MJPEG),
+ * this value will be zero.
*/
/**
@@ -391,7 +392,23 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
std::string StreamConfiguration::toString() const
{
- return size.toString() + "-" + pixelFormat.toString();
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a StreamConfiguration into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] cfg The StreamConfiguration
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg)
+{
+ out << cfg.size << "-" << cfg.pixelFormat;
+ return out;
}
/**
@@ -418,9 +435,23 @@ std::string StreamConfiguration::toString() const
*/
/**
- * \typedef StreamRoles
- * \brief A vector of StreamRole
+ * \brief Insert a text representation of a StreamRole into an output stream
+ * \param[in] out The output stream
+ * \param[in] role The StreamRole
+ * \return The output stream \a out
*/
+std::ostream &operator<<(std::ostream &out, StreamRole role)
+{
+ static constexpr std::array<const char *, 4> names{
+ "Raw",
+ "StillCapture",
+ "VideoRecording",
+ "Viewfinder",
+ };
+
+ out << names[utils::to_underlying(role)];
+ return out;
+}
/**
* \class Stream
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
index 44c3331b..3d9885b0 100644
--- a/src/libcamera/sysfs.cpp
+++ b/src/libcamera/sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.cpp - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#include "libcamera/internal/sysfs.h"
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
index 0173b75a..90662d12 100644
--- a/src/libcamera/tracepoints.cpp
+++ b/src/libcamera/tracepoints.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * tracepoints.cpp - Tracepoints with lttng
+ * Tracepoints with lttng
*/
#define TRACEPOINT_CREATE_PROBES
#define TRACEPOINT_DEFINE
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
index 99a043ba..9fe8b562 100644
--- a/src/libcamera/transform.cpp
+++ b/src/libcamera/transform.cpp
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.cpp - 2D plane transforms.
+ * 2D plane transforms.
*/
#include <libcamera/transform.h>
+#include <libcamera/orientation.h>
+
/**
* \file transform.h
* \brief Enum to represent and manipulate 2D plane transforms
@@ -187,24 +189,24 @@ Input image | | goes to output image | |
*/
/**
- * \brief Compose two transforms together
- * \param[in] t1 The second transform
- * \param[in] t0 The first transform
+ * \brief Compose two transforms by applying \a t0 first then \a t1
+ * \param[in] t0 The first transform to apply
+ * \param[in] t1 The second transform to apply
*
- * Composing transforms follows the usual mathematical convention for
- * composing functions. That is, when performing `t1 * t0`, \a t0 is applied
- * first, and then \a t1.
- * For example, `Transpose * HFlip` performs `HFlip` first and then the
- * `Transpose` yielding `Rot270`, as shown below.
+ * Compose two transforms into a transform that is equivalent to first applying
+ * \a t0 and then applying \a t1. For example, `HFlip * Transpose` performs
+ * `HFlip` first and then the `Transpose` yielding `Rot270`, as shown below.
~~~
A-B B-A B-D
Input image | | -> HFLip -> | | -> Transpose -> | | = Rot270
C-D D-C A-C
~~~
- * Note that composition is generally non-commutative for Transforms,
- * and not the same as XOR-ing the underlying bit representations.
+ * Note that composition is generally non-commutative for Transforms, and not
+ * the same as XOR-ing the underlying bit representations.
+ *
+ * \return A Transform equivalent to applying \a t0 and then \a t1
*/
-Transform operator*(Transform t1, Transform t0)
+Transform operator*(Transform t0, Transform t1)
{
/*
* Reorder the operations so that we imagine doing t0's transpose
@@ -299,6 +301,91 @@ Transform transformFromRotation(int angle, bool *success)
return Transform::Identity;
}
+namespace {
+
+/**
+ * \brief Return the transform representing \a orientation
+ * \param[in] orientation The orientation to convert
+ * \return The transform corresponding to \a orientation
+ */
+Transform transformFromOrientation(const Orientation &orientation)
+{
+ switch (orientation) {
+ case Orientation::Rotate0:
+ return Transform::Identity;
+ case Orientation::Rotate0Mirror:
+ return Transform::HFlip;
+ case Orientation::Rotate180:
+ return Transform::Rot180;
+ case Orientation::Rotate180Mirror:
+ return Transform::VFlip;
+ case Orientation::Rotate90Mirror:
+ return Transform::Transpose;
+ case Orientation::Rotate90:
+ return Transform::Rot90;
+ case Orientation::Rotate270Mirror:
+ return Transform::Rot180Transpose;
+ case Orientation::Rotate270:
+ return Transform::Rot270;
+ }
+
+ return Transform::Identity;
+}
+
+} /* namespace */
+
+/**
+ * \brief Return the Transform that applied to \a o2 gives \a o1
+ * \param o1 The Orientation to obtain
+ * \param o2 The base Orientation
+ *
+ * This operation can be used to easily compute the Transform to apply to a
+ * base orientation \a o2 to get the desired orientation \a o1.
+ *
+ * \return A Transform that applied to \a o2 gives \a o1
+ */
+Transform operator/(const Orientation &o1, const Orientation &o2)
+{
+ Transform t1 = transformFromOrientation(o1);
+ Transform t2 = transformFromOrientation(o2);
+
+ return -t2 * t1;
+}
+
+/**
+ * \brief Apply the Transform \a t on the orientation \a o
+ * \param o The orientation
+ * \param t The transform to apply on \a o
+ * \return The Orientation resulting from applying \a t on \a o
+ */
+Orientation operator*(const Orientation &o, const Transform &t)
+{
+ /*
+ * Apply a Transform corresponding to the orientation first and
+ * then apply \a t to it.
+ */
+ switch (transformFromOrientation(o) * t) {
+ case Transform::Identity:
+ return Orientation::Rotate0;
+ case Transform::HFlip:
+ return Orientation::Rotate0Mirror;
+ case Transform::VFlip:
+ return Orientation::Rotate180Mirror;
+ case Transform::Rot180:
+ return Orientation::Rotate180;
+ case Transform::Transpose:
+ return Orientation::Rotate90Mirror;
+ case Transform::Rot270:
+ return Orientation::Rotate270;
+ case Transform::Rot90:
+ return Orientation::Rotate90;
+ case Transform::Rot180Transpose:
+ return Orientation::Rotate270Mirror;
+ }
+
+ return Orientation::Rotate0;
+}
+
/**
* \brief Return a character string describing the transform
* \param[in] t The transform to be described.
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 3fc8438f..2f65a43a 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#include "libcamera/internal/v4l2_device.h"
#include <fcntl.h>
-#include <iomanip>
-#include <limits.h>
#include <map>
+#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
@@ -24,6 +23,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/sysfs.h"
/**
@@ -85,18 +85,18 @@ int V4L2Device::open(unsigned int flags)
return -EBUSY;
}
- UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), flags));
+ UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ flags | O_CLOEXEC));
if (!fd.isValid()) {
int ret = -errno;
- LOG(V4L2, Error) << "Failed to open V4L2 device: "
+ LOG(V4L2, Error) << "Failed to open V4L2 device '"
+ << deviceNode_ << "': "
<< strerror(-ret);
return ret;
}
setFd(std::move(fd));
- listControls();
-
return 0;
}
@@ -127,6 +127,8 @@ int V4L2Device::setFd(UniqueFD fd)
fdEventNotifier_->activated.connect(this, &V4L2Device::eventAvailable);
fdEventNotifier_->setEnabled(false);
+ listControls();
+
return 0;
}
@@ -203,10 +205,29 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
if (info.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) {
ControlType type;
+ ControlValue &value = ctrl.second;
+ Span<uint8_t> data;
switch (info.type) {
case V4L2_CTRL_TYPE_U8:
type = ControlTypeByte;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u8 = data.data();
+ break;
+
+ case V4L2_CTRL_TYPE_U16:
+ type = ControlTypeUnsigned16;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ break;
+
+ case V4L2_CTRL_TYPE_U32:
+ type = ControlTypeUnsigned32;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
break;
default:
@@ -216,11 +237,6 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
return {};
}
- ControlValue &value = ctrl.second;
- value.reserve(type, true, info.elems);
- Span<uint8_t> data = value.data();
-
- v4l2Ctrl.p_u8 = data.data();
v4l2Ctrl.size = data.size();
}
}
@@ -242,7 +258,8 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to read control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to read control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -297,6 +314,30 @@ int V4L2Device::setControls(ControlList *ctrls)
/* Set the v4l2_ext_control value for the write operation. */
ControlValue &value = ctrl->second;
switch (iter->first->type()) {
+ case ControlTypeUnsigned16: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint16_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeUnsigned32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint32_t>();
+ }
+
+ break;
+ }
+
case ControlTypeInteger32: {
if (value.isArray()) {
Span<uint8_t> data = value.data();
@@ -352,7 +393,8 @@ int V4L2Device::setControls(ControlList *ctrls)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to set control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to set control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -485,6 +527,12 @@ ControlType V4L2Device::v4l2CtrlType(uint32_t ctrlType)
case V4L2_CTRL_TYPE_BOOLEAN:
return ControlTypeBool;
+ case V4L2_CTRL_TYPE_U16:
+ return ControlTypeUnsigned16;
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlTypeUnsigned32;
+
case V4L2_CTRL_TYPE_INTEGER:
return ControlTypeInteger32;
@@ -517,7 +565,15 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &
const std::string name(static_cast<const char *>(ctrl.name), len);
const ControlType type = v4l2CtrlType(ctrl.type);
- return std::make_unique<ControlId>(ctrl.id, name, type);
+ ControlId::DirectionFlags flags;
+ if (ctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)
+ flags = ControlId::Direction::Out;
+ else if (ctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ flags = ControlId::Direction::In;
+ else
+ flags = ControlId::Direction::In | ControlId::Direction::Out;
+
+ return std::make_unique<ControlId>(ctrl.id, name, "v4l2", type, flags);
}
/**
@@ -525,7 +581,7 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &
* \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
* \return A ControlInfo that represents \a ctrl
*/
-ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
{
switch (ctrl.type) {
case V4L2_CTRL_TYPE_U8:
@@ -533,6 +589,16 @@ ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
static_cast<uint8_t>(ctrl.maximum),
static_cast<uint8_t>(ctrl.default_value));
+ case V4L2_CTRL_TYPE_U16:
+ return ControlInfo(static_cast<uint16_t>(ctrl.minimum),
+ static_cast<uint16_t>(ctrl.maximum),
+ static_cast<uint16_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlInfo(static_cast<uint32_t>(ctrl.minimum),
+ static_cast<uint32_t>(ctrl.maximum),
+ static_cast<uint32_t>(ctrl.default_value));
+
case V4L2_CTRL_TYPE_BOOLEAN:
return ControlInfo(static_cast<bool>(ctrl.minimum),
static_cast<bool>(ctrl.maximum),
@@ -562,14 +628,14 @@ ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
*
* \return A ControlInfo that represents \a ctrl
*/
-ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
{
std::vector<ControlValue> indices;
struct v4l2_querymenu menu = {};
menu.id = ctrl.id;
if (ctrl.minimum < 0)
- return ControlInfo();
+ return std::nullopt;
for (int32_t index = ctrl.minimum; index <= ctrl.maximum; ++index) {
menu.index = index;
@@ -579,6 +645,14 @@ ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ct
indices.push_back(index);
}
+ /*
+ * Some faulty UVC devices are known to return an empty menu control.
+	 * Controls without a menu option cannot be set or read, so they are
+ * not exposed.
+ */
+ if (indices.size() == 0)
+ return std::nullopt;
+
return ControlInfo(indices,
ControlValue(static_cast<int32_t>(ctrl.default_value)));
}
@@ -611,6 +685,8 @@ void V4L2Device::listControls()
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
break;
/* \todo Support other control types. */
default:
@@ -627,7 +703,17 @@ void V4L2Device::listControls()
controlIdMap_[ctrl.id] = controlIds_.back().get();
controlInfo_.emplace(ctrl.id, ctrl);
- ctrls.emplace(controlIds_.back().get(), v4l2ControlInfo(ctrl));
+ std::optional<ControlInfo> info = v4l2ControlInfo(ctrl);
+
+ if (!info) {
+ LOG(V4L2, Error)
+ << "Control " << ctrl.name
+ << " cannot be registered";
+
+ continue;
+ }
+
+ ctrls.emplace(controlIds_.back().get(), *info);
}
controls_ = ControlInfoMap(std::move(ctrls), controlIdMap_);
@@ -666,7 +752,7 @@ void V4L2Device::updateControlInfo()
continue;
}
- info = v4l2ControlInfo(ctrl);
+ info = *v4l2ControlInfo(ctrl);
}
}
@@ -745,8 +831,12 @@ void V4L2Device::eventAvailable()
static const std::map<uint32_t, ColorSpace> v4l2ToColorSpace = {
{ V4L2_COLORSPACE_RAW, ColorSpace::Raw },
- { V4L2_COLORSPACE_JPEG, ColorSpace::Jpeg },
- { V4L2_COLORSPACE_SRGB, ColorSpace::Srgb },
+ { V4L2_COLORSPACE_SRGB, {
+ ColorSpace::Primaries::Rec709,
+ ColorSpace::TransferFunction::Srgb,
+ ColorSpace::YcbcrEncoding::Rec601,
+ ColorSpace::Range::Limited } },
+ { V4L2_COLORSPACE_JPEG, ColorSpace::Sycc },
{ V4L2_COLORSPACE_SMPTE170M, ColorSpace::Smpte170m },
{ V4L2_COLORSPACE_REC709, ColorSpace::Rec709 },
{ V4L2_COLORSPACE_BT2020, ColorSpace::Rec2020 },
@@ -771,8 +861,7 @@ static const std::map<uint32_t, ColorSpace::Range> v4l2ToRange = {
static const std::vector<std::pair<ColorSpace, v4l2_colorspace>> colorSpaceToV4l2 = {
{ ColorSpace::Raw, V4L2_COLORSPACE_RAW },
- { ColorSpace::Jpeg, V4L2_COLORSPACE_JPEG },
- { ColorSpace::Srgb, V4L2_COLORSPACE_SRGB },
+ { ColorSpace::Sycc, V4L2_COLORSPACE_JPEG },
{ ColorSpace::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
{ ColorSpace::Rec709, V4L2_COLORSPACE_REC709 },
{ ColorSpace::Rec2020, V4L2_COLORSPACE_BT2020 },
@@ -792,6 +881,8 @@ static const std::map<ColorSpace::TransferFunction, v4l2_xfer_func> transferFunc
};
static const std::map<ColorSpace::YcbcrEncoding, v4l2_ycbcr_encoding> ycbcrEncodingToV4l2 = {
+ /* V4L2 has no "none" encoding. */
+ { ColorSpace::YcbcrEncoding::None, V4L2_YCBCR_ENC_DEFAULT },
{ ColorSpace::YcbcrEncoding::Rec601, V4L2_YCBCR_ENC_601 },
{ ColorSpace::YcbcrEncoding::Rec709, V4L2_YCBCR_ENC_709 },
{ ColorSpace::YcbcrEncoding::Rec2020, V4L2_YCBCR_ENC_BT2020 },
@@ -805,6 +896,7 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
/**
* \brief Convert the color space fields in a V4L2 format to a ColorSpace
* \param[in] v4l2Format A V4L2 format containing color space information
+ * \param[in] colourEncoding Type of colour encoding
*
* The colorspace, ycbcr_enc, xfer_func and quantization fields within a
* V4L2 format structure are converted to a corresponding ColorSpace.
@@ -816,7 +908,8 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
* \retval std::nullopt One or more V4L2 color space fields were not recognised
*/
template<typename T>
-std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
+std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding)
{
auto itColor = v4l2ToColorSpace.find(v4l2Format.colorspace);
if (itColor == v4l2ToColorSpace.end())
@@ -839,6 +932,14 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.ycbcrEncoding = itYcbcrEncoding->second;
+
+ /*
+ * V4L2 has no "none" encoding; override the value returned by
+ * the kernel for non-YUV formats, as YCbCr encoding isn't
+ * applicable in that case.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
}
if (v4l2Format.quantization != V4L2_QUANTIZATION_DEFAULT) {
@@ -847,14 +948,24 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.range = itRange->second;
+
+ /*
+ * "Limited" quantization range is only meant for YUV formats.
+ * Override the range to "Full" for all other formats.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.range = ColorSpace::Range::Full;
}
return colorSpace;
}
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &,
+ PixelFormatInfo::ColourEncoding);
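As a rough illustration of the new colourEncoding parameter, a V4L2Device subclass converting the format of a raw Bayer stream would now pass the encoding explicitly so that the YCbCr and range overrides apply (a minimal sketch; the format is assumed to have been populated by VIDIOC_G_FMT beforehand):

    struct v4l2_pix_format pix = {};
    /* ... pix filled in via VIDIOC_G_FMT in real code ... */
    std::optional<ColorSpace> colorSpace =
            toColorSpace(pix, PixelFormatInfo::ColourEncodingRAW);
    if (!colorSpace)
            LOG(V4L2, Debug) << "Unrecognised V4L2 color space fields";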
/**
* \brief Fill in the color space fields of a V4L2 format from a ColorSpace
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
index 0e0da3f0..e8b3eb9c 100644
--- a/src/libcamera/v4l2_pixelformat.cpp
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.cpp - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#include "libcamera/internal/v4l2_pixelformat.h"
@@ -61,6 +61,8 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::BGRX8888, "32-bit XRGB 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
{ formats::XBGR8888, "32-bit RGBX 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32),
+ { formats::RGBX8888, "32-bit XBGR 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
{ formats::ABGR8888, "32-bit RGBA 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
@@ -69,6 +71,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
{ formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
/* YUV packed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
@@ -79,6 +85,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::UYVY, "UYVY 4:2:2" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
{ formats::VYUY, "VYUY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32),
+ { formats::AVUY8888, "32-bit YUVA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32),
+ { formats::XVUY8888, "32-bit YUVX 8-8-8-8" } },
/* YUV planar formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
@@ -113,14 +123,26 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::YUV422, "Planar YUV 4:2:2" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
{ formats::YUV422, "Planar YUV 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M),
+ { formats::YVU422, "Planar YVU 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
+ { formats::YUV444, "Planar YUV 4:4:4 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
+ { formats::YVU444, "Planar YVU 4:4:4 (N-C)" } },
/* Greyscale formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_GREY),
{ formats::R8, "8-bit Greyscale" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y10),
{ formats::R10, "10-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
+ { formats::R10_CSI2P, "10-bit Greyscale Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y12),
{ formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12P),
+ { formats::R12_CSI2P, "12-bit Greyscale Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y16),
+ { formats::R16, "16-bit Greyscale" } },
/* Bayer formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
@@ -163,6 +185,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG12_CSI2P, "12-bit Bayer GRGR/BGBG Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
{ formats::SRGGB12_CSI2P, "12-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14),
+ { formats::SBGGR14, "14-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14),
+ { formats::SGBRG14, "14-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14),
+ { formats::SGRBG14, "14-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14),
+ { formats::SRGGB14, "14-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P),
+ { formats::SBGGR14_CSI2P, "14-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P),
+ { formats::SGBRG14_CSI2P, "14-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P),
+ { formats::SGRBG14_CSI2P, "14-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P),
+ { formats::SRGGB14_CSI2P, "14-bit Bayer RGRG/GBGB Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
{ formats::SBGGR16, "16-bit Bayer BGBG/GRGR" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
@@ -171,10 +209,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
{ formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
/* Compressed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
{ formats::MJPEG, "Motion-JPEG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ { formats::MJPEG, "JPEG JFIF" } },
};
} /* namespace */
@@ -278,15 +328,23 @@ const char *V4L2PixelFormat::description() const
/**
* \brief Convert the V4L2 pixel format to the corresponding PixelFormat
+ * \param[in] warn When true, log a warning message if the V4L2 pixel format
+ * isn't known
+ *
+ * Users of this function may convert a V4L2PixelFormat to a PixelFormat
+ * simply to check whether the format is supported. In that case, they can
+ * suppress the warning message by setting the \a warn argument to false to
+ * avoid polluting the log with unnecessary messages.
+ *
* \return The PixelFormat corresponding to the V4L2 pixel format
*/
-PixelFormat V4L2PixelFormat::toPixelFormat() const
+PixelFormat V4L2PixelFormat::toPixelFormat(bool warn) const
{
const auto iter = vpf2pf.find(*this);
if (iter == vpf2pf.end()) {
- LOG(V4L2, Warning)
- << "Unsupported V4L2 pixel format "
- << toString();
+ if (warn)
+ LOG(V4L2, Warning) << "Unsupported V4L2 pixel format "
+ << toString();
return PixelFormat();
}
@@ -294,26 +352,71 @@ PixelFormat V4L2PixelFormat::toPixelFormat() const
}
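For instance, a pipeline handler that only probes whether a format is supported can pass warn = false to keep the log clean (sketch; the variable names are illustrative):

    V4L2PixelFormat v4l2Format(V4L2_PIX_FMT_NV12);
    PixelFormat pixelFormat = v4l2Format.toPixelFormat(false);
    if (!pixelFormat.isValid())
            return; /* Unsupported format, skip it silently. */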
/**
- * \brief Convert \a pixelFormat to its corresponding V4L2PixelFormat
+ * \brief Retrieve the list of V4L2PixelFormat associated with \a pixelFormat
* \param[in] pixelFormat The PixelFormat to convert
- * \param[in] multiplanar V4L2 Multiplanar API support flag
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. Set the \a multiplanar parameter to false to select a format
- * with contiguous planes, or to true to select a format with non-contiguous
- * planes.
+ * Multiple V4L2 formats may exist for one PixelFormat as V4L2 defines separate
+ * 4CCs for contiguous and non-contiguous versions of the same image format.
*
- * \return The V4L2PixelFormat corresponding to \a pixelFormat
+ * \return The list of V4L2PixelFormat corresponding to \a pixelFormat
*/
-V4L2PixelFormat V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar)
+const std::vector<V4L2PixelFormat> &
+V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
{
+ static const std::vector<V4L2PixelFormat> empty;
+
const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
if (!info.isValid())
- return V4L2PixelFormat();
+ return empty;
- return multiplanar ? info.v4l2Formats.multi : info.v4l2Formats.single;
+ return info.v4l2Formats;
+}
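Callers now receive the whole list and choose a variant themselves; the ordering is a property of the PixelFormatInfo tables rather than of this function, with the contiguous variant conventionally listed first (sketch):

    const std::vector<V4L2PixelFormat> &v4l2Formats =
            V4L2PixelFormat::fromPixelFormat(formats::YUV420);
    if (v4l2Formats.empty())
            return; /* No V4L2 4CC maps to this PixelFormat. */

    /* Default to the first variant, typically the contiguous one. */
    V4L2PixelFormat v4l2Format = v4l2Formats.front();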
+
+/**
+ * \brief Test if a V4L2PixelFormat is one of the line-based generic metadata
+ * formats
+ *
+ * A limited number of metadata formats, the ones that represent generic
+ * line-based metadata buffers, need to have their width, height and
+ * bytesperline set by userspace.
+ *
+ * This function tests if the current V4L2PixelFormat is one of those.
+ *
+ * Note: It would have been nicer to store this information in a
+ * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to
+ * applications, there are no PixelFormat or DRM FourCC codes associated with
+ * them.
+ *
+ * \return True if the V4L2PixelFormat is a generic line-based format, false
+ * otherwise
+ */
+bool V4L2PixelFormat::isGenericLineBasedMetadata() const
+{
+ switch (fourcc_) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ return true;
+ default:
+ return false;
+ }
+}
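A sketch of the intended use, sizing a generic line-based metadata buffer before setting the format (the width and height values are purely illustrative, and the width/height fields of v4l2_meta_format require a kernel with generic metadata support):

    if (v4l2Format.isGenericLineBasedMetadata()) {
            struct v4l2_meta_format meta = {};
            meta.dataformat = v4l2Format.fourcc();
            meta.width = 2048;  /* illustrative line length */
            meta.height = 2;    /* illustrative number of lines */
            /* bytesperline is computed and returned by the driver. */
    }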
+
+/**
+ * \brief Insert a text representation of a V4L2PixelFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2PixelFormat &f)
+{
+ out << f.toString();
+ return out;
}
} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index fa216e85..7a064d87 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#include "libcamera/internal/v4l2_subdevice.h"
#include <fcntl.h>
-#include <iomanip>
#include <regex>
#include <sstream>
#include <string.h>
@@ -18,11 +17,12 @@
#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>
-#include <libcamera/geometry.h>
-
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"
@@ -35,103 +35,832 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
-namespace {
-
-/*
- * \struct V4L2SubdeviceFormatInfo
+/**
+ * \class MediaBusFormatInfo
* \brief Information about media bus formats
- * \param bitsPerPixel Bits per pixel
- * \param name Name of MBUS format
+ *
+ * The MediaBusFormatInfo class groups together information describing a media
+ * bus format. It facilitates handling of media bus formats by providing data
+ * commonly used in pipeline handlers.
+ *
+ * \var MediaBusFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the format
+ *
+ * \var MediaBusFormatInfo::code
+ * \brief The media bus format code described by this instance (MEDIA_BUS_FMT_*)
+ *
+ * \var MediaBusFormatInfo::type
+ * \brief The media bus format type
+ *
+ * \var MediaBusFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that transmit multiple or fractional pixels per sample, the
+ * value will differ from the bus width.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, or device-specific embedded data formats, report 0 in this field.
+ *
+ * \var MediaBusFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * This field is valid for Type::Image formats only.
*/
-struct V4L2SubdeviceFormatInfo {
- unsigned int bitsPerPixel;
- const char *name;
-};
-/*
- * \var formatInfoMap
- * \brief A map that associates V4L2SubdeviceFormatInfo struct to V4L2 media
- * bus codes
- */
-const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
- { V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE, { 16, "RGB444_2X8_PADHI_BE" } },
- { V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE, { 16, "RGB444_2X8_PADHI_LE" } },
- { V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, { 16, "RGB555_2X8_PADHI_BE" } },
- { V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, { 16, "RGB555_2X8_PADHI_LE" } },
- { V4L2_MBUS_FMT_BGR565_2X8_BE, { 16, "BGR565_2X8_BE" } },
- { V4L2_MBUS_FMT_BGR565_2X8_LE, { 16, "BGR565_2X8_LE" } },
- { V4L2_MBUS_FMT_RGB565_2X8_BE, { 16, "RGB565_2X8_BE" } },
- { V4L2_MBUS_FMT_RGB565_2X8_LE, { 16, "RGB565_2X8_LE" } },
- { V4L2_MBUS_FMT_RGB666_1X18, { 18, "RGB666_1X18" } },
- { V4L2_MBUS_FMT_RGB888_1X24, { 24, "RGB888_1X24" } },
- { V4L2_MBUS_FMT_RGB888_2X12_BE, { 24, "RGB888_2X12_BE" } },
- { V4L2_MBUS_FMT_RGB888_2X12_LE, { 24, "RGB888_2X12_LE" } },
- { V4L2_MBUS_FMT_ARGB8888_1X32, { 32, "ARGB8888_1X32" } },
- { V4L2_MBUS_FMT_Y8_1X8, { 8, "Y8_1X8" } },
- { V4L2_MBUS_FMT_UV8_1X8, { 8, "UV8_1X8" } },
- { V4L2_MBUS_FMT_UYVY8_1_5X8, { 12, "UYVY8_1_5X8" } },
- { V4L2_MBUS_FMT_VYUY8_1_5X8, { 12, "VYUY8_1_5X8" } },
- { V4L2_MBUS_FMT_YUYV8_1_5X8, { 12, "YUYV8_1_5X8" } },
- { V4L2_MBUS_FMT_YVYU8_1_5X8, { 12, "YVYU8_1_5X8" } },
- { V4L2_MBUS_FMT_UYVY8_2X8, { 16, "UYVY8_2X8" } },
- { V4L2_MBUS_FMT_VYUY8_2X8, { 16, "VYUY8_2X8" } },
- { V4L2_MBUS_FMT_YUYV8_2X8, { 16, "YUYV8_2X8" } },
- { V4L2_MBUS_FMT_YVYU8_2X8, { 16, "YVYU8_2X8" } },
- { V4L2_MBUS_FMT_Y10_1X10, { 10, "Y10_1X10" } },
- { V4L2_MBUS_FMT_UYVY10_2X10, { 20, "UYVY10_2X10" } },
- { V4L2_MBUS_FMT_VYUY10_2X10, { 20, "VYUY10_2X10" } },
- { V4L2_MBUS_FMT_YUYV10_2X10, { 20, "YUYV10_2X10" } },
- { V4L2_MBUS_FMT_YVYU10_2X10, { 20, "YVYU10_2X10" } },
- { V4L2_MBUS_FMT_Y12_1X12, { 12, "Y12_1X12" } },
- { V4L2_MBUS_FMT_UYVY8_1X16, { 16, "UYVY8_1X16" } },
- { V4L2_MBUS_FMT_VYUY8_1X16, { 16, "VYUY8_1X16" } },
- { V4L2_MBUS_FMT_YUYV8_1X16, { 16, "YUYV8_1X16" } },
- { V4L2_MBUS_FMT_YVYU8_1X16, { 16, "YVYU8_1X16" } },
- { V4L2_MBUS_FMT_YDYUYDYV8_1X16, { 16, "YDYUYDYV8_1X16" } },
- { V4L2_MBUS_FMT_UYVY10_1X20, { 20, "UYVY10_1X20" } },
- { V4L2_MBUS_FMT_VYUY10_1X20, { 20, "VYUY10_1X20" } },
- { V4L2_MBUS_FMT_YUYV10_1X20, { 20, "YUYV10_1X20" } },
- { V4L2_MBUS_FMT_YVYU10_1X20, { 20, "YVYU10_1X20" } },
- { V4L2_MBUS_FMT_YUV10_1X30, { 30, "YUV10_1X30" } },
- { V4L2_MBUS_FMT_AYUV8_1X32, { 32, "AYUV8_1X32" } },
- { V4L2_MBUS_FMT_UYVY12_2X12, { 24, "UYVY12_2X12" } },
- { V4L2_MBUS_FMT_VYUY12_2X12, { 24, "VYUY12_2X12" } },
- { V4L2_MBUS_FMT_YUYV12_2X12, { 24, "YUYV12_2X12" } },
- { V4L2_MBUS_FMT_YVYU12_2X12, { 24, "YVYU12_2X12" } },
- { V4L2_MBUS_FMT_UYVY12_1X24, { 24, "UYVY12_1X24" } },
- { V4L2_MBUS_FMT_VYUY12_1X24, { 24, "VYUY12_1X24" } },
- { V4L2_MBUS_FMT_YUYV12_1X24, { 24, "YUYV12_1X24" } },
- { V4L2_MBUS_FMT_YVYU12_1X24, { 24, "YVYU12_1X24" } },
- { V4L2_MBUS_FMT_SBGGR8_1X8, { 8, "SBGGR8_1X8" } },
- { V4L2_MBUS_FMT_SGBRG8_1X8, { 8, "SGBRG8_1X8" } },
- { V4L2_MBUS_FMT_SGRBG8_1X8, { 8, "SGRBG8_1X8" } },
- { V4L2_MBUS_FMT_SRGGB8_1X8, { 8, "SRGGB8_1X8" } },
- { V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8, { 8, "SBGGR10_ALAW8_1X8" } },
- { V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8, { 8, "SGBRG10_ALAW8_1X8" } },
- { V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8, { 8, "SGRBG10_ALAW8_1X8" } },
- { V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8, { 8, "SRGGB10_ALAW8_1X8" } },
- { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, { 8, "SBGGR10_DPCM8_1X8" } },
- { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, { 8, "SGBRG10_DPCM8_1X8" } },
- { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, { 8, "SGRBG10_DPCM8_1X8" } },
- { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, { 8, "SRGGB10_DPCM8_1X8" } },
- { V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, { 16, "SBGGR10_2X8_PADHI_BE" } },
- { V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, { 16, "SBGGR10_2X8_PADHI_LE" } },
- { V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, { 16, "SBGGR10_2X8_PADLO_BE" } },
- { V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, { 16, "SBGGR10_2X8_PADLO_LE" } },
- { V4L2_MBUS_FMT_SBGGR10_1X10, { 10, "SBGGR10_1X10" } },
- { V4L2_MBUS_FMT_SGBRG10_1X10, { 10, "SGBRG10_1X10" } },
- { V4L2_MBUS_FMT_SGRBG10_1X10, { 10, "SGRBG10_1X10" } },
- { V4L2_MBUS_FMT_SRGGB10_1X10, { 10, "SRGGB10_1X10" } },
- { V4L2_MBUS_FMT_SBGGR12_1X12, { 12, "SBGGR12_1X12" } },
- { V4L2_MBUS_FMT_SGBRG12_1X12, { 12, "SGBRG12_1X12" } },
- { V4L2_MBUS_FMT_SGRBG12_1X12, { 12, "SGRBG12_1X12" } },
- { V4L2_MBUS_FMT_SRGGB12_1X12, { 12, "SRGGB12_1X12" } },
- { V4L2_MBUS_FMT_AHSV8888_1X32, { 32, "AHSV8888_1X32" } },
+/**
+ * \enum MediaBusFormatInfo::Type
+ * \brief The format type
+ *
+ * \var MediaBusFormatInfo::Type::Image
+ * \brief The format describes image data
+ *
+ * \var MediaBusFormatInfo::Type::Metadata
+ * \brief The format describes generic metadata
+ *
+ * \var MediaBusFormatInfo::Type::EmbeddedData
+ * \brief The format describes sensor embedded data
+ */
+
+namespace {
+
+const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
+ /* This table is sorted to match the order in linux/media-bus-format.h */
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, {
+ .name = "RGB444_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, {
+ .name = "RGB444_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, {
+ .name = "RGB555_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, {
+ .name = "RGB555_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_1X16, {
+ .name = "RGB565_1X16",
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_BE, {
+ .name = "BGR565_2X8_BE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_LE, {
+ .name = "BGR565_2X8_LE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, {
+ .name = "RGB565_2X8_BE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_LE, {
+ .name = "RGB565_2X8_LE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB666_1X18, {
+ .name = "RGB666_1X18",
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 18,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR888_1X24, {
+ .name = "BGR888_1X24",
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_1X24, {
+ .name = "RGB888_1X24",
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, {
+ .name = "RGB888_2X12_BE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_LE, {
+ .name = "RGB888_2X12_LE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB121212_1X36, {
+ .name = "RGB121212_1X36",
+ .code = MEDIA_BUS_FMT_RGB121212_1X36,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 36,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB202020_1X60, {
+ .name = "RGB202020_1X60",
+ .code = MEDIA_BUS_FMT_RGB202020_1X60,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 60,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_ARGB8888_1X32, {
+ .name = "ARGB8888_1X32",
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_Y8_1X8, {
+ .name = "Y8_1X8",
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UV8_1X8, {
+ .name = "UV8_1X8",
+ .code = MEDIA_BUS_FMT_UV8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, {
+ .name = "UYVY8_1_5X8",
+ .code = MEDIA_BUS_FMT_UYVY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, {
+ .name = "VYUY8_1_5X8",
+ .code = MEDIA_BUS_FMT_VYUY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, {
+ .name = "YUYV8_1_5X8",
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, {
+ .name = "YVYU8_1_5X8",
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, {
+ .name = "UYVY8_2X8",
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, {
+ .name = "VYUY8_2X8",
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, {
+ .name = "YUYV8_2X8",
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, {
+ .name = "YVYU8_2X8",
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y10_1X10, {
+ .name = "Y10_1X10",
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_2X10, {
+ .name = "UYVY10_2X10",
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_2X10, {
+ .name = "VYUY10_2X10",
+ .code = MEDIA_BUS_FMT_VYUY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_2X10, {
+ .name = "YUYV10_2X10",
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_2X10, {
+ .name = "YVYU10_2X10",
+ .code = MEDIA_BUS_FMT_YVYU10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y12_1X12, {
+ .name = "Y12_1X12",
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y16_1X16, {
+ .name = "Y16_1X16",
+ .code = MEDIA_BUS_FMT_Y16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1X16, {
+ .name = "UYVY8_1X16",
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1X16, {
+ .name = "VYUY8_1X16",
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1X16, {
+ .name = "YUYV8_1X16",
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1X16, {
+ .name = "YVYU8_1X16",
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YDYUYDYV8_1X16, {
+ .name = "YDYUYDYV8_1X16",
+ .code = MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_1X20, {
+ .name = "UYVY10_1X20",
+ .code = MEDIA_BUS_FMT_UYVY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_1X20, {
+ .name = "VYUY10_1X20",
+ .code = MEDIA_BUS_FMT_VYUY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_1X20, {
+ .name = "YUYV10_1X20",
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_1X20, {
+ .name = "YVYU10_1X20",
+ .code = MEDIA_BUS_FMT_YVYU10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV8_1X24, {
+ .name = "YUV8_1X24",
+ .code = MEDIA_BUS_FMT_YUV8_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV10_1X30, {
+ .name = "YUV10_1X30",
+ .code = MEDIA_BUS_FMT_YUV10_1X30,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 30,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_AYUV8_1X32, {
+ .name = "AYUV8_1X32",
+ .code = MEDIA_BUS_FMT_AYUV8_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_2X12, {
+ .name = "UYVY12_2X12",
+ .code = MEDIA_BUS_FMT_UYVY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_2X12, {
+ .name = "VYUY12_2X12",
+ .code = MEDIA_BUS_FMT_VYUY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_2X12, {
+ .name = "YUYV12_2X12",
+ .code = MEDIA_BUS_FMT_YUYV12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_2X12, {
+ .name = "YVYU12_2X12",
+ .code = MEDIA_BUS_FMT_YVYU12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_1X24, {
+ .name = "UYVY12_1X24",
+ .code = MEDIA_BUS_FMT_UYVY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_1X24, {
+ .name = "VYUY12_1X24",
+ .code = MEDIA_BUS_FMT_VYUY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_1X24, {
+ .name = "YUYV12_1X24",
+ .code = MEDIA_BUS_FMT_YUYV12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_1X24, {
+ .name = "YVYU12_1X24",
+ .code = MEDIA_BUS_FMT_YVYU12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, {
+ .name = "SBGGR8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, {
+ .name = "SGBRG8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, {
+ .name = "SGRBG8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, {
+ .name = "SRGGB8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, {
+ .name = "SBGGR10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, {
+ .name = "SGBRG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, {
+ .name = "SGRBG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, {
+ .name = "SRGGB10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, {
+ .name = "SBGGR10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, {
+ .name = "SGBRG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, {
+ .name = "SGRBG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, {
+ .name = "SRGGB10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, {
+ .name = "SBGGR10_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, {
+ .name = "SBGGR10_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, {
+ .name = "SBGGR10_2X8_PADLO_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, {
+ .name = "SBGGR10_2X8_PADLO_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, {
+ .name = "SBGGR10_1X10",
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, {
+ .name = "SGBRG10_1X10",
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, {
+ .name = "SGRBG10_1X10",
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, {
+ .name = "SRGGB10_1X10",
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, {
+ .name = "SBGGR12_1X12",
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, {
+ .name = "SGBRG12_1X12",
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, {
+ .name = "SGRBG12_1X12",
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, {
+ .name = "SRGGB12_1X12",
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, {
+ .name = "SBGGR14_1X14",
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, {
+ .name = "SGBRG14_1X14",
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, {
+ .name = "SGRBG14_1X14",
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, {
+ .name = "SRGGB14_1X14",
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, {
+ .name = "SBGGR20_1X20",
+ .code = MEDIA_BUS_FMT_SBGGR20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, {
+ .name = "SGBRG20_1X20",
+ .code = MEDIA_BUS_FMT_SGBRG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, {
+ .name = "SGRBG20_1X20",
+ .code = MEDIA_BUS_FMT_SGRBG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, {
+ .name = "SRGGB20_1X20",
+ .code = MEDIA_BUS_FMT_SRGGB20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ /* \todo Clarify colour encoding for HSV formats */
+ { MEDIA_BUS_FMT_AHSV8888_1X32, {
+ .name = "AHSV8888_1X32",
+ .code = MEDIA_BUS_FMT_AHSV8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_JPEG_1X8, {
+ .name = "JPEG_1X8",
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_METADATA_FIXED, {
+ .name = "METADATA_FIXED",
+ .code = MEDIA_BUS_FMT_METADATA_FIXED,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_8, {
+ .name = "META_8",
+ .code = MEDIA_BUS_FMT_META_8,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_10, {
+ .name = "META_10",
+ .code = MEDIA_BUS_FMT_META_10,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_12, {
+ .name = "META_12",
+ .code = MEDIA_BUS_FMT_META_12,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_14, {
+ .name = "META_14",
+ .code = MEDIA_BUS_FMT_META_14,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_16, {
+ .name = "META_16",
+ .code = MEDIA_BUS_FMT_META_16,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_20, {
+ .name = "META_20",
+ .code = MEDIA_BUS_FMT_META_20,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_24, {
+ .name = "META_24",
+ .code = MEDIA_BUS_FMT_META_24,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_CCS_EMBEDDED, {
+ .name = "CCS_EMBEDDED",
+ .code = MEDIA_BUS_FMT_CCS_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_OV2740_EMBEDDED, {
+ .name = "OV2740_EMBEDDED",
+ .code = MEDIA_BUS_FMT_OV2740_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
};
} /* namespace */
/**
+ * \fn bool MediaBusFormatInfo::isValid() const
+ * \brief Check if the media bus format info is valid
+ * \return True if the media bus format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a media bus format
+ * \param[in] code The media bus format code
+ * \return The MediaBusFormatInfo describing the \a code if known, or an invalid
+ * MediaBusFormatInfo otherwise
+ */
+const MediaBusFormatInfo &MediaBusFormatInfo::info(uint32_t code)
+{
+ static const MediaBusFormatInfo invalid{};
+
+ const auto it = mediaBusFormatInfo.find(code);
+ if (it == mediaBusFormatInfo.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported media bus format "
+ << utils::hex(code, 4);
+ return invalid;
+ }
+
+ return it->second;
+}
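This makes it straightforward to derive per-line sizes from a media bus code while guarding against unknown codes, which return a zero-filled info (minimal sketch; the image width is assumed to be known):

    const MediaBusFormatInfo &info =
            MediaBusFormatInfo::info(MEDIA_BUS_FMT_SRGGB10_1X10);
    if (info.isValid()) {
            /* Round the line size up to a whole number of bytes. */
            unsigned int bytesPerLine = (width * info.bitsPerPixel + 7) / 8;
    }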
+
+/**
+ * \struct V4L2SubdeviceCapability
+ * \brief struct v4l2_subdev_capability object wrapper and helpers
+ *
+ * The V4L2SubdeviceCapability structure manages the information returned by the
+ * VIDIOC_SUBDEV_QUERYCAP ioctl.
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::isReadOnly()
+ * \brief Retrieve if a subdevice is registered as read-only
+ *
+ * A V4L2 subdevice is registered as read-only if V4L2_SUBDEV_CAP_RO_SUBDEV
+ * is listed as part of its capabilities.
+ *
+ * \return True if the subdevice is registered as read-only, false otherwise
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::hasStreams()
+ * \brief Retrieve if a subdevice supports the V4L2 streams API
+ * \return True if the subdevice supports the streams API, false otherwise
+ */
+
+/**
* \struct V4L2SubdeviceFormat
* \brief The V4L2 sub-device image format and sizes
*
@@ -160,7 +889,7 @@ const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
*/
/**
- * \var V4L2SubdeviceFormat::mbus_code
+ * \var V4L2SubdeviceFormat::code
* \brief The image format bus code
*/
@@ -190,34 +919,31 @@ const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
*/
const std::string V4L2SubdeviceFormat::toString() const
{
- std::stringstream mbus;
- mbus << size.toString() << "-";
+ std::stringstream ss;
+ ss << *this;
- const auto it = formatInfoMap.find(mbus_code);
-
- if (it == formatInfoMap.end())
- mbus << utils::hex(mbus_code, 4);
- else
- mbus << it->second.name;
-
- return mbus.str();
+ return ss.str();
}
/**
- * \brief Retrieve the number of bits per pixel for the V4L2 subdevice format
- * \return The number of bits per pixel for the format, or 0 if the format is
- * not supported
+ * \brief Insert a text representation of a V4L2SubdeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2SubdeviceFormat
+ * \return The output stream \a out
*/
-uint8_t V4L2SubdeviceFormat::bitsPerPixel() const
+std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
{
- const auto it = formatInfoMap.find(mbus_code);
- if (it == formatInfoMap.end()) {
- LOG(V4L2, Error) << "No information available for format '"
- << toString() << "'";
- return 0;
- }
+ out << f.size << "-";
- return it->second.bitsPerPixel;
+ const auto it = mediaBusFormatInfo.find(f.code);
+
+ if (it == mediaBusFormatInfo.end())
+ out << utils::hex(f.code, 4);
+ else
+ out << it->second.name;
+
+ return out;
}
/**
@@ -249,6 +975,134 @@ uint8_t V4L2SubdeviceFormat::bitsPerPixel() const
*/
/**
+ * \class V4L2Subdevice::Stream
+ * \brief V4L2 subdevice stream
+ *
+ * This class identifies a subdev stream by bundling the pad number with the
+ * stream number. It is used in all stream-aware functions of the V4L2Subdevice
+ * class to identify the stream the functions operate on.
+ *
+ * \var V4L2Subdevice::Stream::pad
+ * \brief The 0-indexed pad number
+ *
+ * \var V4L2Subdevice::Stream::stream
+ * \brief The stream number
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream()
+ * \brief Construct a Stream with pad and stream set to 0
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream(unsigned int pad, unsigned int stream)
+ * \brief Construct a Stream with a given \a pad and \a stream number
+ * \param[in] pad The 0-indexed pad number
+ * \param[in] stream The stream number
+ */
+
+/**
+ * \brief Compare streams for equality
+ * \return True if the two streams are equal, false otherwise
+ */
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+{
+ return lhs.pad == rhs.pad && lhs.stream == rhs.stream;
+}
+
+/**
+ * \fn bool operator!=(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+ * \brief Compare streams for inequality
+ * \return True if the two streams are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Stream into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] stream The V4L2Subdevice::Stream
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream)
+{
+ out << stream.pad << "/" << stream.stream;
+
+ return out;
+}
+
+/**
+ * \class V4L2Subdevice::Route
+ * \brief V4L2 subdevice routing table entry
+ *
+ * This class models a route in the subdevice routing table. It is similar to
+ * the v4l2_subdev_route structure, but uses the V4L2Subdevice::Stream class
+ * for easier usage with the V4L2Subdevice stream-aware functions.
+ *
+ * \var V4L2Subdevice::Route::sink
+ * \brief The sink stream of the route
+ *
+ * \var V4L2Subdevice::Route::source
+ * \brief The source stream of the route
+ *
+ * \var V4L2Subdevice::Route::flags
+ * \brief The route flags (V4L2_SUBDEV_ROUTE_FL_*)
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route()
+ * \brief Construct a Route with default streams
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route(const Stream &sink, const Stream &source,
+ * uint32_t flags)
+ * \brief Construct a Route from \a sink to \a source
+ * \param[in] sink The sink stream
+ * \param[in] source The source stream
+ * \param[in] flags The route flags
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Route into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] route The V4L2Subdevice::Route
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route)
+{
+ out << route.sink << " -> " << route.source
+ << " (" << utils::hex(route.flags) << ")";
+
+ return out;
+}
+
+/**
+ * \typedef V4L2Subdevice::Routing
+ * \brief V4L2 subdevice routing table
+ *
+ * This type stores a subdevice routing table as a vector of routes.
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Routing into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] routing The V4L2Subdevice::Routing
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing)
+{
+ for (const auto &[i, route] : utils::enumerate(routing)) {
+ out << "[" << i << "] " << route;
+ if (i != routing.size() - 1)
+ out << ", ";
+ }
+
+ return out;
+}
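Taken together, a routing table can be built and logged in a few lines (sketch, assuming Routing is a std::vector of Route):

    V4L2Subdevice::Routing routing;
    routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
                         V4L2Subdevice::Stream{ 1, 0 },
                         V4L2_SUBDEV_ROUTE_FL_ACTIVE);
    LOG(V4L2, Debug) << "Routing: " << routing;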
+
+/**
* \brief Create a V4L2 subdevice from a MediaEntity using its device node
* path
*/
@@ -268,7 +1122,40 @@ V4L2Subdevice::~V4L2Subdevice()
*/
int V4L2Subdevice::open()
{
- return V4L2Device::open(O_RDWR);
+ int ret = V4L2Device::open(O_RDWR);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to query the subdev capabilities. The VIDIOC_SUBDEV_QUERYCAP API
+ * was introduced in kernel v5.8; ENOTTY errors must be ignored to
+ * support older kernels.
+ */
+ caps_ = {};
+ ret = ioctl(VIDIOC_SUBDEV_QUERYCAP, &caps_);
+ if (ret < 0 && errno != ENOTTY) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to query capabilities: " << strerror(-ret);
+ return ret;
+ }
+
+ /* If the subdev supports streams, enable the streams API. */
+ if (caps_.hasStreams()) {
+ struct v4l2_subdev_client_capability clientCaps{};
+ clientCaps.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ ret = ioctl(VIDIOC_SUBDEV_S_CLIENT_CAP, &clientCaps);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to set client capabilities: "
+ << strerror(-ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -279,7 +1166,7 @@ int V4L2Subdevice::open()
/**
* \brief Get selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] stream The stream the rectangle is retrieved from
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[out] rect The retrieved selection rectangle
*
@@ -287,13 +1174,14 @@ int V4L2Subdevice::open()
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::getSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -301,7 +1189,7 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to get rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -314,8 +1202,19 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
}
/**
+ * \fn V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
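Usage of the stream-aware overload is then (sketch; pad 0, stream 0, with subdev being a V4L2Subdevice pointer):

    V4L2Subdevice::Stream stream{ 0, 0 };
    Rectangle crop;
    int ret = subdev->getSelection(stream, V4L2_SEL_TGT_CROP, &crop);
    if (ret)
            return ret;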
+
+/**
* \brief Set selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] stream The stream the rectangle is to be applied to
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[inout] rect The selection rectangle to be applied
*
@@ -323,13 +1222,14 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::setSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -342,7 +1242,7 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to set rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -353,26 +1253,40 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
return 0;
}
+
/**
- * \brief Enumerate all media bus codes and frame sizes on a \a pad
- * \param[in] pad The 0-indexed pad number to enumerate formats on
+ * \fn V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \brief Enumerate all media bus codes and frame sizes on a \a stream
+ * \param[in] stream The stream to enumerate formats for
*
* Enumerate all media bus codes and frame sizes supported by the subdevice on
- * a \a pad.
+ * a \a stream.
*
* \return A list of the supported device formats
*/
-V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
+V4L2Subdevice::Formats V4L2Subdevice::formats(const Stream &stream)
{
Formats formats;
- if (pad >= entity_->pads().size()) {
- LOG(V4L2, Error) << "Invalid pad: " << pad;
+ if (stream.pad >= entity_->pads().size()) {
+ LOG(V4L2, Error) << "Invalid pad: " << stream.pad;
return {};
}
- for (unsigned int code : enumPadCodes(pad)) {
- std::vector<SizeRange> sizes = enumPadSizes(pad, code);
+ for (unsigned int code : enumPadCodes(stream)) {
+ std::vector<SizeRange> sizes = enumPadSizes(stream, code);
if (sizes.empty())
return {};
@@ -380,7 +1294,7 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
if (!inserted.second) {
LOG(V4L2, Error)
<< "Could not add sizes for media bus code "
- << code << " on pad " << pad;
+ << code << " on pad " << stream.pad;
return {};
}
}
@@ -389,79 +1303,387 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
}
/**
- * \brief Retrieve the image format set on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \fn V4L2Subdevice::formats(unsigned int pad)
+ * \brief Enumerate all media bus codes and frame sizes on a \a pad
+ * \param[in] pad The 0-indexed pad number to enumerate formats on
+ *
+ * Enumerate all media bus codes and frame sizes supported by the subdevice on
+ * a \a pad.
+ *
+ * \return A list of the supported device formats
+ */
+
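Enumerating everything a stream supports then reduces to a couple of loops (sketch; Formats maps each media bus code to its size ranges):

    V4L2Subdevice::Stream stream{ 0, 0 };
    for (const auto &[code, sizes] : subdev->formats(stream)) {
            for (const SizeRange &range : sizes)
                    LOG(V4L2, Debug)
                            << utils::hex(code, 4) << ": " << range.toString();
    }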
+std::optional<ColorSpace> V4L2Subdevice::toColorSpace(const v4l2_mbus_framefmt &format) const
+{
+ /*
+ * Only image formats have a color space, for other formats (such as
+ * metadata formats) the color space concept isn't applicable. V4L2
+ * subdev drivers return a colorspace set to V4L2_COLORSPACE_DEFAULT in
+ * that case (as well as for image formats when the driver hasn't
+ * bothered implementing color space support). Check the colorspace
+ * field here and return std::nullopt directly to avoid logging a
+ * warning.
+ */
+ if (format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return std::nullopt;
+
+ PixelFormatInfo::ColourEncoding colourEncoding;
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(format.code);
+ if (info.isValid()) {
+ colourEncoding = info.colourEncoding;
+ } else {
+ LOG(V4L2, Warning)
+ << "Unknown subdev format "
+ << utils::hex(format.code, 4)
+ << ", defaulting to RGB encoding";
+
+ colourEncoding = PixelFormatInfo::ColourEncodingRGB;
+ }
+
+ return V4L2Device::toColorSpace(format, colourEncoding);
+}
+
+/**
+ * \brief Retrieve the image format set on one of the V4L2 subdevice streams
+ * \param[in] stream The stream the format is to be retrieved from
* \param[out] format The image bus format
* \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
int ret = ioctl(VIDIOC_SUBDEV_G_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to get format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to get format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Retrieve the image format set on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set an image format on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be applied to
- * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] stream The stream the format is to be applied to
+ * \param[inout] format The image bus format to apply to the stream
* \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
*
- * Apply the requested image format to the desired media pad and return the
+ * Apply the requested image format to the desired stream and return the
* actually applied format parameters, as getFormat() would do.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
subdevFmt.format.width = format->size.width;
subdevFmt.format.height = format->size.height;
- subdevFmt.format.code = format->mbus_code;
+ subdevFmt.format.code = format->code;
subdevFmt.format.field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, subdevFmt.format);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, subdevFmt.format);
+
+ /* The CSC flag is only applicable to source pads. */
+ if (entity_->pads()[stream.pad]->flags() & MEDIA_PAD_FL_SOURCE)
+ subdevFmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+ }
int ret = ioctl(VIDIOC_SUBDEV_S_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to set format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to set format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
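For illustration, a minimal sketch of stream-aware format negotiation, assuming an already-opened V4L2Subdevice *subdev (the pad number and media bus code are hypothetical):

	V4L2Subdevice::Stream stream;
	stream.pad = 1;     /* hypothetical pad number */
	stream.stream = 0;

	V4L2SubdeviceFormat format{};
	format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
	format.size = { 1920, 1080 };

	/* On return, format reflects what the driver actually applied. */
	int ret = subdev->setFormat(stream, &format, V4L2Subdevice::ActiveFormat);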
/**
+ * \fn V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Set an image format on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be applied to
+ * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the requested image format to the desired media pad and return the
+ * actually applied format parameters, as getFormat() would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+namespace {
+
+void routeFromKernel(V4L2Subdevice::Route &route,
+ const struct v4l2_subdev_route &kroute)
+{
+ route.sink.pad = kroute.sink_pad;
+ route.sink.stream = kroute.sink_stream;
+ route.source.pad = kroute.source_pad;
+ route.source.stream = kroute.source_stream;
+ route.flags = kroute.flags;
+}
+
+void routeToKernel(const V4L2Subdevice::Route &route,
+ struct v4l2_subdev_route &kroute)
+{
+ kroute.sink_pad = route.sink.pad;
+ kroute.sink_stream = route.sink.stream;
+ kroute.source_pad = route.source.pad;
+ kroute.source_stream = route.source.stream;
+ kroute.flags = route.flags;
+}
+
+/*
+ * Legacy routing support for pre-v6.10-rc1 kernels. Drop when v6.12-rc1 gets
+ * released.
+ */
+struct v4l2_subdev_routing_legacy {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
+#define VIDIOC_SUBDEV_G_ROUTING_LEGACY _IOWR('V', 38, struct v4l2_subdev_routing_legacy)
+#define VIDIOC_SUBDEV_S_ROUTING_LEGACY _IOWR('V', 39, struct v4l2_subdev_routing_legacy)
+
+} /* namespace */
+
+int V4L2Subdevice::getRoutingLegacy(Routing *routing, Whence whence)
+{
+ struct v4l2_subdev_routing_legacy rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Retrieve the subdevice's internal routing table
+ * \param[out] routing The routing table
+ * \param[in] whence The routing table to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
+{
+ routing->clear();
+
+ if (!caps_.hasStreams())
+ return 0;
+
+ struct v4l2_subdev_routing rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return V4L2Subdevice::getRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ if (!rt.num_routes)
+ return 0;
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+int V4L2Subdevice::setRoutingLegacy(Routing *routing, Whence whence)
+{
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing_legacy rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Set a routing table on the V4L2 subdevice
+ * \param[inout] routing The routing table
+ * \param[in] whence The routing table to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply to the V4L2 subdevice the routing table \a routing and update its
+ * content to reflect the actually applied routing table as getRouting() would
+ * do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
+{
+ if (!caps_.hasStreams()) {
+ routing->clear();
+ return 0;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing rt = {};
+ rt.which = whence;
+ rt.len_routes = routes.size();
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return setRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ /*
+ * The kernel may want to return more routes than we have space for. In
+ * that event, we must issue a VIDIOC_SUBDEV_G_ROUTING call to retrieve
+ * the additional routes.
+ */
+ if (rt.num_routes > routes.size()) {
+ routes.resize(rt.num_routes);
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
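A minimal sketch of programming a single active route, assuming an open V4L2Subdevice *subdev and that Routing behaves as a standard vector of Route entries (as the resize() and operator[] usage above suggests); the pad and stream numbers are illustrative, and V4L2_SUBDEV_ROUTE_FL_ACTIVE is the kernel UAPI flag:

	V4L2Subdevice::Route route;
	route.sink.pad = 0;
	route.sink.stream = 0;
	route.source.pad = 1;
	route.source.stream = 0;
	route.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;

	V4L2Subdevice::Routing routing;
	routing.push_back(route);

	/* On return, routing holds the table the kernel actually installed. */
	int ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);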
+
+/**
* \brief Retrieve the model name of the device
*
* The model name allows identification of the specific device model. This can
@@ -514,6 +1736,12 @@ const std::string &V4L2Subdevice::model()
}
/**
+ * \fn V4L2Subdevice::caps()
+ * \brief Retrieve the subdevice V4L2 capabilities
+ * \return The subdevice V4L2 capabilities
+ */
+
+/**
* \brief Create a new video subdevice instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
@@ -537,14 +1765,15 @@ std::string V4L2Subdevice::logPrefix() const
return "'" + entity_->name() + "'";
}
-std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
+std::vector<unsigned int> V4L2Subdevice::enumPadCodes(const Stream &stream)
{
std::vector<unsigned int> codes;
int ret;
for (unsigned int index = 0; ; index++) {
struct v4l2_subdev_mbus_code_enum mbusEnum = {};
- mbusEnum.pad = pad;
+ mbusEnum.pad = stream.pad;
+ mbusEnum.stream = stream.stream;
mbusEnum.index = index;
mbusEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -557,7 +1786,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
if (ret < 0 && ret != -EINVAL) {
LOG(V4L2, Error)
- << "Unable to enumerate formats on pad " << pad
+ << "Unable to enumerate formats on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -565,7 +1794,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
return codes;
}
-std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
+std::vector<SizeRange> V4L2Subdevice::enumPadSizes(const Stream &stream,
unsigned int code)
{
std::vector<SizeRange> sizes;
@@ -574,7 +1803,8 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
for (unsigned int index = 0;; index++) {
struct v4l2_subdev_frame_size_enum sizeEnum = {};
sizeEnum.index = index;
- sizeEnum.pad = pad;
+ sizeEnum.pad = stream.pad;
+ sizeEnum.stream = stream.stream;
sizeEnum.code = code;
sizeEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -588,7 +1818,7 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
if (ret < 0 && ret != -EINVAL && ret != -ENOTTY) {
LOG(V4L2, Error)
- << "Unable to enumerate sizes on pad " << pad
+ << "Unable to enumerate sizes on pad " << stream
<< ": " << strerror(-ret);
return {};
}
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index 5f36ee20..e241eb47 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
#include "libcamera/internal/v4l2_videodevice.h"
@@ -10,7 +10,6 @@
#include <algorithm>
#include <array>
#include <fcntl.h>
-#include <iomanip>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
@@ -202,6 +201,19 @@ V4L2BufferCache::~V4L2BufferCache()
}
/**
+ * \brief Check if all the entries in the cache are unused
+ */
+bool V4L2BufferCache::isEmpty() const
+{
+ for (auto const &entry : cache_) {
+ if (!entry.free_)
+ return false;
+ }
+
+ return true;
+}
+
+/**
* \brief Find the best V4L2 buffer for a FrameBuffer
* \param[in] buffer The FrameBuffer
*
@@ -420,11 +432,25 @@ bool V4L2BufferCache::Entry::operator==(const FrameBuffer &buffer) const
const std::string V4L2DeviceFormat::toString() const
{
std::stringstream ss;
- ss << size.toString() << "-" << fourcc.toString();
+ ss << *this;
+
return ss.str();
}
/**
+ * \brief Insert a text representation of a V4L2DeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2DeviceFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2DeviceFormat &f)
+{
+ out << f.size << "-" << f.fourcc;
+ return out;
+}
+
+/**
* \class V4L2VideoDevice
* \brief V4L2VideoDevice object and API
*
@@ -507,7 +533,8 @@ const std::string V4L2DeviceFormat::toString() const
*/
V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
: V4L2Device(deviceNode), formatInfo_(nullptr), cache_(nullptr),
- fdBufferNotifier_(nullptr), streaming_(false)
+ fdBufferNotifier_(nullptr), state_(State::Stopped),
+ watchdogDuration_(0.0)
{
/*
* We default to an MMAP based CAPTURE video device, however this will
@@ -526,6 +553,7 @@ V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
V4L2VideoDevice::V4L2VideoDevice(const MediaEntity *entity)
: V4L2VideoDevice(entity->deviceNode())
{
+ watchdog_.timeout.connect(this, &V4L2VideoDevice::watchdogExpired);
}
V4L2VideoDevice::~V4L2VideoDevice()
@@ -604,13 +632,9 @@ int V4L2VideoDevice::open()
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
- if (ret) {
- LOG(V4L2, Error) << "Failed to get format";
+ ret = initFormats();
+ if (ret)
return ret;
- }
-
- formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
return 0;
}
@@ -634,14 +658,17 @@ int V4L2VideoDevice::open()
*/
int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
{
+ int ret;
+
UniqueFD newFd = handle.dup();
if (!newFd.isValid()) {
+ ret = -errno;
LOG(V4L2, Error) << "Failed to duplicate file handle: "
- << strerror(errno);
- return -errno;
+ << strerror(-ret);
+ return ret;
}
- int ret = V4L2Device::setFd(std::move(newFd));
+ ret = V4L2Device::setFd(std::move(newFd));
if (ret < 0) {
LOG(V4L2, Error) << "Failed to set file handle: "
<< strerror(-ret);
@@ -694,7 +721,24 @@ int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
+ ret = initFormats();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int V4L2VideoDevice::initFormats()
+{
+ const std::vector<V4L2PixelFormat> &deviceFormats = enumPixelformats(0);
+ if (deviceFormats.empty()) {
+ LOG(V4L2, Error) << "Failed to initialize device formats";
+ return -EINVAL;
+ }
+
+ pixelFormats_ = { deviceFormats.begin(), deviceFormats.end() };
+
+ int ret = getFormat(&format_);
if (ret) {
LOG(V4L2, Error) << "Failed to get format";
return ret;
@@ -758,12 +802,19 @@ std::string V4L2VideoDevice::logPrefix() const
*/
int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return getFormatMeta(format);
- else if (caps_.isMultiplanar())
- return getFormatMultiplane(format);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return getFormatSingleplane(format);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return getFormatMultiplane(format);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return getFormatMeta(format);
+ default:
+ return -EINVAL;
+ }
}
/**
@@ -778,12 +829,19 @@ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return trySetFormatMeta(format, false);
- else if (caps_.isMultiplanar())
- return trySetFormatMultiplane(format, false);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return trySetFormatSingleplane(format, false);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return trySetFormatMultiplane(format, false);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return trySetFormatMeta(format, false);
+ default:
+ return -EINVAL;
+ }
}
/**
@@ -797,13 +855,25 @@ int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
{
- int ret = 0;
- if (caps_.isMeta())
- ret = trySetFormatMeta(format, true);
- else if (caps_.isMultiplanar())
- ret = trySetFormatMultiplane(format, true);
- else
+ int ret;
+
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
ret = trySetFormatSingleplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ ret = trySetFormatMultiplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ ret = trySetFormatMeta(format, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
/* Cache the set format on success. */
if (ret)
@@ -818,7 +888,7 @@ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
@@ -828,25 +898,42 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
return ret;
}
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
+ format->planes[0].size = meta->buffersize;
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
+
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
{
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
- pix->dataformat = format->fourcc;
- pix->buffersize = format->planes[0].size;
+ meta->dataformat = format->fourcc;
+ meta->buffersize = format->planes[0].size;
+ if (genericLineBased) {
+ meta->width = format->size.width;
+ meta->height = format->size.height;
+ meta->bytesperline = format->planes[0].bpl;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
LOG(V4L2, Error)
@@ -859,16 +946,29 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
* Return to caller the format actually applied on the video device,
* which might differ from the requested one.
*/
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+ format->planes[0].size = meta->buffersize;
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
+template<typename T>
+std::optional<ColorSpace> V4L2VideoDevice::toColorSpace(const T &v4l2Format)
+{
+ V4L2PixelFormat fourcc{ v4l2Format.pixelformat };
+ return V4L2Device::toColorSpace(v4l2Format, PixelFormatInfo::info(fourcc).colourEncoding);
+}
+
int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
@@ -908,7 +1008,12 @@ int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->num_planes = format->planesCount;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ASSERT(pix->num_planes <= std::size(pix->plane_fmt));
@@ -978,7 +1083,12 @@ int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->bytesperline = format->planes[0].bpl;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
@@ -1128,6 +1238,38 @@ std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat)
}
/**
+ * \brief Get the selection rectangle for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The selection rectangle to retrieve
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2VideoDevice::getSelection(unsigned int target, Rectangle *rect)
+{
+ struct v4l2_selection sel = {};
+
+ sel.type = bufferType_;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error) << "Unable to get rectangle " << target
+ << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
+}
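A short usage sketch, assuming an open V4L2VideoDevice *video; V4L2_SEL_TGT_CROP is the standard kernel selection target:

	Rectangle crop;
	int ret = video->getSelection(V4L2_SEL_TGT_CROP, &crop);
	if (ret < 0)
		return ret;

	LOG(V4L2, Debug) << "Active crop: " << crop.width << "x" << crop.height
			 << " @ (" << crop.x << "," << crop.y << ")";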
+
+/**
* \brief Set a selection rectangle \a rect for \a target
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[inout] rect The selection rectangle to be applied
@@ -1409,7 +1551,7 @@ UniqueFD V4L2VideoDevice::exportDmabufFd(unsigned int index,
expbuf.type = bufferType_;
expbuf.index = index;
expbuf.plane = plane;
- expbuf.flags = O_RDWR;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
ret = ioctl(VIDIOC_EXPBUF, &expbuf);
if (ret < 0) {
@@ -1471,6 +1613,9 @@ int V4L2VideoDevice::importBuffers(unsigned int count)
*/
int V4L2VideoDevice::releaseBuffers()
{
+ if (!cache_)
+ return 0;
+
LOG(V4L2, Debug) << "Releasing buffers";
delete cache_;
@@ -1491,6 +1636,9 @@ int V4L2VideoDevice::releaseBuffers()
* The best available V4L2 buffer is picked for \a buffer using the V4L2 buffer
* cache.
*
+ * Note that queueBuffer() will fail if the device is in the process of being
+ * stopped from a streaming state through streamOff().
+ *
* \return 0 on success or a negative error code otherwise
*/
int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
@@ -1499,6 +1647,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
struct v4l2_buffer buf = {};
int ret;
+ if (state_ == State::Stopping) {
+ LOG(V4L2, Error) << "Device is in a stopping state.";
+ return -ESHUTDOWN;
+ }
+
/*
* Pipeline handlers should not requeue buffers after releasing the
* buffers on the device. Any occurence of this error should be fixed
@@ -1553,6 +1706,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
if (V4L2_TYPE_IS_OUTPUT(buf.type)) {
const FrameMetadata &metadata = buffer->metadata();
+ for (const auto &plane : metadata.planes()) {
+ if (!plane.bytesused)
+				LOG(V4L2, Warning) << "bytesused == 0 is deprecated";
+ }
+
if (numV4l2Planes != planes.size()) {
/*
* If we have a multi-planar buffer with a V4L2
@@ -1600,7 +1758,14 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
buf.length = planes[0].length;
}
- buf.sequence = metadata.sequence;
+ /*
+ * Timestamps are to be supplied if the device is a mem-to-mem
+ * device. The drivers will have V4L2_BUF_FLAG_TIMESTAMP_COPY
+ * set hence these timestamps will be copied from the output
+ * buffers to capture buffers. If the device is not mem-to-mem,
+ * there is no harm in setting the timestamps as they will be
+ * ignored (and over-written).
+ */
buf.timestamp.tv_sec = metadata.timestamp / 1000000000;
buf.timestamp.tv_usec = (metadata.timestamp / 1000) % 1000000;
}
@@ -1615,8 +1780,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
return ret;
}
- if (queuedBuffers_.empty())
+ if (queuedBuffers_.empty()) {
fdBufferNotifier_->setEnabled(true);
+ if (watchdogDuration_)
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
queuedBuffers_[buf.index] = buffer;
@@ -1684,7 +1852,7 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
* not find it in the queuedBuffers_ list.
*
* Whilst this kernel bug has been fixed in mainline, ensure that we
- * safely ingore buffers which are unexpected to prevetn crashes on
+ * safely ignore buffers which are unexpected to prevent crashes on
* older kernels.
*/
auto it = queuedBuffers_.find(buf.index);
@@ -1700,21 +1868,43 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
FrameBuffer *buffer = it->second;
queuedBuffers_.erase(it);
- if (queuedBuffers_.empty())
+ if (queuedBuffers_.empty()) {
fdBufferNotifier_->setEnabled(false);
+ watchdog_.stop();
+ } else if (watchdogDuration_) {
+ /*
+ * Restart the watchdog timer if there are buffers still queued
+ * in the device.
+ */
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
+
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR
- ? FrameMetadata::FrameError
- : FrameMetadata::FrameSuccess;
- buffer->metadata_.sequence = buf.sequence;
- buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL
- + buf.timestamp.tv_usec * 1000ULL;
+ metadata.status = buf.flags & V4L2_BUF_FLAG_ERROR
+ ? FrameMetadata::FrameError
+ : FrameMetadata::FrameSuccess;
+ metadata.sequence = buf.sequence;
+ metadata.timestamp = buf.timestamp.tv_sec * 1000000000ULL
+ + buf.timestamp.tv_usec * 1000ULL;
if (V4L2_TYPE_IS_OUTPUT(buf.type))
return buffer;
+ /*
+ * Detect kernel drivers which do not reset the sequence number to zero
+ * on stream start.
+ */
+ if (!firstFrame_.has_value()) {
+ if (buf.sequence)
+ LOG(V4L2, Info)
+ << "Zero sequence expected for first frame (got "
+ << buf.sequence << ")";
+ firstFrame_ = buf.sequence;
+ }
+ metadata.sequence -= firstFrame_.value();
+
unsigned int numV4l2Planes = multiPlanar ? buf.length : 1;
- FrameMetadata &metadata = buffer->metadata_;
if (numV4l2Planes != buffer->planes().size()) {
/*
@@ -1789,6 +1979,8 @@ int V4L2VideoDevice::streamOn()
{
int ret;
+ firstFrame_.reset();
+
ret = ioctl(VIDIOC_STREAMON, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1796,7 +1988,9 @@ int V4L2VideoDevice::streamOn()
return ret;
}
- streaming_ = true;
+ state_ = State::Streaming;
+ if (watchdogDuration_ && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
return 0;
}
@@ -1818,9 +2012,12 @@ int V4L2VideoDevice::streamOff()
{
int ret;
- if (!streaming_ && queuedBuffers_.empty())
+ if (state_ != State::Streaming && queuedBuffers_.empty())
return 0;
+ if (watchdogDuration_.count())
+ watchdog_.stop();
+
ret = ioctl(VIDIOC_STREAMOFF, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1828,22 +2025,72 @@ int V4L2VideoDevice::streamOff()
return ret;
}
+ state_ = State::Stopping;
+
/* Send back all queued buffers. */
for (auto it : queuedBuffers_) {
FrameBuffer *buffer = it.second;
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.status = FrameMetadata::FrameCancelled;
+ cache_->put(it.first);
+ metadata.status = FrameMetadata::FrameCancelled;
bufferReady.emit(buffer);
}
+ ASSERT(cache_->isEmpty());
+
queuedBuffers_.clear();
fdBufferNotifier_->setEnabled(false);
- streaming_ = false;
+ state_ = State::Stopped;
return 0;
}
/**
+ * \brief Set the dequeue timeout value
+ * \param[in] timeout The timeout value to be used
+ *
+ * Sets a timeout value, given by \a timeout, that will be used by a watchdog
+ * timer to ensure buffer dequeue events are periodically occurring when the
+ * device is streaming. The watchdog timer is only active when the device is
+ * streaming, so it is not necessary to disable it when the device stops
+ * streaming. The timeout value can be safely updated at any time.
+ *
+ * If the timer expires, the \ref V4L2VideoDevice::dequeueTimeout signal is
+ * emitted. This can typically be used by pipeline handlers to be notified of
+ * stalled devices.
+ *
+ * Set \a timeout to 0 to disable the watchdog timer.
+ */
+void V4L2VideoDevice::setDequeueTimeout(utils::Duration timeout)
+{
+ watchdogDuration_ = timeout;
+
+ watchdog_.stop();
+ if (watchdogDuration_ && state_ == State::Streaming && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(timeout));
+}
+
+/**
+ * \var V4L2VideoDevice::dequeueTimeout
+ * \brief A Signal emitted when the dequeue watchdog timer expires
+ */
+
+/**
+ * \brief Slot to handle an expired dequeue timer
+ *
+ * When this slot is called, the time between successive dequeue events is over
+ * the required timeout. Emit the \ref V4L2VideoDevice::dequeueTimeout signal.
+ */
+void V4L2VideoDevice::watchdogExpired()
+{
+ LOG(V4L2, Warning)
+ << "Dequeue timer of " << watchdogDuration_ << " has expired!";
+
+ dequeueTimeout.emit();
+}
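Putting the watchdog pieces together, a hypothetical pipeline handler could arm a one-second timeout and react to stalls as follows (the handler class and slot name are invented for the example; the code is assumed to run inside a member function of that handler):

	using namespace std::chrono_literals;

	video->dequeueTimeout.connect(this, &MyPipelineHandler::deviceStalled);
	video->setDequeueTimeout(1000ms);  /* a zero duration disables the watchdog */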
+
+/**
* \brief Create a new video device instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
@@ -1863,18 +2110,61 @@ V4L2VideoDevice::fromEntityName(const MediaDevice *media,
}
/**
+ * \brief Convert a PixelFormat to a V4L2PixelFormat supported by the device
+ * \param[in] pixelFormat The PixelFormat to convert
+ *
+ * Convert \a pixelFormat to a V4L2 FourCC that is known to be supported by
+ * the video device.
+ *
+ * A V4L2VideoDevice may support different V4L2 pixel formats that map the same
+ * PixelFormat. This is the case of the contiguous and non-contiguous variants
+ * of multiplanar formats, and with the V4L2 MJPEG and JPEG pixel formats.
+ * Converting a PixelFormat to a V4L2PixelFormat may thus have multiple answers.
+ *
+ * This function converts the \a pixelFormat using the list of V4L2 pixel
+ * formats that the V4L2VideoDevice supports. This guarantees that the returned
+ * V4L2PixelFormat will be valid for the device. If multiple matches are still
+ * possible, contiguous variants are preferred. If the \a pixelFormat is not
+ * supported by the device, the function returns an invalid V4L2PixelFormat.
+ *
+ * \return The V4L2PixelFormat corresponding to \a pixelFormat if supported by
+ * the device, or an invalid V4L2PixelFormat otherwise
+ */
+V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) const
+{
+ const std::vector<V4L2PixelFormat> &v4l2PixelFormats =
+ V4L2PixelFormat::fromPixelFormat(pixelFormat);
+
+ for (const V4L2PixelFormat &v4l2Format : v4l2PixelFormats) {
+ if (pixelFormats_.count(v4l2Format))
+ return v4l2Format;
+ }
+
+ return {};
+}
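For example, a caller validating a configuration might use it along these lines, assuming an open V4L2VideoDevice *video (formats::NV12 is libcamera's NV12 PixelFormat; the surrounding logic is illustrative):

	V4L2PixelFormat fourcc = video->toV4L2PixelFormat(formats::NV12);
	if (!fourcc.isValid()) {
		/* The device supports no V4L2 variant of NV12. */
		return -EINVAL;
	}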
+
+/**
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
+ * Memory to Memory devices in the kernel using the V4L2 M2M API can
+ * operate with multiple contexts for parallel operations on a single
+ * device. Each instance of a V4L2M2MDevice represents a single context.
+ *
* The V4L2M2MDevice manages two V4L2VideoDevice instances on the same
* deviceNode which operate together using two queues to implement the V4L2
* Memory to Memory API.
*
- * The two devices should be opened by calling open() on the V4L2M2MDevice, and
- * can be closed by calling close on the V4L2M2MDevice.
+ * Users of this class should create a new instance of the V4L2M2MDevice for
+ * each desired execution context and then open it by calling open() on the
+ * V4L2M2MDevice and close it by calling close() on the V4L2M2MDevice.
*
* Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture
* or output V4L2VideoDevice is not permitted.
+ *
+ * Once the M2M device is open, users can operate on the output and capture
+ * queues represented by the V4L2VideoDevice returned by the output() and
+ * capture() functions.
*/
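A condensed usage sketch of a single M2M context; the device node is hypothetical and error handling is trimmed:

	V4L2M2MDevice m2m("/dev/video31");
	if (m2m.open() < 0)
		return;

	/* Queue work on the output side, collect results on the capture side. */
	V4L2VideoDevice *output = m2m.output();
	V4L2VideoDevice *capture = m2m.capture();

	/* ... configure formats, allocate buffers, stream ... */

	m2m.close();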
/**
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
new file mode 100644
index 00000000..a5e42461
--- /dev/null
+++ b/src/libcamera/yaml_parser.cpp
@@ -0,0 +1,784 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * libcamera YAML parsing helper
+ */
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include <charconv>
+#include <errno.h>
+#include <functional>
+#include <limits>
+#include <stdlib.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <yaml.h>
+
+/**
+ * \file yaml_parser.h
+ * \brief A YAML parser helper
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(YamlParser)
+
+namespace {
+
+/* Empty static YamlObject as a safe result for invalid operations */
+static const YamlObject empty;
+
+} /* namespace */
+
+/**
+ * \class YamlObject
+ * \brief A class representing the tree structure of the YAML content
+ *
+ * The YamlObject class represents the tree structure of YAML content. A
+ * YamlObject can be empty, a dictionary or list of YamlObjects, or a value if a
+ * tree leaf.
+ */
+
+YamlObject::YamlObject()
+ : type_(Type::Empty)
+{
+}
+
+YamlObject::~YamlObject() = default;
+
+/**
+ * \fn YamlObject::isValue()
+ * \brief Return whether the YamlObject is a value
+ *
+ * \return True if the YamlObject is a value, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isList()
+ * \brief Return whether the YamlObject is a list
+ *
+ * \return True if the YamlObject is a list, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isDictionary()
+ * \brief Return whether the YamlObject is a dictionary
+ *
+ * \return True if the YamlObject is a dictionary, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isEmpty()
+ * \brief Return whether the YamlObject is empty
+ *
+ * \return True if the YamlObject is empty, false otherwise
+ */
+
+/**
+ * \fn YamlObject::operator bool()
+ * \brief Return whether the YamlObject is non-empty
+ *
+ * \return False if the YamlObject is empty, true otherwise
+ */
+
+/**
+ * \fn YamlObject::size()
+ * \brief Retrieve the number of elements in a dictionary or list YamlObject
+ *
+ * This function retrieves the size of the YamlObject, defined as the number of
+ * child elements it contains. Only YamlObject instances of Dictionary or List
+ * types have a size, calling this function on other types of instances is
+ * invalid and results in undefined behaviour.
+ *
+ * \return The size of the YamlObject
+ */
+std::size_t YamlObject::size() const
+{
+ switch (type_) {
+ case Type::Dictionary:
+ case Type::List:
+ return list_.size();
+ default:
+ return 0;
+ }
+}
+
+/**
+ * \fn template<typename T> YamlObject::get<T>() const
+ * \brief Parse the YamlObject as a \a T value
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), std::nullopt is returned.
+ *
+ * \return The YamlObject value, or std::nullopt if parsing failed
+ */
+
+/**
+ * \fn template<typename T, typename U> YamlObject::get<T>(U &&defaultValue) const
+ * \brief Parse the YamlObject as a \a T value
+ * \param[in] defaultValue The default value when failing to parse
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), the \a defaultValue is returned.
+ *
+ * \return The YamlObject value, or \a defaultValue if parsing failed
+ */
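In practice the two overloads combine naturally; a brief sketch, with obj a YamlObject and the key names invented for the example:

	/* Mandatory key: bail out if absent or unparsable. */
	std::optional<uint32_t> width = obj["width"].get<uint32_t>();
	if (!width)
		return -EINVAL;

	/* Optional key: fall back to 1.0 if absent or unparsable. */
	double gain = obj["gain"].get<double>(1.0);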
+
+#ifndef __DOXYGEN__
+
+template<>
+std::optional<bool>
+YamlObject::Getter<bool>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_ == "true")
+ return true;
+ else if (obj.value_ == "false")
+ return false;
+
+ return std::nullopt;
+}
+
+template<typename T>
+struct YamlObject::Getter<T, std::enable_if_t<
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T>>>
+{
+ std::optional<T> get(const YamlObject &obj) const
+ {
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ const std::string &str = obj.value_;
+ T value;
+
+ auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
+ value);
+ if (ptr != str.data() + str.size() || ec != std::errc())
+ return std::nullopt;
+
+ return value;
+ }
+};
+
+template struct YamlObject::Getter<int8_t>;
+template struct YamlObject::Getter<uint8_t>;
+template struct YamlObject::Getter<int16_t>;
+template struct YamlObject::Getter<uint16_t>;
+template struct YamlObject::Getter<int32_t>;
+template struct YamlObject::Getter<uint32_t>;
+
+template<>
+std::optional<float>
+YamlObject::Getter<float>::get(const YamlObject &obj) const
+{
+ return obj.get<double>();
+}
+
+template<>
+std::optional<double>
+YamlObject::Getter<double>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_.empty())
+ return std::nullopt;
+
+ char *end;
+
+ errno = 0;
+ double value = utils::strtod(obj.value_.c_str(), &end);
+
+ if ('\0' != *end || errno == ERANGE)
+ return std::nullopt;
+
+ return value;
+}
+
+template<>
+std::optional<std::string>
+YamlObject::Getter<std::string>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ return obj.value_;
+}
+
+template<>
+std::optional<Size>
+YamlObject::Getter<Size>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::List)
+ return std::nullopt;
+
+ if (obj.list_.size() != 2)
+ return std::nullopt;
+
+ auto width = obj.list_[0].value->get<uint32_t>();
+ if (!width)
+ return std::nullopt;
+
+ auto height = obj.list_[1].value->get<uint32_t>();
+ if (!height)
+ return std::nullopt;
+
+ return Size(*width, *height);
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn template<typename T> YamlObject::getList<T>() const
+ * \brief Parse the YamlObject as a list of \a T
+ *
+ * This function parses the value of the YamlObject as a list of \a T objects,
+ * and returns the value as a \a std::vector<T>. If parsing fails, std::nullopt
+ * is returned.
+ *
+ * \return The YamlObject value as a std::vector<T>, or std::nullopt if parsing
+ * failed
+ */
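For instance, a caller can load a whole list in one call (the key name is invented for the example):

	std::optional<std::vector<uint16_t>> curve =
		obj["gamma"].getList<uint16_t>();
	if (!curve)
		return -EINVAL;  /* not a list, or an element failed to parse */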
+
+#ifndef __DOXYGEN__
+
+template<typename T,
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<float, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> *>
+std::optional<std::vector<T>> YamlObject::getList() const
+{
+ if (type_ != Type::List)
+ return std::nullopt;
+
+ std::vector<T> values;
+ values.reserve(list_.size());
+
+ for (const YamlObject &entry : asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ values.emplace_back(*value);
+ }
+
+ return values;
+}
+
+template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<float>> YamlObject::getList<float>() const;
+template std::optional<std::vector<double>> YamlObject::getList<double>() const;
+template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
+template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
+template std::optional<std::vector<int16_t>> YamlObject::getList<int16_t>() const;
+template std::optional<std::vector<uint16_t>> YamlObject::getList<uint16_t>() const;
+template std::optional<std::vector<int32_t>> YamlObject::getList<int32_t>() const;
+template std::optional<std::vector<uint32_t>> YamlObject::getList<uint32_t>() const;
+template std::optional<std::vector<std::string>> YamlObject::getList<std::string>() const;
+template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn YamlObject::asDict() const
+ * \brief Wrap a dictionary YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of Dictionary type into an adapter that exposes
+ * iterators, as well as begin() and end() functions, allowing usage of
+ * range-based for loops with YamlObject. The iteration order follows the order
+ * of the items in the YAML document, which the parser preserves.
+ *
+ * The iterator's value_type is a
+ * <em>std::pair<const std::string &, const \ref YamlObject &></em>.
+ *
+ * If the YamlObject is not of Dictionary type, the returned adapter operates
+ * as an empty container.
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
+
+/**
+ * \fn YamlObject::asList() const
+ * \brief Wrap a list YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of List type into an adapter that exposes iterators, as
+ * well as begin() and end() functions, allowing usage of range-based for loops
+ * with YamlObject. As YAML lists are ordered, the iteration order is identical
+ * to the list order in the YAML data.
+ *
+ * The iterator's value_type is a <em>const YamlObject &</em>.
+ *
+ * If the YamlObject is not of List type, the returned adapter operates as an
+ * empty container.
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
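The two adapters make range-based iteration straightforward; a brief sketch with invented key names, following the std::cout style of the class example further below (root is a parsed dictionary YamlObject):

	for (const auto &[name, value] : (*root)["controls"].asDict())
		std::cout << name << " = " << value.get<std::string>("") << std::endl;

	for (const YamlObject &entry : (*root)["sizes"].asList()) {
		std::optional<Size> size = entry.get<Size>();
		if (size)
			std::cout << size->toString() << std::endl;
	}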
+
+/**
+ * \fn YamlObject::operator[](std::size_t index) const
+ * \brief Retrieve the element from list YamlObject by index
+ *
+ * This function retrieves an element of the YamlObject. Only YamlObject
+ * instances of List type associate elements with index, calling this function
+ * on other types of instances or with an invalid index results in an empty
+ * object.
+ *
+ * \return The YamlObject as an element of the list
+ */
+const YamlObject &YamlObject::operator[](std::size_t index) const
+{
+ if (type_ != Type::List || index >= size())
+ return empty;
+
+ return *list_[index].value;
+}
+
+/**
+ * \fn YamlObject::contains()
+ * \brief Check if an element of a dictionary exists
+ *
+ * This function checks if the YamlObject contains an element. Only YamlObject
+ * instances of Dictionary type associate elements with names, calling this
+ * function on other types of instances is invalid and results in undefined
+ * behaviour.
+ *
+ * \return True if an element exists, false otherwise
+ */
+bool YamlObject::contains(std::string_view key) const
+{
+ return dictionary_.find(key) != dictionary_.end();
+}
+
+/**
+ * \fn YamlObject::operator[](std::string_view key) const
+ * \brief Retrieve a member by name from the dictionary
+ *
+ * This function retrieves a member of a YamlObject by name. Only YamlObject
+ * instances of Dictionary type associate elements with names, calling this
+ * function on other types of instances or with a nonexistent key results in an
+ * empty object.
+ *
+ * \return The YamlObject corresponding to the \a key member
+ */
+const YamlObject &YamlObject::operator[](std::string_view key) const
+{
+ if (type_ != Type::Dictionary)
+ return empty;
+
+ auto iter = dictionary_.find(key);
+ if (iter == dictionary_.end())
+ return empty;
+
+ return *iter->second;
+}
+
+#ifndef __DOXYGEN__
+
+class YamlParserContext
+{
+public:
+ YamlParserContext();
+ ~YamlParserContext();
+
+ int init(File &file);
+ int parseContent(YamlObject &yamlObject);
+
+private:
+ struct EventDeleter {
+ void operator()(yaml_event_t *event) const
+ {
+ yaml_event_delete(event);
+ delete event;
+ }
+ };
+ using EventPtr = std::unique_ptr<yaml_event_t, EventDeleter>;
+
+ static int yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead);
+
+ EventPtr nextEvent();
+
+ void readValue(std::string &value, EventPtr event);
+ int parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem);
+ int parseNextYamlObject(YamlObject &yamlObject, EventPtr event);
+
+ bool parserValid_;
+ yaml_parser_t parser_;
+};
+
+/**
+ * \class YamlParserContext
+ * \brief Class for YamlParser parsing and context data
+ *
+ * The YamlParserContext class stores the internal yaml_parser_t and provides
+ * helper functions to do event-based parsing for YAML files.
+ */
+YamlParserContext::YamlParserContext()
+ : parserValid_(false)
+{
+}
+
+/**
+ * \brief Destructor of YamlParserContext
+ */
+YamlParserContext::~YamlParserContext()
+{
+ if (parserValid_) {
+ yaml_parser_delete(&parser_);
+ parserValid_ = false;
+ }
+}
+
+/**
+ * \fn YamlParserContext::init()
+ * \brief Initialize a parser with an opened file for parsing
+ * \param[in] file The YAML file to parse
+ *
+ * Prior to parsing the YAML content, the YamlParserContext must be initialized
+ * with a file to create an internal parser. The file needs to stay valid until
+ * parsing completes.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser has failed to initialize
+ */
+int YamlParserContext::init(File &file)
+{
+	/* yaml_parser_initialize returns 1 when it succeeds */
+ if (!yaml_parser_initialize(&parser_)) {
+ LOG(YamlParser, Error) << "Failed to initialize YAML parser";
+ return -EINVAL;
+ }
+ parserValid_ = true;
+ yaml_parser_set_input(&parser_, &YamlParserContext::yamlRead, &file);
+
+ return 0;
+}
+
+int YamlParserContext::yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead)
+{
+ File *file = static_cast<File *>(data);
+
+ Span<unsigned char> buf{ buffer, size };
+ ssize_t ret = file->read(buf);
+ if (ret < 0)
+ return 0;
+
+ *sizeRead = ret;
+ return 1;
+}
+
+/**
+ * \fn YamlParserContext::nextEvent()
+ * \brief Get the next event
+ *
+ * Get the next event in the current YAML event stream, and return nullptr when
+ * there are no more events.
+ *
+ * \return The next event on success or nullptr otherwise
+ */
+YamlParserContext::EventPtr YamlParserContext::nextEvent()
+{
+ EventPtr event(new yaml_event_t);
+
+ /* yaml_parser_parse returns 1 when it succeeds */
+ if (!yaml_parser_parse(&parser_, event.get())) {
+ File *file = static_cast<File *>(parser_.read_handler_data);
+
+ LOG(YamlParser, Error) << file->fileName() << ":"
+ << parser_.problem_mark.line << ":"
+ << parser_.problem_mark.column << " "
+ << parser_.problem << " "
+ << parser_.context;
+
+ return nullptr;
+ }
+
+ return event;
+}
+
+/**
+ * \fn YamlParserContext::parseContent()
+ * \brief Parse the content of a YAML document
+ * \param[out] yamlObject The YamlObject to store the parsed content
+ *
+ * Check YAML start and end events of a YAML document, and parse the root object
+ * of the YAML document into a YamlObject.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser failed to validate the end of the YAML file
+ */
+int YamlParserContext::parseContent(YamlObject &yamlObject)
+{
+ /* Check start of the YAML file. */
+ EventPtr event = nextEvent();
+ if (!event || event->type != YAML_STREAM_START_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_START_EVENT)
+ return -EINVAL;
+
+ /* Parse the root object. */
+ event = nextEvent();
+ if (parseNextYamlObject(yamlObject, std::move(event)))
+ return -EINVAL;
+
+ /* Check end of the YAML file. */
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_END_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_STREAM_END_EVENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * \fn YamlParserContext::readValue()
+ * \brief Parse a scalar event and fill its content into a string
+ * \param[out] value The string to store the scalar value
+ * \param[in] event The scalar event to parse
+ *
+ * A helper function to parse a scalar event as a string. The caller needs to
+ * guarantee the event is of scalar type.
+ */
+void YamlParserContext::readValue(std::string &value, EventPtr event)
+{
+ value.assign(reinterpret_cast<char *>(event->data.scalar.value),
+ event->data.scalar.length);
+}
+
+/**
+ * \fn YamlParserContext::parseDictionaryOrList()
+ * \brief A helper function to abstract the common part of parsing dictionary or list
+ *
+ * \param[in] type The object type, YamlObject::Type::Dictionary for a
+ * dictionary or YamlObject::Type::List for a list
+ * \param[in] parseItem The callback to handle an item
+ *
+ * A helper function to abstract parsing an item from a dictionary or a list.
+ * The differences between them in a YAML event stream are:
+ *
+ * 1. The start and end event types are different
+ * 2. There is a leading scalar string as key in the items of a dictionary
+ *
+ * The caller should handle the leading key string in its callback parseItem
+ * when it's a dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the dictionary or list
+ */
+int YamlParserContext::parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem)
+{
+ yaml_event_type_t endEventType = YAML_SEQUENCE_END_EVENT;
+ if (type == YamlObject::Type::Dictionary)
+ endEventType = YAML_MAPPING_END_EVENT;
+
+ /*
+ * Add a safety counter to make sure we don't loop indefinitely in case
+ * the YAML file is malformed.
+ */
+ for (unsigned int sentinel = 2000; sentinel; sentinel--) {
+ auto evt = nextEvent();
+ if (!evt)
+ return -EINVAL;
+
+ if (evt->type == endEventType)
+ return 0;
+
+ int ret = parseItem(std::move(evt));
+ if (ret)
+ return ret;
+ }
+
+ LOG(YamlParser, Error) << "The YAML file contains a List or Dictionary"
+			       " whose size exceeds the parser's limit (2000)";
+
+ return -EINVAL;
+}
+
+/**
+ * \fn YamlParserContext::parseNextYamlObject()
+ * \brief Parse next YAML event and read it as a YamlObject
+ * \param[out] yamlObject The YamlObject to store the parsed content
+ * \param[in] event The leading event of the object
+ *
+ * Parse next YAML object separately as a value, list or dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the YAML file
+ */
+int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr event)
+{
+ if (!event)
+ return -EINVAL;
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT:
+ yamlObject.type_ = YamlObject::Type::Value;
+ readValue(yamlObject.value_, std::move(event));
+ return 0;
+
+ case YAML_SEQUENCE_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::List;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evt) {
+ list.emplace_back(std::string{}, std::make_unique<YamlObject>());
+ return parseNextYamlObject(*list.back().value, std::move(evt));
+ };
+ return parseDictionaryOrList(YamlObject::Type::List, handler);
+ }
+
+ case YAML_MAPPING_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::Dictionary;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evtKey) {
+ /* Parse key */
+ if (evtKey->type != YAML_SCALAR_EVENT) {
+ LOG(YamlParser, Error) << "Expect key at line: "
+ << evtKey->start_mark.line
+ << " column: "
+ << evtKey->start_mark.column;
+ return -EINVAL;
+ }
+
+ std::string key;
+ readValue(key, std::move(evtKey));
+
+ /* Parse value */
+ EventPtr evtValue = nextEvent();
+ if (!evtValue)
+ return -EINVAL;
+
+ auto &elem = list.emplace_back(std::move(key),
+ std::make_unique<YamlObject>());
+ return parseNextYamlObject(*elem.value, std::move(evtValue));
+ };
+ int ret = parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ if (ret)
+ return ret;
+
+ auto &dictionary = yamlObject.dictionary_;
+ for (const auto &elem : list)
+ dictionary.emplace(elem.key, elem.value.get());
+
+ return 0;
+ }
+
+ default:
+ LOG(YamlParser, Error) << "Invalid YAML file";
+ return -EINVAL;
+ }
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \class YamlParser
+ * \brief A helper class for parsing a YAML file
+ *
+ * The YamlParser class provides an easy interface to parse the contents of a
+ * YAML file into a tree of YamlObject instances.
+ *
+ * Example usage:
+ *
+ * \code{.unparsed}
+ *
+ * name:
+ * "John"
+ * numbers:
+ * - 1
+ * - 2
+ *
+ * \endcode
+ *
+ * The following code illustrates how to parse the above YAML file:
+ *
+ * \code{.cpp}
+ *
+ * std::unique_ptr<YamlObject> root = YamlParser::parse(fh);
+ * if (!root)
+ * return;
+ *
+ * if (!root->isDictionary())
+ * return;
+ *
+ * const YamlObject &name = (*root)["name"];
+ * std::cout << name.get<std::string>("") << std::endl;
+ *
+ * const YamlObject &numbers = (*root)["numbers"];
+ * if (!numbers.isList())
+ * return;
+ *
+ * for (std::size_t i = 0; i < numbers.size(); i++)
+ * std::cout << numbers[i].get<int32_t>(0) << std::endl;
+ *
+ * \endcode
+ *
+ * The YamlParser::parse() function takes an open File, parses its contents, and
+ * returns a pointer to a YamlObject corresponding to the root node of the YAML
+ * document.
+ *
+ * The parser preserves the order of items in the YAML file, for both lists and
+ * dictionaries.
+ */
+
+/**
+ * \brief Parse a YAML file as a YamlObject
+ * \param[in] file The YAML file to parse
+ *
+ * The YamlParser::parse() function takes a file, parses its contents, and
+ * returns a pointer to a YamlObject corresponding to the root node of the YAML
+ * document.
+ *
+ * \return Pointer to result YamlObject on success or nullptr otherwise
+ */
+std::unique_ptr<YamlObject> YamlParser::parse(File &file)
+{
+ YamlParserContext context;
+
+ if (context.init(file))
+ return nullptr;
+
+ std::unique_ptr<YamlObject> root(new YamlObject());
+
+ if (context.parseContent(*root)) {
+ LOG(YamlParser, Error)
+ << "Failed to parse YAML content from "
+ << file.fileName();
+ return nullptr;
+ }
+
+ return root;
+}
+
+} /* namespace libcamera */