Diffstat (limited to 'src/libcamera')
-rw-r--r-- src/libcamera/base/backtrace.cpp | 335
-rw-r--r-- src/libcamera/base/bound_method.cpp (renamed from src/libcamera/bound_method.cpp) | 22
-rw-r--r-- src/libcamera/base/class.cpp | 208
-rw-r--r-- src/libcamera/base/event_dispatcher.cpp (renamed from src/libcamera/event_dispatcher.cpp) | 9
-rw-r--r-- src/libcamera/base/event_dispatcher_poll.cpp (renamed from src/libcamera/event_dispatcher_poll.cpp) | 40
-rw-r--r-- src/libcamera/base/event_notifier.cpp (renamed from src/libcamera/event_notifier.cpp) | 24
-rw-r--r-- src/libcamera/base/file.cpp | 478
-rw-r--r-- src/libcamera/base/flags.cpp | 192
-rw-r--r-- src/libcamera/base/log.cpp (renamed from src/libcamera/log.cpp) | 387
-rw-r--r-- src/libcamera/base/memfd.cpp | 123
-rw-r--r-- src/libcamera/base/meson.build | 81
-rw-r--r-- src/libcamera/base/message.cpp (renamed from src/libcamera/message.cpp) | 19
-rw-r--r-- src/libcamera/base/mutex.cpp | 55
-rw-r--r-- src/libcamera/base/object.cpp | 361
-rw-r--r-- src/libcamera/base/semaphore.cpp (renamed from src/libcamera/semaphore.cpp) | 27
-rw-r--r-- src/libcamera/base/shared_fd.cpp | 291
-rw-r--r-- src/libcamera/base/signal.cpp (renamed from src/libcamera/signal.cpp) | 44
-rw-r--r-- src/libcamera/base/thread.cpp (renamed from src/libcamera/thread.cpp) | 365
-rw-r--r-- src/libcamera/base/timer.cpp (renamed from src/libcamera/timer.cpp) | 38
-rw-r--r-- src/libcamera/base/unique_fd.cpp | 123
-rw-r--r-- src/libcamera/base/utils.cpp | 674
-rw-r--r-- src/libcamera/bayer_format.cpp | 460
-rw-r--r-- src/libcamera/buffer.cpp | 214
-rw-r--r-- src/libcamera/byte_stream_buffer.cpp | 23
-rw-r--r-- src/libcamera/camera.cpp | 805
-rw-r--r-- src/libcamera/camera_controls.cpp | 6
-rw-r--r-- src/libcamera/camera_lens.cpp | 153
-rw-r--r-- src/libcamera/camera_manager.cpp | 400
-rw-r--r-- src/libcamera/camera_sensor.cpp | 369
-rw-r--r-- src/libcamera/color_space.cpp | 520
-rw-r--r-- src/libcamera/control_ids.cpp.in | 108
-rw-r--r-- src/libcamera/control_ids.yaml | 53
-rw-r--r-- src/libcamera/control_ids_core.yaml | 1052
-rw-r--r-- src/libcamera/control_ids_debug.yaml | 6
-rw-r--r-- src/libcamera/control_ids_draft.yaml | 327
-rw-r--r-- src/libcamera/control_ids_rpi.yaml | 61
-rw-r--r-- src/libcamera/control_ranges.yaml | 20
-rw-r--r-- src/libcamera/control_serializer.cpp | 262
-rw-r--r-- src/libcamera/control_validator.cpp | 6
-rw-r--r-- src/libcamera/controls.cpp | 451
-rw-r--r-- src/libcamera/converter.cpp | 458
-rw-r--r-- src/libcamera/converter/converter_v4l2_m2m.cpp | 751
-rw-r--r-- src/libcamera/converter/meson.build | 5
-rw-r--r-- src/libcamera/debug_controls.cpp | 164
-rw-r--r-- src/libcamera/delayed_controls.cpp | 285
-rw-r--r-- src/libcamera/device_enumerator.cpp | 46
-rw-r--r-- src/libcamera/device_enumerator_sysfs.cpp | 11
-rw-r--r-- src/libcamera/device_enumerator_udev.cpp | 35
-rw-r--r-- src/libcamera/dma_buf_allocator.cpp | 351
-rw-r--r-- src/libcamera/fence.cpp | 112
-rw-r--r-- src/libcamera/file_descriptor.cpp | 203
-rw-r--r-- src/libcamera/formats.cpp | 1217
-rw-r--r-- src/libcamera/formats.yaml | 212
-rw-r--r-- src/libcamera/framebuffer.cpp | 441
-rw-r--r-- src/libcamera/framebuffer_allocator.cpp | 34
-rwxr-xr-x src/libcamera/gen-controls.py | 173
-rw-r--r-- src/libcamera/geometry.cpp | 684
-rw-r--r-- src/libcamera/include/byte_stream_buffer.h | 89
-rw-r--r-- src/libcamera/include/camera_controls.h | 30
-rw-r--r-- src/libcamera/include/camera_sensor.h | 66
-rw-r--r-- src/libcamera/include/control_serializer.h | 55
-rw-r--r-- src/libcamera/include/control_validator.h | 27
-rw-r--r-- src/libcamera/include/device_enumerator.h | 57
-rw-r--r-- src/libcamera/include/device_enumerator_sysfs.h | 32
-rw-r--r-- src/libcamera/include/device_enumerator_udev.h | 75
-rw-r--r-- src/libcamera/include/event_dispatcher_poll.h | 58
-rw-r--r-- src/libcamera/include/formats.h | 34
-rw-r--r-- src/libcamera/include/ipa_context_wrapper.h | 47
-rw-r--r-- src/libcamera/include/ipa_manager.h | 42
-rw-r--r-- src/libcamera/include/ipa_module.h | 55
-rw-r--r-- src/libcamera/include/ipa_proxy.h | 65
-rw-r--r-- src/libcamera/include/ipc_unixsocket.h | 59
-rw-r--r-- src/libcamera/include/log.h | 130
-rw-r--r-- src/libcamera/include/media_device.h | 93
-rw-r--r-- src/libcamera/include/media_object.h | 124
-rw-r--r-- src/libcamera/include/meson.build | 30
-rw-r--r-- src/libcamera/include/message.h | 70
-rw-r--r-- src/libcamera/include/pipeline_handler.h | 150
-rw-r--r-- src/libcamera/include/process.h | 55
-rw-r--r-- src/libcamera/include/semaphore.h | 34
-rw-r--r-- src/libcamera/include/thread.h | 77
-rw-r--r-- src/libcamera/include/utils.h | 152
-rw-r--r-- src/libcamera/include/v4l2_controls.h | 31
-rw-r--r-- src/libcamera/include/v4l2_device.h | 60
-rw-r--r-- src/libcamera/include/v4l2_subdevice.h | 71
-rw-r--r-- src/libcamera/include/v4l2_videodevice.h | 295
-rw-r--r-- src/libcamera/ipa/meson.build | 14
-rw-r--r-- src/libcamera/ipa_context_wrapper.cpp | 251
-rw-r--r-- src/libcamera/ipa_controls.cpp | 55
-rw-r--r-- src/libcamera/ipa_data_serializer.cpp | 626
-rw-r--r-- src/libcamera/ipa_interface.cpp | 497
-rw-r--r-- src/libcamera/ipa_manager.cpp | 160
-rw-r--r-- src/libcamera/ipa_module.cpp | 296
-rw-r--r-- src/libcamera/ipa_proxy.cpp | 223
-rw-r--r-- src/libcamera/ipa_pub_key.cpp.in | 22
-rw-r--r-- src/libcamera/ipc_pipe.cpp | 227
-rw-r--r-- src/libcamera/ipc_pipe_unixsocket.cpp | 147
-rw-r--r-- src/libcamera/ipc_unixsocket.cpp | 93
-rw-r--r-- src/libcamera/mapped_framebuffer.cpp | 243
-rw-r--r-- src/libcamera/matrix.cpp | 145
-rw-r--r-- src/libcamera/media_device.cpp | 181
-rw-r--r-- src/libcamera/media_object.cpp | 148
-rw-r--r-- src/libcamera/meson.build | 233
-rw-r--r-- src/libcamera/object.cpp | 245
-rw-r--r-- src/libcamera/orientation.cpp | 118
-rw-r--r-- src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | 1116
-rw-r--r-- src/libcamera/pipeline/imx8-isi/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.cpp | 431
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.h | 79
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.cpp | 144
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.h | 67
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.cpp | 767
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.h | 124
-rw-r--r-- src/libcamera/pipeline/ipu3/ipu3.cpp | 1767
-rw-r--r-- src/libcamera/pipeline/ipu3/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/mali-c55/mali-c55.cpp | 1755
-rw-r--r-- src/libcamera/pipeline/mali-c55/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/meson.build | 19
-rw-r--r-- src/libcamera/pipeline/rkisp1/meson.build | 6
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1.cpp | 1554
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 567
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.h | 104
-rw-r--r-- src/libcamera/pipeline/rkisp1/timeline.cpp | 227
-rw-r--r-- src/libcamera/pipeline/rkisp1/timeline.h | 72
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.cpp | 293
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.h | 87
-rw-r--r-- src/libcamera/pipeline/rpi/common/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.cpp | 1528
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.h | 300
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.cpp | 283
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.h | 199
-rw-r--r-- src/libcamera/pipeline/rpi/meson.build | 12
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/example.yaml | 46
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/meson.build | 9
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/vc4.cpp | 1030
-rw-r--r-- src/libcamera/pipeline/simple/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/simple/simple.cpp | 1768
-rw-r--r-- src/libcamera/pipeline/uvcvideo/meson.build | 4
-rw-r--r-- src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 604
-rw-r--r-- src/libcamera/pipeline/vimc/meson.build | 4
-rw-r--r-- src/libcamera/pipeline/vimc/vimc.cpp | 412
-rw-r--r-- src/libcamera/pipeline/virtual/README.md | 65
-rw-r--r-- src/libcamera/pipeline/virtual/config_parser.cpp | 260
-rw-r--r-- src/libcamera/pipeline/virtual/config_parser.h | 39
-rw-r--r-- src/libcamera/pipeline/virtual/data/virtual.yaml | 36
-rw-r--r-- src/libcamera/pipeline/virtual/frame_generator.h | 29
-rw-r--r-- src/libcamera/pipeline/virtual/image_frame_generator.cpp | 172
-rw-r--r-- src/libcamera/pipeline/virtual/image_frame_generator.h | 49
-rw-r--r-- src/libcamera/pipeline/virtual/meson.build | 13
-rw-r--r-- src/libcamera/pipeline/virtual/test_pattern_generator.cpp | 125
-rw-r--r-- src/libcamera/pipeline/virtual/test_pattern_generator.h | 48
-rw-r--r-- src/libcamera/pipeline/virtual/virtual.cpp | 397
-rw-r--r-- src/libcamera/pipeline/virtual/virtual.h | 61
-rw-r--r-- src/libcamera/pipeline/vivid/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/vivid/vivid.cpp | 409
-rw-r--r-- src/libcamera/pipeline_handler.cpp | 691
-rw-r--r-- src/libcamera/pixel_format.cpp (renamed from src/libcamera/pixelformats.cpp) | 89
-rw-r--r-- src/libcamera/process.cpp | 100
-rw-r--r-- src/libcamera/property_ids.cpp.in | 43
-rw-r--r-- src/libcamera/property_ids_core.yaml (renamed from src/libcamera/property_ids.yaml) | 325
-rw-r--r-- src/libcamera/property_ids_draft.yaml | 39
-rw-r--r-- src/libcamera/proxy/ipa_proxy_linux.cpp | 95
-rw-r--r-- src/libcamera/proxy/meson.build | 23
-rw-r--r-- src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp | 90
-rw-r--r-- src/libcamera/proxy/worker/meson.build | 34
-rw-r--r-- src/libcamera/pub_key.cpp | 140
-rw-r--r-- src/libcamera/request.cpp | 485
-rw-r--r-- src/libcamera/sensor/camera_sensor.cpp | 583
-rw-r--r-- src/libcamera/sensor/camera_sensor_legacy.cpp | 1045
-rw-r--r-- src/libcamera/sensor/camera_sensor_properties.cpp | 473
-rw-r--r-- src/libcamera/sensor/camera_sensor_raw.cpp | 1157
-rw-r--r-- src/libcamera/sensor/meson.build | 8
-rw-r--r-- src/libcamera/shared_mem_object.cpp | 231
-rw-r--r-- src/libcamera/software_isp/TODO | 208
-rw-r--r-- src/libcamera/software_isp/debayer.cpp | 127
-rw-r--r-- src/libcamera/software_isp/debayer.h | 54
-rw-r--r-- src/libcamera/software_isp/debayer_cpu.cpp | 835
-rw-r--r-- src/libcamera/software_isp/debayer_cpu.h | 163
-rw-r--r-- src/libcamera/software_isp/meson.build | 15
-rw-r--r-- src/libcamera/software_isp/software_isp.cpp | 370
-rw-r--r-- src/libcamera/software_isp/swstats_cpu.cpp | 434
-rw-r--r-- src/libcamera/software_isp/swstats_cpu.h | 97
-rw-r--r-- src/libcamera/source_paths.cpp | 139
-rw-r--r-- src/libcamera/stream.cpp | 120
-rw-r--r-- src/libcamera/sysfs.cpp | 111
-rw-r--r-- src/libcamera/tracepoints.cpp | 10
-rw-r--r-- src/libcamera/transform.cpp | 409
-rw-r--r-- src/libcamera/utils.cpp | 374
-rw-r--r-- src/libcamera/v4l2_controls.cpp | 151
-rw-r--r-- src/libcamera/v4l2_device.cpp | 838
-rw-r--r-- src/libcamera/v4l2_pixelformat.cpp | 422
-rw-r--r-- src/libcamera/v4l2_subdevice.cpp | 1656
-rw-r--r-- src/libcamera/v4l2_videodevice.cpp | 1126
-rw-r--r-- src/libcamera/version.cpp.in | 2
-rw-r--r-- src/libcamera/yaml_parser.cpp | 784
196 files changed, 42241 insertions, 9438 deletions
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
new file mode 100644
index 00000000..0b04629c
--- /dev/null
+++ b/src/libcamera/base/backtrace.cpp
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Call stack backtraces
+ */
+
+#include <libcamera/base/backtrace.h>
+
+#if HAVE_BACKTRACE
+#include <execinfo.h>
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_DW
+#include <elfutils/libdwfl.h>
+#include <unistd.h>
+#endif
+
+#if HAVE_UNWIND
+/*
+ * Disable support for remote unwinding to enable a more optimized
+ * implementation.
+ */
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#include <cxxabi.h>
+#include <sstream>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file backtrace.h
+ * \brief Generate call stack backtraces
+ */
+
+namespace libcamera {
+
+namespace {
+
+#if HAVE_DW
+class DwflParser
+{
+public:
+ DwflParser();
+ ~DwflParser();
+
+ bool isValid() const { return valid_; }
+ std::string stackEntry(const void *ip);
+
+private:
+ Dwfl_Callbacks callbacks_;
+ Dwfl *dwfl_;
+ bool valid_;
+};
+
+DwflParser::DwflParser()
+ : callbacks_({}), dwfl_(nullptr), valid_(false)
+{
+ callbacks_.find_elf = dwfl_linux_proc_find_elf;
+ callbacks_.find_debuginfo = dwfl_standard_find_debuginfo;
+
+ dwfl_ = dwfl_begin(&callbacks_);
+ if (!dwfl_)
+ return;
+
+ int ret = dwfl_linux_proc_report(dwfl_, getpid());
+ if (ret)
+ return;
+
+ ret = dwfl_report_end(dwfl_, nullptr, nullptr);
+ if (ret)
+ return;
+
+ valid_ = true;
+}
+
+DwflParser::~DwflParser()
+{
+ if (dwfl_)
+ dwfl_end(dwfl_);
+}
+
+std::string DwflParser::stackEntry(const void *ip)
+{
+ Dwarf_Addr addr = reinterpret_cast<Dwarf_Addr>(ip);
+
+ Dwfl_Module *module = dwfl_addrmodule(dwfl_, addr);
+ if (!module)
+ return std::string();
+
+ std::ostringstream entry;
+
+ GElf_Off offset;
+ GElf_Sym sym;
+ const char *symbol = dwfl_module_addrinfo(module, addr, &offset, &sym,
+ nullptr, nullptr, nullptr);
+ if (symbol) {
+ char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
+ entry << (name ? name : symbol) << "+0x" << std::hex << offset
+ << std::dec;
+ free(name);
+ } else {
+ entry << "??? [" << utils::hex(addr) << "]";
+ }
+
+ entry << " (";
+
+ Dwfl_Line *line = dwfl_module_getsrc(module, addr);
+ if (line) {
+ const char *filename;
+ int lineNumber = 0;
+
+ filename = dwfl_lineinfo(line, &addr, &lineNumber, nullptr,
+ nullptr, nullptr);
+
+ entry << (filename ? filename : "???") << ":" << lineNumber;
+ } else {
+ const char *filename = nullptr;
+
+ dwfl_module_info(module, nullptr, nullptr, nullptr, nullptr,
+ nullptr, &filename, nullptr);
+
+ entry << (filename ? filename : "???") << " [" << utils::hex(addr) << "]";
+ }
+
+ entry << ")";
+ return entry.str();
+}
+#endif /* HAVE_DW */
+
+} /* namespace */
+
+/**
+ * \class Backtrace
+ * \brief Representation of a call stack backtrace
+ *
+ * The Backtrace class represents a function call stack. Constructing an
+ * instance captures the call stack at the point the instance is constructed.
+ * The instance can later be used to access the call stack and to generate a
+ * human-readable representation with the toString() function.
+ *
+ * Depending on the platform, different backends can be used to generate the
+ * backtrace. The Backtrace class makes a best-effort attempt to capture
+ * accurate backtraces, but doesn't guarantee any particular backtrace format.
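+ *
+ * A minimal usage sketch (illustrative only, assuming a stream such as
+ * std::cerr is available):
+ * \code{.cpp}
+ * Backtrace bt;
+ * std::cerr << bt.toString(0);
+ * \endcode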
+ */
+
+/**
+ * \brief Construct a backtrace
+ *
+ * The backtrace captures the call stack at the point where it is constructed.
+ * It can later be converted to a string with toString().
+ */
+Backtrace::Backtrace()
+{
+ /* Try libunwind first and fall back to backtrace() if it fails. */
+ if (unwindTrace())
+ return;
+
+ backtraceTrace();
+}
+
+/*
+ * Avoid inlining to make sure that the Backtrace constructor adds exactly two
+ * calls to the stack, which are later skipped in toString().
+ */
+__attribute__((__noinline__))
+bool Backtrace::backtraceTrace()
+{
+#if HAVE_BACKTRACE
+ backtrace_.resize(32);
+
+ int num_entries = backtrace(backtrace_.data(), backtrace_.size());
+ if (num_entries < 0) {
+ backtrace_.clear();
+ return false;
+ }
+
+ backtrace_.resize(num_entries);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+__attribute__((__noinline__))
+bool Backtrace::unwindTrace()
+{
+#if HAVE_UNWIND
+/*
+ * unw_getcontext() for ARM32 is an inline assembly function using the stmia
+ * instruction to store SP and PC. clang-11 considers this deprecated and
+ * generates a warning.
+ */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winline-asm"
+#endif
+ unw_context_t uc;
+ int ret = unw_getcontext(&uc);
+ if (ret)
+ return false;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+ unw_cursor_t cursor;
+ ret = unw_init_local(&cursor, &uc);
+ if (ret)
+ return false;
+
+ do {
+#if HAVE_BACKTRACE || HAVE_DW
+ /*
+ * If backtrace() or libdw is available, they will be used in
+ * toString() to provide symbol information for the stack
+ * frames using the IP register value.
+ */
+ unw_word_t ip;
+ ret = unw_get_reg(&cursor, UNW_REG_IP, &ip);
+ if (ret) {
+ backtrace_.push_back(nullptr);
+ continue;
+ }
+
+ backtrace_.push_back(reinterpret_cast<void *>(ip));
+#else
+ /*
+ * Otherwise, use libunwind to get the symbol information. As
+ * the libunwind API uses cursors, we can't store the IP values
+ * and delay symbol lookup to toString().
+ */
+ char symbol[256];
+ unw_word_t offset = 0;
+ ret = unw_get_proc_name(&cursor, symbol, sizeof(symbol), &offset);
+ if (ret) {
+ backtraceText_.emplace_back("???\n");
+ continue;
+ }
+
+ std::ostringstream entry;
+
+ char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
+ entry << (name ? name : symbol);
+ free(name);
+
+ entry << "+0x" << std::hex << offset << "\n";
+ backtraceText_.emplace_back(entry.str());
+#endif
+ } while (unw_step(&cursor) > 0);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * \brief Convert a backtrace to a string representation
+ * \param[in] skipLevels Number of initial levels to skip in the backtrace
+ *
+ * The string representation of the backtrace is a multi-line string, with one
+ * line per call stack entry. The format of the entries isn't specified and is
+ * platform-dependent.
+ *
+ * The \a skipLevels parameter indicates how many initial entries to skip from
+ * the backtrace. This can be used to hide functions that wrap the construction
+ * of the Backtrace instance from the call stack. The Backtrace constructor
+ * itself is automatically skipped and never shown in the backtrace.
+ *
+ * If backtrace generation fails for any reason (usually because the platform
+ * doesn't support this feature), an empty string is returned.
+ *
+ * \return A string representation of the backtrace, or an empty string if
+ * backtrace generation isn't possible
+ */
+std::string Backtrace::toString(unsigned int skipLevels) const
+{
+ /*
+ * Skip the first two entries, corresponding to the Backtrace
+ * construction.
+ */
+ skipLevels += 2;
+
+ if (backtrace_.size() <= skipLevels &&
+ backtraceText_.size() <= skipLevels)
+ return std::string();
+
+ if (!backtraceText_.empty()) {
+ Span<const std::string> trace{ backtraceText_ };
+ return utils::join(trace.subspan(skipLevels), "");
+ }
+
+#if HAVE_DW
+ DwflParser dwfl;
+
+ if (dwfl.isValid()) {
+ std::ostringstream msg;
+
+ Span<void *const> trace{ backtrace_ };
+ for (const void *ip : trace.subspan(skipLevels)) {
+ if (ip)
+ msg << dwfl.stackEntry(ip) << std::endl;
+ else
+ msg << "???" << std::endl;
+ }
+
+ return msg.str();
+ }
+#endif
+
+#if HAVE_BACKTRACE
+ Span<void *const> trace{ backtrace_ };
+ trace = trace.subspan(skipLevels);
+
+ char **strings = backtrace_symbols(trace.data(), trace.size());
+ if (strings) {
+ std::ostringstream msg;
+
+ for (unsigned int i = 0; i < trace.size(); ++i)
+ msg << strings[i] << std::endl;
+
+ free(strings);
+ return msg.str();
+ }
+#endif
+
+ return std::string();
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/bound_method.cpp b/src/libcamera/base/bound_method.cpp
index 9aa59dc3..322029a8 100644
--- a/src/libcamera/bound_method.cpp
+++ b/src/libcamera/base/bound_method.cpp
@@ -2,17 +2,17 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.cpp - Method bind and invocation
+ * Method bind and invocation
*/
-#include <libcamera/bound_method.h>
-
-#include "message.h"
-#include "semaphore.h"
-#include "thread.h"
+#include <libcamera/base/bound_method.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/object.h>
+#include <libcamera/base/semaphore.h>
+#include <libcamera/base/thread.h>
/**
- * \file bound_method.h
+ * \file base/bound_method.h
* \brief Method bind and invocation
*/
@@ -26,22 +26,22 @@ namespace libcamera {
* between a sender and a receiver. It applies to Signal::emit() and
* Object::invokeMethod().
*
- * \var ConnectionType::ConnectionTypeAuto
+ * \var ConnectionTypeAuto
* \brief If the sender and the receiver live in the same thread,
* ConnectionTypeDirect is used. Otherwise ConnectionTypeQueued is used.
*
- * \var ConnectionType::ConnectionTypeDirect
+ * \var ConnectionTypeDirect
* \brief The receiver is invoked immediately and synchronously in the sender's
* thread.
*
- * \var ConnectionType::ConnectionTypeQueued
+ * \var ConnectionTypeQueued
* \brief The receiver is invoked asynchronously
*
* Invoke the receiver asynchronously in its thread when control returns to the
* thread's event loop. The sender proceeds without waiting for the invocation
* to complete.
*
- * \var ConnectionType::ConnectionTypeBlocking
+ * \var ConnectionTypeBlocking
* \brief The receiver is invoked synchronously
*
* If the sender and the receiver live in the same thread, this is equivalent to
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
new file mode 100644
index 00000000..61998398
--- /dev/null
+++ b/src/libcamera/base/class.cpp
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Utilities and helpers for classes
+ */
+
+#include <libcamera/base/class.h>
+
+/**
+ * \file class.h
+ * \brief Utilities to help constructing class interfaces
+ *
+ * The extensible class can be inherited to create public classes with stable
+ * ABIs.
+ */
+
+namespace libcamera {
+
+/**
+ * \def LIBCAMERA_DISABLE_COPY
+ * \brief Disable copy construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonCopyable
+ * {
+ * public:
+ * NonCopyable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_COPY(NonCopyable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DISABLE_MOVE
+ * \brief Disable move construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonMoveable
+ * {
+ * public:
+ * NonMoveable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_MOVE(NonMoveable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DISABLE_COPY_AND_MOVE
+ * \brief Disable copy and move construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonCopyableNonMoveable
+ * {
+ * public:
+ * NonCopyableNonMoveable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_COPY_AND_MOVE(NonCopyableNonMoveable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DECLARE_PRIVATE
+ * \brief Declare private data for a public class
+ *
+ * The LIBCAMERA_DECLARE_PRIVATE() macro plumbs the infrastructure necessary to
+ * make a class manage its private data through a d-pointer. It shall be used at
+ * the very top of the class definition.
+ */
+
+/**
+ * \def LIBCAMERA_DECLARE_PUBLIC
+ * \brief Declare public data for a private class
+ * \param klass The public class name
+ *
+ * The LIBCAMERA_DECLARE_PUBLIC() macro is the counterpart of
+ * LIBCAMERA_DECLARE_PRIVATE() to be used in the private data class. It shall be
+ * used at the very top of the private class definition, with the public class
+ * name passed as the \a klass parameter.
+ */
+
+/**
+ * \def LIBCAMERA_O_PTR()
+ * \brief Retrieve the public instance corresponding to the private data
+ *
+ * This macro is part of the libcamera::Extensible class infrastructure. It may
+ * be used in any member function of a libcamera::Extensible::Private subclass
+ * to access the public class instance corresponding to the private data.
+ */
+
+/**
+ * \class Extensible
+ * \brief Base class to manage private data through a d-pointer
+ *
+ * The Extensible class provides a base class to implement the
+ * <a href="https://wiki.qt.io/D-Pointer">d-pointer</a> design pattern (also
+ * known as <a href="https://en.wikipedia.org/wiki/Opaque_pointer">opaque pointer</a>
+ * or <a href="https://en.cppreference.com/w/cpp/language/pimpl">pImpl idiom</a>).
+ * It helps create public classes that can be extended without breaking their
+ * ABI. Such classes store their private data in a separate private data object,
+ * referenced by a pointer in the public class (hence the name d-pointer).
+ *
+ * Classes that follow this design pattern are referred to herein as extensible
+ * classes. To be extensible, a class PublicClass shall:
+ *
+ * - inherit from the Extensible class or from another extensible class
+ * - invoke the LIBCAMERA_DECLARE_PRIVATE() macro at the very top of the class
+ * definition
+ * - define a private data class named PublicClass::Private that inherits from
+ * the Private data class of the base class
+ * - invoke the LIBCAMERA_DECLARE_PUBLIC() macro at the very top of the Private
+ * data class definition
+ * - pass a pointer to a newly allocated Private data object to the constructor
+ * of the base class
+ *
+ * Additionally, if the PublicClass is not final, it shall expose one or more
+ * constructors that take a pointer to a Private data instance, to be used by
+ * derived classes.
+ *
+ * The Private class is fully opaque to users of the libcamera public API.
+ * Internally, it can be kept private to the implementation of PublicClass, or
+ * be exposed to other classes. In the latter case, the members of the Private
+ * class need to be qualified with appropriate access specifiers. The
+ * PublicClass and Private classes always have full access to each other's
+ * protected and private members.
+ *
+ * The PublicClass exposes its Private data pointer through the _d() function.
+ * In the other direction, the pointer to the PublicClass can be retrieved in
+ * functions of the Private class using the LIBCAMERA_O_PTR() macro.
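+ *
+ * A minimal sketch of an extensible class (the Widget name is hypothetical):
+ * \code{.cpp}
+ * class Widget : public Extensible
+ * {
+ *     LIBCAMERA_DECLARE_PRIVATE()
+ *
+ * public:
+ *     Widget();
+ * };
+ *
+ * class Widget::Private : public Extensible::Private
+ * {
+ *     LIBCAMERA_DECLARE_PUBLIC(Widget)
+ * };
+ *
+ * Widget::Widget()
+ *     : Extensible(std::make_unique<Private>())
+ * {
+ * }
+ * \endcode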
+ */
+
+/**
+ * \brief Construct an instance of an Extensible class
+ * \param[in] d Pointer to the private data instance
+ *
+ * The private data lifetime is managed by the Extensible class, which destroys
+ * it when the Extensible instance is destroyed.
+ */
+Extensible::Extensible(std::unique_ptr<Extensible::Private> d)
+ : d_(std::move(d))
+{
+ *const_cast<Extensible **>(&d_->o_) = this;
+}
+
+/**
+ * \fn Extensible::_d() const
+ * \brief Retrieve the private data instance
+ *
+ * This template function isn't meant to be called directly. Instead, classes
+ * derived from Extensible get, through the LIBCAMERA_DECLARE_PRIVATE() macro,
+ * overridden _d() functions that return the correct pointer type to the
+ * corresponding derived Private class.
+ *
+ * The lifetime of the private data is tied to the Extensible class. The caller
+ * shall not retain any reference to the returned pointer for longer than it
+ * holds a reference to the Extensible instance.
+ *
+ * \return A pointer to the private data instance
+ */
+
+/**
+ * \fn Extensible::_d()
+ * \copydoc Extensible::_d() const
+ */
+
+/**
+ * \var Extensible::d_
+ * \brief Pointer to the private data instance
+ */
+
+/**
+ * \class Extensible::Private
+ * \brief Base class for private data managed through a d-pointer
+ */
+
+/**
+ * \brief Construct an instance of an Extensible class private data
+ */
+Extensible::Private::Private()
+ : o_(nullptr)
+{
+}
+
+Extensible::Private::~Private()
+{
+}
+
+/**
+ * \var Extensible::Private::o_
+ * \brief Pointer to the public class object
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
index bb4fddff..5f4a5cb4 100644
--- a/src/libcamera/event_dispatcher.cpp
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.cpp - Event dispatcher
+ * Event dispatcher
*/
-#include <libcamera/event_dispatcher.h>
-
-#include "log.h"
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
/**
- * \file event_dispatcher.h
+ * \file base/event_dispatcher.h
*/
namespace libcamera {
diff --git a/src/libcamera/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
index 51ac5adf..52bfb34e 100644
--- a/src/libcamera/event_dispatcher_poll.cpp
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -2,29 +2,27 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
-#include "event_dispatcher_poll.h"
+#include <libcamera/base/event_dispatcher_poll.h>
-#include <algorithm>
-#include <chrono>
#include <iomanip>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>
+#include <vector>
-#include <libcamera/event_notifier.h>
-#include <libcamera/timer.h>
-
-#include "log.h"
-#include "thread.h"
-#include "utils.h"
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/timer.h>
+#include <libcamera/base/utils.h>
/**
- * \file event_dispatcher_poll.h
+ * \file base/event_dispatcher_poll.h
*/
namespace libcamera {
@@ -55,14 +53,13 @@ EventDispatcherPoll::EventDispatcherPoll()
* Create the event fd. Failures are fatal as we can't implement an
* interruptible dispatcher without the fd.
*/
- eventfd_ = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (eventfd_ < 0)
+ eventfd_ = UniqueFD(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
+ if (!eventfd_.isValid())
LOG(Event, Fatal) << "Unable to create eventfd";
}
EventDispatcherPoll::~EventDispatcherPoll()
{
- close(eventfd_);
}
void EventDispatcherPoll::registerEventNotifier(EventNotifier *notifier)
@@ -102,7 +99,7 @@ void EventDispatcherPoll::unregisterEventNotifier(EventNotifier *notifier)
set.notifiers[type] = nullptr;
/*
- * Don't race with event processing if this method is called from an
+ * Don't race with event processing if this function is called from an
* event notifier. The notifiers_ entry will be erased by
* processEvents().
*/
@@ -155,7 +152,7 @@ void EventDispatcherPoll::processEvents()
for (auto notifier : notifiers_)
pollfds.push_back({ notifier.first, notifier.second.events(), 0 });
- pollfds.push_back({ eventfd_, POLLIN, 0 });
+ pollfds.push_back({ eventfd_.get(), POLLIN, 0 });
/* Wait for events and process notifiers and timers. */
do {
@@ -177,7 +174,7 @@ void EventDispatcherPoll::processEvents()
void EventDispatcherPoll::interrupt()
{
uint64_t value = 1;
- ssize_t ret = write(eventfd_, &value, sizeof(value));
+ ssize_t ret = write(eventfd_.get(), &value, sizeof(value));
if (ret != sizeof(value)) {
if (ret < 0)
ret = -errno;
@@ -216,7 +213,8 @@ int EventDispatcherPoll::poll(std::vector<struct pollfd> *pollfds)
timeout = { 0, 0 };
LOG(Event, Debug)
- << "timeout " << timeout.tv_sec << "."
+ << "next timer " << nextTimer << " expires in "
+ << timeout.tv_sec << "."
<< std::setfill('0') << std::setw(9)
<< timeout.tv_nsec;
}
@@ -231,7 +229,7 @@ void EventDispatcherPoll::processInterrupt(const struct pollfd &pfd)
return;
uint64_t value;
- ssize_t ret = read(eventfd_, &value, sizeof(value));
+ ssize_t ret = read(eventfd_.get(), &value, sizeof(value));
if (ret != sizeof(value)) {
if (ret < 0)
ret = -errno;
@@ -279,7 +277,7 @@ void EventDispatcherPoll::processNotifiers(const std::vector<struct pollfd> &pol
}
if (pfd.revents & event.events)
- notifier->activated.emit(notifier);
+ notifier->activated.emit();
}
/* Erase the notifiers_ entry if it is now empty. */
@@ -301,7 +299,7 @@ void EventDispatcherPoll::processTimers()
timers_.pop_front();
timer->stop();
- timer->timeout.emit(timer);
+ timer->timeout.emit();
}
}
diff --git a/src/libcamera/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
index a9be686f..495c281d 100644
--- a/src/libcamera/event_notifier.cpp
+++ b/src/libcamera/base/event_notifier.cpp
@@ -2,16 +2,17 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.cpp - File descriptor event notifier
+ * File descriptor event notifier
*/
-#include <libcamera/event_notifier.h>
+#include <libcamera/base/event_notifier.h>
-#include <libcamera/camera_manager.h>
-#include <libcamera/event_dispatcher.h>
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/thread.h>
-#include "message.h"
-#include "thread.h"
+#include <libcamera/camera_manager.h>
/**
* \file event_notifier.h
@@ -20,6 +21,8 @@
namespace libcamera {
+LOG_DECLARE_CATEGORY(Event)
+
/**
* \class EventNotifier
* \brief Notify of activity on a file descriptor
@@ -35,9 +38,9 @@ namespace libcamera {
* multiple event types on the same file descriptor multiple notifiers must be
* created.
*
- * The notifier can be disabled with the setEnable() function. When the notifier
+ * The notifier can be disabled with the setEnabled() function. When the notifier
* is disabled it ignores events and does not emit the \ref activated signal.
- * The notifier can then be re-enabled with the setEnable() function.
+ * The notifier can then be re-enabled with the setEnabled() function.
*
* Creating multiple notifiers of the same type for the same file descriptor is
* not allowed and results in undefined behaviour.
@@ -90,7 +93,7 @@ EventNotifier::~EventNotifier()
* \fn EventNotifier::enabled()
* \brief Retrieve the notifier state
* \return True if the notifier is enabled, or false otherwise
- * \sa setEnable()
+ * \sa setEnabled()
*/
/**
@@ -104,6 +107,9 @@ EventNotifier::~EventNotifier()
*/
void EventNotifier::setEnabled(bool enable)
{
+ if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
+ return;
+
if (enabled_ == enable)
return;
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
new file mode 100644
index 00000000..2b83a517
--- /dev/null
+++ b/src/libcamera/base/file.cpp
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * File I/O operations
+ */
+
+#include <libcamera/base/file.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+/**
+ * \file base/file.h
+ * \brief File I/O operations
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(File)
+
+/**
+ * \class File
+ * \brief Interface for I/O operations on files
+ *
+ * The File class provides an interface to perform I/O operations on files. It
+ * wraps opening, closing and mapping files in memory, and handles the cleaning
+ * of allocated resources.
+ *
+ * File instances are usually constructed with a file name, but the name can be
+ * set later through the setFileName() function. Instances are not automatically
+ * opened when constructed, and shall be opened explicitly with open().
+ *
+ * Files can be mapped to the process memory with map(). Mapped regions can be
+ * unmapped manually with munmap(), and are automatically unmapped when the File
+ * is destroyed or when it is used to reference another file with setFileName().
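+ *
+ * A minimal read sketch (hypothetical path, error handling shortened):
+ * \code{.cpp}
+ * File file("/etc/example.conf");
+ * if (file.open(File::OpenModeFlag::ReadOnly)) {
+ *     std::vector<uint8_t> buf(file.size());
+ *     file.read(buf);
+ *     file.close();
+ * }
+ * \endcode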
+ */
+
+/**
+ * \enum File::MapFlag
+ * \brief Flags for the File::map() function
+ * \var File::MapFlag::NoOption
+ * \brief No option (used as default value)
+ * \var File::MapFlag::Private
+ * \brief The memory region is mapped as private, changes are not reflected in
+ * the file contents
+ */
+
+/**
+ * \typedef File::MapFlags
+ * \brief A bitwise combination of File::MapFlag values
+ */
+
+/**
+ * \enum File::OpenModeFlag
+ * \brief Mode in which a file is opened
+ * \var File::OpenModeFlag::NotOpen
+ * \brief The file is not open
+ * \var File::OpenModeFlag::ReadOnly
+ * \brief The file is open for reading
+ * \var File::OpenModeFlag::WriteOnly
+ * \brief The file is open for writing
+ * \var File::OpenModeFlag::ReadWrite
+ * \brief The file is open for reading and writing
+ */
+
+/**
+ * \typedef File::OpenMode
+ * \brief A bitwise combination of File::OpenModeFlag values
+ */
+
+/**
+ * \brief Construct a File to represent the file \a name
+ * \param[in] name The file name
+ *
+ * Upon construction the File object is closed and shall be opened with open()
+ * before performing I/O operations.
+ */
+File::File(const std::string &name)
+ : name_(name), mode_(OpenModeFlag::NotOpen), error_(0)
+{
+}
+
+/**
+ * \brief Construct a File without an associated name
+ *
+ * Before being used for any purpose, the file name shall be set with
+ * setFileName().
+ */
+File::File()
+ : mode_(OpenModeFlag::NotOpen), error_(0)
+{
+}
+
+/**
+ * \brief Destroy a File instance
+ *
+ * Any memory mapping associated with the File is unmapped, and the File is
+ * closed if it is open.
+ */
+File::~File()
+{
+ unmapAll();
+ close();
+}
+
+/**
+ * \fn const std::string &File::fileName() const
+ * \brief Retrieve the file name
+ * \return The file name
+ */
+
+/**
+ * \brief Set the name of the file
+ * \param[in] name The name of the file
+ *
+ * The \a name can contain an absolute path, a relative path or no path at all.
+ * Calling this function on an open file results in undefined behaviour.
+ *
+ * Any memory mapping associated with the File is unmapped.
+ */
+void File::setFileName(const std::string &name)
+{
+ if (isOpen()) {
+ LOG(File, Error)
+ << "Can't set file name on already open file " << name_;
+ return;
+ }
+
+ unmapAll();
+
+ name_ = name;
+}
+
+/**
+ * \brief Check if the file specified by fileName() exists
+ *
+ * This function checks if the file specified by fileName() exists. The File
+ * instance doesn't need to be open to check for file existence, and this
+ * function may return false even if the file is open, if it was deleted from
+ * the file system.
+ *
+ * \return True if the file exists, false otherwise
+ */
+bool File::exists() const
+{
+ return exists(name_);
+}
+
+/**
+ * \brief Open the file in the given mode
+ * \param[in] mode The open mode
+ *
+ * This function opens the file specified by fileName() in \a mode. If the file
+ * doesn't exist and the mode is WriteOnly or ReadWrite, this function will
+ * attempt to create the file with initial permissions set to 0666 (modified by
+ * the process' umask).
+ *
+ * The file is opened with the O_CLOEXEC flag, and will be closed automatically
+ * when a new binary is executed with one of the exec(3) functions.
+ *
+ * The error() status is updated.
+ *
+ * \return True on success, false otherwise
+ */
+bool File::open(File::OpenMode mode)
+{
+ if (isOpen()) {
+ LOG(File, Error) << "File " << name_ << " is already open";
+ return false;
+ }
+
+ int flags = static_cast<OpenMode::Type>(mode & OpenModeFlag::ReadWrite) - 1;
+ if (mode & OpenModeFlag::WriteOnly)
+ flags |= O_CREAT;
+
+ fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
+ if (!fd_.isValid()) {
+ error_ = -errno;
+ return false;
+ }
+
+ mode_ = mode;
+ error_ = 0;
+ return true;
+}
+
+/**
+ * \fn bool File::isOpen() const
+ * \brief Check if the file is open
+ * \return True if the file is open, false otherwise
+ */
+
+/**
+ * \fn OpenMode File::openMode() const
+ * \brief Retrieve the file open mode
+ * \return The file open mode
+ */
+
+/**
+ * \brief Close the file
+ *
+ * This function closes the File. If the File is not open, it performs no
+ * operation. Memory mappings created with map() are not destroyed when the
+ * file is closed.
+ */
+void File::close()
+{
+ if (!fd_.isValid())
+ return;
+
+ fd_.reset();
+ mode_ = OpenModeFlag::NotOpen;
+}
+
+/**
+ * \fn int File::error() const
+ * \brief Retrieve the file error status
+ *
+ * This function retrieves the error status from the last file open or I/O
+ * operation. The error status is a negative number as defined by errno.h. If
+ * no error occurred, this function returns 0.
+ *
+ * \return The file error status
+ */
+
+/**
+ * \brief Retrieve the file size
+ *
+ * This function retrieves the size of the file on the filesystem. The File
+ * instance shall be open to retrieve its size. The error() status is not
+ * modified, error codes are returned directly on failure.
+ *
+ * \return The file size in bytes on success, or a negative error code otherwise
+ */
+ssize_t File::size() const
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ struct stat st;
+ int ret = fstat(fd_.get(), &st);
+ if (ret < 0)
+ return -errno;
+
+ return st.st_size;
+}
+
+/**
+ * \brief Return current read or write position
+ *
+ * If the file is closed, this function returns 0.
+ *
+ * \return The current read or write position
+ */
+off_t File::pos() const
+{
+ if (!isOpen())
+ return 0;
+
+ return lseek(fd_.get(), 0, SEEK_CUR);
+}
+
+/**
+ * \brief Set the read or write position
+ * \param[in] pos The desired position
+ * \return The resulting offset from the beginning of the file on success, or a
+ * negative error code otherwise
+ */
+off_t File::seek(off_t pos)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ off_t ret = lseek(fd_.get(), pos, SEEK_SET);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+/**
+ * \brief Read data from the file
+ * \param[in] data Memory to read data into
+ *
+ * Read at most \a data.size() bytes from the file into \a data.data(), and
+ * return the number of bytes read. If less data than requested is available,
+ * the returned byte count may be smaller than the requested size. If no more
+ * data is available, 0 is returned.
+ *
+ * The position of the file as returned by pos() is advanced by the number of
+ * bytes read. If an error occurs, the position isn't modified.
+ *
+ * \return The number of bytes read on success, or a negative error code
+ * otherwise
+ */
+ssize_t File::read(const Span<uint8_t> &data)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ size_t readBytes = 0;
+ ssize_t ret = 0;
+
+ /* Retry in case of interrupted system calls. */
+ while (readBytes < data.size()) {
+ ret = ::read(fd_.get(), data.data() + readBytes,
+ data.size() - readBytes);
+ if (ret <= 0)
+ break;
+
+ readBytes += ret;
+ }
+
+ if (ret < 0 && !readBytes)
+ return -errno;
+
+ return readBytes;
+}
+
+/**
+ * \brief Write data to the file
+ * \param[in] data Memory containing data to be written
+ *
+ * Write at most \a data.size() bytes from \a data.data() to the file, and
+ * return the number of bytes written. If the file system doesn't have enough
+ * space for the data, the returned byte count may be less than requested.
+ *
+ * The position of the file as returned by pos() is advanced by the number of
+ * bytes written. If an error occurs, the position isn't modified.
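+ *
+ * Callers should thus compare the returned count with the requested size, as
+ * in this sketch (the file and data objects are assumed to exist):
+ * \code{.cpp}
+ * ssize_t written = file.write(data);
+ * if (written < 0 || static_cast<size_t>(written) != data.size()) {
+ *     // Handle the error or the partial write.
+ * }
+ * \endcode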
+ *
+ * \return The number of bytes written on success, or a negative error code
+ * otherwise
+ */
+ssize_t File::write(const Span<const uint8_t> &data)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ size_t writtenBytes = 0;
+
+ /* Retry in case of interrupted system calls. */
+ while (writtenBytes < data.size()) {
+ ssize_t ret = ::write(fd_.get(), data.data() + writtenBytes,
+ data.size() - writtenBytes);
+ if (ret <= 0)
+ break;
+
+ writtenBytes += ret;
+ }
+
+ if (data.size() && !writtenBytes)
+ return -errno;
+
+ return writtenBytes;
+}
+
+/**
+ * \brief Map a region of the file in the process memory
+ * \param[in] offset The region offset within the file
+ * \param[in] size The region size
+ * \param[in] flags The mapping flags
+ *
+ * This function maps a region of \a size bytes of the file starting at \a
+ * offset into the process memory. The File instance shall be open, but may be
+ * closed after mapping the region. Mappings stay valid when the File is
+ * closed, and are destroyed automatically when the File is deleted.
+ *
+ * If \a size is a negative value, this function maps the region starting at \a
+ * offset until the end of the file.
+ *
+ * The mapping memory protection is controlled by the file open mode, unless \a
+ * flags contains MapFlag::Private in which case the region is mapped in
+ * read/write mode.
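+ *
+ * For instance, a whole file open in read-only mode could be mapped with the
+ * following sketch (the file object is assumed to exist):
+ * \code{.cpp}
+ * Span<uint8_t> data = file.map(0, -1, File::MapFlag::NoOption);
+ * file.close();
+ * // The mapping stays valid until unmap() or the File destructor.
+ * \endcode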
+ *
+ * The error() status is updated.
+ *
+ * \return The mapped memory on success, or an empty span otherwise
+ */
+Span<uint8_t> File::map(off_t offset, ssize_t size, File::MapFlags flags)
+{
+ if (!isOpen()) {
+ error_ = -EBADF;
+ return {};
+ }
+
+ if (size < 0) {
+ size = File::size();
+ if (size < 0) {
+ error_ = size;
+ return {};
+ }
+
+ size -= offset;
+ }
+
+ int mmapFlags = flags & MapFlag::Private ? MAP_PRIVATE : MAP_SHARED;
+
+ int prot = 0;
+ if (mode_ & OpenModeFlag::ReadOnly)
+ prot |= PROT_READ;
+ if (mode_ & OpenModeFlag::WriteOnly)
+ prot |= PROT_WRITE;
+ if (flags & MapFlag::Private)
+ prot |= PROT_WRITE;
+
+ void *map = mmap(NULL, size, prot, mmapFlags, fd_.get(), offset);
+ if (map == MAP_FAILED) {
+ error_ = -errno;
+ return {};
+ }
+
+ maps_.emplace(map, size);
+
+ error_ = 0;
+ return { static_cast<uint8_t *>(map), static_cast<size_t>(size) };
+}
+
+/**
+ * \brief Unmap a region mapped with map()
+ * \param[in] addr The region address
+ *
+ * The error() status is updated.
+ *
+ * \return True on success, or false if an error occurs
+ */
+bool File::unmap(uint8_t *addr)
+{
+ auto iter = maps_.find(static_cast<void *>(addr));
+ if (iter == maps_.end()) {
+ error_ = -ENOENT;
+ return false;
+ }
+
+ int ret = munmap(addr, iter->second);
+ if (ret < 0) {
+ error_ = -errno;
+ return false;
+ }
+
+ maps_.erase(iter);
+
+ error_ = 0;
+ return true;
+}
+
+void File::unmapAll()
+{
+ for (const auto &map : maps_)
+ munmap(map.first, map.second);
+
+ maps_.clear();
+}
+
+/**
+ * \brief Check if the file specified by \a name exists
+ * \param[in] name The file name
+ * \return True if the file exists, false otherwise
+ */
+bool File::exists(const std::string &name)
+{
+ struct stat st;
+ int ret = stat(name.c_str(), &st);
+ if (ret < 0)
+ return false;
+
+ /* Directories can not be handled here, even if they exist. */
+ return !S_ISDIR(st.st_mode);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
new file mode 100644
index 00000000..9981f2ed
--- /dev/null
+++ b/src/libcamera/base/flags.cpp
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Type-safe enum-based bitfields
+ */
+
+#include <libcamera/base/flags.h>
+
+/**
+ * \file base/flags.h
+ * \brief Enum-based bit fields
+ */
+
+namespace libcamera {
+
+/**
+ * \class Flags
+ * \brief Type-safe container for enum-based bitfields
+ *
+ * The Flags template class provides type-safe bitwise operators on enum values.
+ * It allows using enum types for bitfields, while preventing unsafe casts from
+ * integer types and mixing of flags from different enum types.
+ *
+ * To use the Flags class, declare an enum containing the desired bit flags, and
+ * use the Flags<enum> class to store bitfields based on the enum. If bitwise
+ * operators on the underlying enum are also desired, they can be enabled with
+ * the LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum) macro.
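+ *
+ * A minimal sketch (the Option enum is hypothetical):
+ * \code{.cpp}
+ * enum class Option {
+ *     OptionA = (1 << 0),
+ *     OptionB = (1 << 1),
+ * };
+ *
+ * LIBCAMERA_FLAGS_ENABLE_OPERATORS(Option)
+ *
+ * Flags<Option> options = Option::OptionA | Option::OptionB;
+ * if (options & Option::OptionA) {
+ *     // OptionA is set.
+ * }
+ * \endcode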
+ */
+
+/**
+ * \typedef Flags::Type
+ * \brief The underlying data type of the enum
+ */
+
+/**
+ * \fn Flags::Flags()
+ * \brief Construct a Flags instance with a zero value
+ */
+
+/**
+ * \fn Flags::Flags(E flag)
+ * \brief Construct a Flags instance storing the \a flag
+ * \param[in] flag The initial value
+ */
+
+/**
+ * \fn Flags &Flags::operator&=(E flag)
+ * \brief Store the bitwise AND of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator&=(Flags other)
+ * \brief Store the bitwise AND of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator|=(E flag)
+ * \brief Store the bitwise OR of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator|=(Flags other)
+ * \brief Store the bitwise OR of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator^=(E flag)
+ * \brief Store the bitwise XOR of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator^=(Flags other)
+ * \brief Store the bitwise XOR of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn bool Flags::operator==(E flag)
+ * \brief Compare flags for equality
+ * \param[in] flag The second operand
+ * \return True if the Flags and \a flag are equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator==(Flags other)
+ * \brief Compare flags for equality
+ * \param[in] other The second operand
+ * \return True if the Flags and \a other are equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator!=(E flag)
+ * \brief Compare flags for non-equality
+ * \param[in] flag The second operand
+ * \return True if the Flags and \a flag are not equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator!=(Flags other)
+ * \brief Compare flags for non-equality
+ * \param[in] other The second operand
+ * \return True if the Flags and \a other are not equal, false otherwise
+ */
+
+/**
+ * \fn Flags::operator Type() const
+ * \brief Convert the Flags to the underlying integer type
+ * \return The Flags value as an integer
+ */
+
+/**
+ * \fn Flags::operator bool() const
+ * \brief Convert the Flags to a boolean
+ * \return True if at least one flag is set, false otherwise
+ */
+
+/**
+ * \fn Flags Flags::operator&(E flag) const
+ * \brief Compute the bitwise AND of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the AND operation
+ */
+
+/**
+ * \fn Flags Flags::operator&(Flags other) const
+ * \brief Compute the bitwise AND of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the AND operation
+ */
+
+/**
+ * \fn Flags Flags::operator|(E flag) const
+ * \brief Compute the bitwise OR of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the OR operation
+ */
+
+/**
+ * \fn Flags Flags::operator|(Flags other) const
+ * \brief Compute the bitwise OR of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the OR operation
+ */
+
+/**
+ * \fn Flags Flags::operator^(E flag) const
+ * \brief Compute the bitwise XOR of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the XOR operation
+ */
+
+/**
+ * \fn Flags Flags::operator^(Flags other) const
+ * \brief Compute the bitwise XOR of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the XOR operation
+ */
+
+/**
+ * \fn Flags Flags::operator~() const
+ * \brief Compute the bitwise NOT of this Flags
+ * \return A Flags containing the result of the NOT operation
+ */
+
+/**
+ * \fn bool Flags::operator!() const
+ * \brief Check if flags are set
+ * \return True if no flags are set, false otherwise
+ */
+
+/**
+ * \def LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum)
+ * \brief Enable bitwise operations on the \a enum enumeration
+ *
+ * This macro enables the bitwise AND, OR, XOR and NOT operators on the given
+ * \a enum. This allows the enum values to be safely used in bitwise operations
+ * with the Flags<> class.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/log.cpp b/src/libcamera/base/log.cpp
index fd1b5c39..3a656b8f 100644
--- a/src/libcamera/log.cpp
+++ b/src/libcamera/base/log.cpp
@@ -2,14 +2,12 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.cpp - Logging infrastructure
+ * Logging infrastructure
*/
-#include "log.h"
+#include <libcamera/base/log.h>
-#if HAVE_BACKTRACE
-#include <execinfo.h>
-#endif
+#include <array>
#include <fstream>
#include <iostream>
#include <list>
@@ -22,11 +20,13 @@
#include <libcamera/logging.h>
-#include "thread.h"
-#include "utils.h"
+#include <libcamera/base/backtrace.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
/**
- * \file log.h
+ * \file base/log.h
* \brief Logging infrastructure
*
* libcamera includes a logging infrastructure used through the library that
@@ -47,11 +47,11 @@
* their category are output to the log, while other messages are silently
* discarded.
*
- * By default log messages are output to stderr. They can be redirected to a log
- * file by setting the LIBCAMERA_LOG_FILE environment variable to the name of
- * the file. The file must be writable and is truncated if it exists. If any
+ * By default log messages are output to std::cerr. They can be redirected to a
+ * log file by setting the LIBCAMERA_LOG_FILE environment variable to the name
+ * of the file. The file must be writable and is truncated if it exists. If any
* error occurs when opening the file, the file is ignored and the log is output
- * to stderr.
+ * to std::cerr.
*/
/**
@@ -91,7 +91,7 @@ static const char *log_severity_name(LogSeverity severity)
"FATAL",
};
- if (static_cast<unsigned int>(severity) < ARRAY_SIZE(names))
+ if (static_cast<unsigned int>(severity) < std::size(names))
return names[severity];
else
return "UNKWN";
@@ -105,8 +105,8 @@ static const char *log_severity_name(LogSeverity severity)
class LogOutput
{
public:
- LogOutput(const char *path);
- LogOutput(std::ostream *stream);
+ LogOutput(const char *path, bool color);
+ LogOutput(std::ostream *stream, bool color);
LogOutput();
~LogOutput();
@@ -120,14 +120,16 @@ private:
std::ostream *stream_;
LoggingTarget target_;
+ bool color_;
};
/**
* \brief Construct a log output based on a file
* \param[in] path Full path to log file
+ * \param[in] color True to output colored messages
*/
-LogOutput::LogOutput(const char *path)
- : target_(LoggingTargetFile)
+LogOutput::LogOutput(const char *path, bool color)
+ : target_(LoggingTargetFile), color_(color)
{
stream_ = new std::ofstream(path);
}
@@ -135,9 +137,10 @@ LogOutput::LogOutput(const char *path)
/**
* \brief Construct a log output based on a stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*/
-LogOutput::LogOutput(std::ostream *stream)
- : stream_(stream), target_(LoggingTargetStream)
+LogOutput::LogOutput(std::ostream *stream, bool color)
+ : stream_(stream), target_(LoggingTargetStream), color_(color)
{
}
@@ -145,7 +148,7 @@ LogOutput::LogOutput(std::ostream *stream)
* \brief Construct a log output to syslog
*/
LogOutput::LogOutput()
- : stream_(nullptr), target_(LoggingTargetSyslog)
+ : stream_(nullptr), target_(LoggingTargetSyslog), color_(false)
{
openlog("libcamera", LOG_PID, 0);
}
@@ -180,28 +183,72 @@ bool LogOutput::isValid() const
}
}
+namespace {
+
+/*
+ * For more information about ANSI escape codes, see
+ * https://en.wikipedia.org/wiki/ANSI_escape_code#Colors.
+ */
+constexpr const char *kColorReset = "\033[0m";
+constexpr const char *kColorGreen = "\033[0;32m";
+constexpr const char *kColorBrightRed = "\033[1;31m";
+constexpr const char *kColorBrightGreen = "\033[1;32m";
+constexpr const char *kColorBrightYellow = "\033[1;33m";
+constexpr const char *kColorBrightBlue = "\033[1;34m";
+constexpr const char *kColorBrightMagenta = "\033[1;35m";
+constexpr const char *kColorBrightCyan = "\033[1;36m";
+constexpr const char *kColorBrightWhite = "\033[1;37m";
+
+} /* namespace */
+
/**
* \brief Write message to log output
* \param[in] msg Message to write
*/
void LogOutput::write(const LogMessage &msg)
{
+ static const char *const severityColors[] = {
+ kColorBrightCyan,
+ kColorBrightGreen,
+ kColorBrightYellow,
+ kColorBrightRed,
+ kColorBrightMagenta,
+ };
+
+ const char *categoryColor = color_ ? kColorBrightWhite : "";
+ const char *fileColor = color_ ? kColorBrightBlue : "";
+ const char *prefixColor = color_ ? kColorGreen : "";
+ const char *resetColor = color_ ? kColorReset : "";
+ const char *severityColor = "";
+ LogSeverity severity = msg.severity();
std::string str;
+ if (color_) {
+ if (static_cast<unsigned int>(severity) < std::size(severityColors))
+ severityColor = severityColors[severity];
+ else
+ severityColor = kColorBrightWhite;
+ }
+
switch (target_) {
case LoggingTargetSyslog:
- str = std::string(log_severity_name(msg.severity())) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
- writeSyslog(msg.severity(), str);
+ str = std::string(log_severity_name(severity)) + " "
+ + msg.category().name() + " " + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += msg.prefix() + ": ";
+ str += msg.msg();
+ writeSyslog(severity, str);
break;
case LoggingTargetStream:
case LoggingTargetFile:
str = "[" + utils::time_point_to_string(msg.timestamp()) + "] ["
+ std::to_string(Thread::currentId()) + "] "
- + log_severity_name(msg.severity()) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
+ + severityColor + log_severity_name(severity) + " "
+ + categoryColor + msg.category().name() + " "
+ + fileColor + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += prefixColor + msg.prefix() + ": ";
+ str += resetColor + msg.msg();
writeStream(str);
break;
default:
@@ -247,13 +294,15 @@ void LogOutput::writeStream(const std::string &str)
class Logger
{
public:
+ ~Logger();
+
static Logger *instance();
void write(const LogMessage &msg);
void backtrace();
- int logSetFile(const char *path);
- int logSetStream(std::ostream *stream);
+ int logSetFile(const char *path, bool color);
+ int logSetStream(std::ostream *stream, bool color);
int logSetTarget(LoggingTarget target);
void logSetLevel(const char *category, const char *level);
@@ -266,14 +315,18 @@ private:
friend LogCategory;
void registerCategory(LogCategory *category);
- void unregisterCategory(LogCategory *category);
+ LogCategory *findCategory(const char *name) const;
+
+ static bool destroyed_;
- std::unordered_set<LogCategory *> categories_;
+ std::vector<LogCategory *> categories_;
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
};
+bool Logger::destroyed_ = false;
+
/**
* \enum LoggingTarget
* \brief Log destination type
@@ -294,35 +347,47 @@ private:
/**
* \brief Direct logging to a file
* \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
*
* This function directs the log output to the file identified by \a path. The
* previous log target, if any, is closed, and all new log messages will be
* written to the new log file.
*
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a path refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring
+ * when it is not suitable for the log target.
+ *
* If the function returns an error, the log target is not changed.
*
* \return Zero on success, or a negative error code otherwise.
*/
-int logSetFile(const char *path)
+int logSetFile(const char *path, bool color)
{
- return Logger::instance()->logSetFile(path);
+ return Logger::instance()->logSetFile(path, color);
}
/**
* \brief Direct logging to a stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*
* This function directs the log output to \a stream. The previous log target,
* if any, is closed, and all new log messages will be written to the new log
* stream.
*
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a stream refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring
+ * when it is not suitable for the log target.
+ *
* If the function returns an error, the log target is not changed.
*
* \return Zero on success, or a negative error code otherwise.
*/
-int logSetStream(std::ostream *stream)
+int logSetStream(std::ostream *stream, bool color)
{
- return Logger::instance()->logSetStream(stream);
+ return Logger::instance()->logSetStream(stream, color);
}
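Since coloring is applied unconditionally, a caller logging to a terminal will typically gate the color argument on the stream actually being a TTY. A minimal sketch, assuming setupLogging() is the application's own helper:

#include <cstdio>
#include <iostream>
#include <unistd.h>

#include <libcamera/base/log.h>

/* Hypothetical helper: enable coloring only when stderr is a terminal. */
void setupLogging()
{
	bool color = isatty(fileno(stderr)) == 1;
	libcamera::logSetStream(&std::cerr, color);
}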
/**
@@ -368,6 +433,14 @@ void logSetLevel(const char *category, const char *level)
Logger::instance()->logSetLevel(category, level);
}
+Logger::~Logger()
+{
+ destroyed_ = true;
+
+ for (LogCategory *category : categories_)
+ delete category;
+}
+
/**
* \brief Retrieve the logger instance
*
@@ -379,6 +452,10 @@ void logSetLevel(const char *category, const char *level)
Logger *Logger::instance()
{
static Logger instance;
+
+ if (destroyed_)
+ return nullptr;
+
return &instance;
}
@@ -400,44 +477,37 @@ void Logger::write(const LogMessage &msg)
*/
void Logger::backtrace()
{
-#if HAVE_BACKTRACE
std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
if (!output)
return;
- void *buffer[32];
- int num_entries = ::backtrace(buffer, ARRAY_SIZE(buffer));
- char **strings = backtrace_symbols(buffer, num_entries);
- if (!strings)
- return;
-
- std::ostringstream msg;
- msg << "Backtrace:" << std::endl;
-
/*
- * Skip the first two entries that correspond to this method and
+ * Skip the first two entries that correspond to this function and
* ~LogMessage().
*/
- for (int i = 2; i < num_entries; ++i)
- msg << strings[i] << std::endl;
-
- output->write(msg.str());
+ std::string backtrace = Backtrace().toString(2);
+ if (backtrace.empty()) {
+ output->write("Backtrace not available\n");
+ return;
+ }
- free(strings);
-#endif
+ output->write("Backtrace:\n");
+ output->write(backtrace);
}
/**
* \brief Set the log file
* \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
*
* \sa libcamera::logSetFile()
*
* \return Zero on success, or a negative error code otherwise.
*/
-int Logger::logSetFile(const char *path)
+int Logger::logSetFile(const char *path, bool color)
{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(path);
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(path, color);
if (!output->isValid())
return -EINVAL;
@@ -448,14 +518,16 @@ int Logger::logSetFile(const char *path)
/**
* \brief Set the log stream
* \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
*
* \sa libcamera::logSetStream()
*
* \return Zero on success, or a negative error code otherwise.
*/
-int Logger::logSetStream(std::ostream *stream)
+int Logger::logSetStream(std::ostream *stream, bool color)
{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(stream);
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(stream, color);
std::atomic_store(&output_, output);
return 0;
}
@@ -470,15 +542,11 @@ int Logger::logSetStream(std::ostream *stream)
*/
int Logger::logSetTarget(enum LoggingTarget target)
{
- std::shared_ptr<LogOutput> output;
-
switch (target) {
case LoggingTargetSyslog:
- output = std::make_shared<LogOutput>();
- std::atomic_store(&output_, output);
+ std::atomic_store(&output_, std::make_shared<LogOutput>());
break;
case LoggingTargetNone:
- output = nullptr;
std::atomic_store(&output_, std::shared_ptr<LogOutput>());
break;
default:
@@ -502,7 +570,7 @@ void Logger::logSetLevel(const char *category, const char *level)
return;
for (LogCategory *c : categories_) {
- if (!strcmp(c->name(), category)) {
+ if (c->name() == category) {
c->setSeverity(severity);
break;
}
@@ -511,9 +579,16 @@ void Logger::logSetLevel(const char *category, const char *level)
/**
* \brief Construct a logger
+ *
+ * If the LIBCAMERA_LOG_FILE environment variable is not set, log to std::cerr.
+ * The log messages are then colored by default. This can be overridden by
+ * setting the LIBCAMERA_LOG_NO_COLOR environment variable to disable coloring.
*/
Logger::Logger()
{
+ bool color = !utils::secure_getenv("LIBCAMERA_LOG_NO_COLOR");
+ logSetStream(&std::cerr, color);
+
parseLogFile();
parseLogLevels();
}
@@ -524,22 +599,21 @@ Logger::Logger()
* If the LIBCAMERA_LOG_FILE environment variable is set, open the file it
* points to and redirect the logger output to it. If the environment variable
* is set to "syslog", then the logger output will be directed to syslog. Errors
- * are silently ignored and don't affect the logger output (set to stderr).
+ * are silently ignored and don't affect the logger output (set to std::cerr by
+ * default).
*/
void Logger::parseLogFile()
{
const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
- if (!file) {
- logSetStream(&std::cerr);
+ if (!file)
return;
- }
if (!strcmp(file, "syslog")) {
logSetTarget(LoggingTargetSyslog);
return;
}
- logSetFile(file);
+ logSetFile(file, false);
}
/**
@@ -620,7 +694,7 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
severity = LogInvalid;
} else {
severity = LogInvalid;
- for (unsigned int i = 0; i < ARRAY_SIZE(names); ++i) {
+ for (unsigned int i = 0; i < std::size(names); ++i) {
if (names[i] == level) {
severity = i;
break;
@@ -635,12 +709,12 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
* \brief Register a log category with the logger
* \param[in] category The log category
*
- * Log categories must have unique names. If a category with the same name
- * already exists this function performs no operation.
+ * Log categories must have unique names. It is invalid to call this function
+ * if a log category with the same name already exists.
*/
void Logger::registerCategory(LogCategory *category)
{
- categories_.insert(category);
+ categories_.push_back(category);
const std::string &name = category->name();
for (const std::pair<std::string, LogSeverity> &level : levels_) {
@@ -665,15 +739,19 @@ void Logger::registerCategory(LogCategory *category)
}
/**
- * \brief Unregister a log category from the logger
- * \param[in] category The log category
- *
- * If the \a category hasn't been registered with the logger this function
- * performs no operation.
+ * \brief Find an existing log category with the given name
+ * \param[in] name Name of the log category
+ * \return A pointer to the found log category, or nullptr if not found
*/
-void Logger::unregisterCategory(LogCategory *category)
+LogCategory *Logger::findCategory(const char *name) const
{
- categories_.erase(category);
+ if (auto it = std::find_if(categories_.begin(), categories_.end(),
+ [name](auto c) { return c->name() == name; });
+ it != categories_.end()) {
+ return *it;
+ }
+
+ return nullptr;
}
/**
@@ -701,18 +779,35 @@ void Logger::unregisterCategory(LogCategory *category)
*/
/**
+ * \brief Create a new LogCategory or return an existing one
+ * \param[in] name Name of the log category
+ *
+ * Create and return a new LogCategory with the given name if such a category
+ * does not yet exist, or return the existing one.
+ *
+ * \return A pointer to the LogCategory
+ */
+LogCategory *LogCategory::create(const char *name)
+{
+ static Mutex mutex_;
+ MutexLocker locker(mutex_);
+ LogCategory *category = Logger::instance()->findCategory(name);
+
+ if (!category) {
+ category = new LogCategory(name);
+ Logger::instance()->registerCategory(category);
+ }
+
+ return category;
+}
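In practice callers rarely invoke create() directly; categories are normally declared through the LOG_DEFINE_CATEGORY() and LOG() macros from log.h, which are built on top of this function. A minimal sketch, with Example as a made-up category name:

#include <libcamera/base/log.h>

namespace libcamera {

LOG_DEFINE_CATEGORY(Example)

void doSomething()
{
	/* Emits through the lazily-created "Example" category. */
	LOG(Example, Debug) << "something happened";
}

} /* namespace libcamera */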
+
+/**
* \brief Construct a log category
* \param[in] name The category name
*/
LogCategory::LogCategory(const char *name)
: name_(name), severity_(LogSeverity::LogInfo)
{
- Logger::instance()->registerCategory(this);
-}
-
-LogCategory::~LogCategory()
-{
- Logger::instance()->unregisterCategory(this);
}
/**
@@ -745,12 +840,12 @@ void LogCategory::setSeverity(LogSeverity severity)
* The default log category is named "default" and is used by the LOG() macro
* when no log category is specified.
*
- * \return A pointer to the default log category
+ * \return A reference to the default log category
*/
const LogCategory &LogCategory::defaultCategory()
{
- static const LogCategory category("default");
- return category;
+ static const LogCategory *category = LogCategory::create("default");
+ return *category;
}
/**
@@ -763,24 +858,6 @@ const LogCategory &LogCategory::defaultCategory()
*/
/**
- * \brief Construct a log message for the default category
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] severity The log message severity, controlling how the message
- * will be displayed
- *
- * Create a log message pertaining to line \a line of file \a fileName. The
- * \a severity argument sets the message severity to control whether it will be
- * output or dropped.
- */
-LogMessage::LogMessage(const char *fileName, unsigned int line,
- LogSeverity severity)
- : category_(LogCategory::defaultCategory()), severity_(severity)
-{
- init(fileName, line);
-}
-
-/**
* \brief Construct a log message for a given category
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
@@ -788,14 +865,17 @@ LogMessage::LogMessage(const char *fileName, unsigned int line,
* will be displayed
* \param[in] severity The log message severity, controlling how the message
* will be displayed
+ * \param[in] prefix The log message prefix
*
* Create a log message pertaining to line \a line of file \a fileName. The
* \a severity argument sets the message severity to control whether it will be
- * output or dropped.
+ * output or dropped. The \a prefix optionally identifies the object instance
+ * logging the message.
*/
LogMessage::LogMessage(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity)
- : category_(category), severity_(severity)
+ const LogCategory &category, LogSeverity severity,
+ const std::string &prefix)
+ : category_(category), severity_(severity), prefix_(prefix)
{
init(fileName, line);
}
@@ -835,13 +915,17 @@ LogMessage::~LogMessage()
if (severity_ == LogInvalid)
return;
+ Logger *logger = Logger::instance();
+ if (!logger)
+ return;
+
msgStream_ << std::endl;
if (severity_ >= category_.severity())
- Logger::instance()->write(*this);
+ logger->write(*this);
if (severity_ == LogSeverity::LogFatal) {
- Logger::instance()->backtrace();
+ logger->backtrace();
std::abort();
}
}
@@ -881,6 +965,12 @@ LogMessage::~LogMessage()
*/
/**
+ * \fn LogMessage::prefix()
+ * \brief Retrieve the prefix of the log message
+ * \return The prefix of the message
+ */
+
+/**
* \fn LogMessage::msg()
* \brief Retrieve the message text of the log message
* \return The message text of the message, as a string
@@ -892,9 +982,9 @@ LogMessage::~LogMessage()
*
* The Loggable class allows classes to extend log messages without any change
* to the way the LOG() macro is invoked. By inheriting from Loggable and
- * implementing the logPrefix() virtual method, a class can specify extra
+ * implementing the logPrefix() virtual function, a class can specify extra
* information to be automatically added to messages logged from class member
- * methods.
+ * functions.
*/
Loggable::~Loggable()
@@ -905,7 +995,7 @@ Loggable::~Loggable()
* \fn Loggable::logPrefix()
* \brief Retrieve a string to be prefixed to the log message
*
- * This method allows classes inheriting from the Loggable class to extend the
+ * This function allows classes inheriting from the Loggable class to extend the
* logger with an object-specific prefix output right before the log message
* contents.
*
@@ -914,78 +1004,42 @@ Loggable::~Loggable()
/**
* \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
+ * \param[in] category The log message category
* \param[in] severity The log message severity
- *
- * This method is used as a backeng by the LOG() macro to create a log message
- * for locations inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage Loggable::_log(const char *fileName, unsigned int line,
- LogSeverity severity) const
-{
- LogMessage msg(fileName, line, severity);
-
- msg.stream() << logPrefix() << ": ";
- return msg;
-}
-
-/**
- * \brief Create a temporary LogMessage object to log a message
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
- * \param[in] category The log message category
- * \param[in] severity The log message severity
*
- * This method is used as a backeng by the LOG() macro to create a log message
+ * This function is used as a backend by the LOG() macro to create a log message
* for locations inheriting from the Loggable class.
*
* \return A log message
*/
-LogMessage Loggable::_log(const char *fileName, unsigned int line,
- const LogCategory &category,
- LogSeverity severity) const
+LogMessage Loggable::_log(const LogCategory *category, LogSeverity severity,
+ const char *fileName, unsigned int line) const
{
- LogMessage msg(fileName, line, category, severity);
-
- msg.stream() << logPrefix() << ": ";
- return msg;
+ return LogMessage(fileName, line,
+ category ? *category : LogCategory::defaultCategory(),
+ severity, logPrefix());
}
/**
* \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
+ * \param[in] category The log message category
* \param[in] severity The log message severity
- *
- * This function is used as a backeng by the LOG() macro to create a log
- * message for locations not inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage _log(const char *fileName, unsigned int line, LogSeverity severity)
-{
- return LogMessage(fileName, line, severity);
-}
-
-/**
- * \brief Create a temporary LogMessage object to log a message
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
- * \param[in] category The log message category
- * \param[in] severity The log message severity
*
- * This function is used as a backeng by the LOG() macro to create a log
+ * This function is used as a backend by the LOG() macro to create a log
* message for locations not inheriting from the Loggable class.
*
* \return A log message
*/
-LogMessage _log(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity)
+LogMessage _log(const LogCategory *category, LogSeverity severity,
+ const char *fileName, unsigned int line)
{
- return LogMessage(fileName, line, category, severity);
+ return LogMessage(fileName, line,
+ category ? *category : LogCategory::defaultCategory(),
+ severity);
}
/**
@@ -1030,10 +1084,17 @@ LogMessage _log(const char *fileName, unsigned int line,
*
* If the severity is set to Fatal, execution is aborted and the program
* terminates immediately after printing the message.
+ *
+ * \warning Logging from the destructor of a global object, either directly or
+ * indirectly, results in undefined behaviour.
+ *
+ * \todo Allow logging from destructors of global objects to the largest
+ * possible extent
*/
/**
* \def ASSERT(condition)
+ * \hideinitializer
* \brief Abort program execution if assertion fails
*
* If \a condition is false, ASSERT() logs an error message with the Fatal log
diff --git a/src/libcamera/base/memfd.cpp b/src/libcamera/base/memfd.cpp
new file mode 100644
index 00000000..ed0b299b
--- /dev/null
+++ b/src/libcamera/base/memfd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Anonymous file creation
+ */
+
+#include <libcamera/base/memfd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/memfd.h
+ * \brief Anonymous file creation
+ */
+
+#ifndef __DOXYGEN__
+namespace {
+
+/* uClibc doesn't provide the file sealing API. */
+#if not HAVE_FILE_SEALS
+#define F_ADD_SEALS 1033
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#endif
+
+#if not HAVE_MEMFD_CREATE
+int memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(SYS_memfd_create, name, flags);
+}
+#endif
+
+} /* namespace */
+#endif /* __DOXYGEN__ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(File)
+
+/**
+ * \class MemFd
+ * \brief Helper class to create anonymous files
+ *
+ * Anonymous files behave like regular files, and can be modified, truncated,
+ * memory-mapped and so on. Unlike regular files, however, they live in RAM and
+ * don't have permanent backing storage.
+ */
+
+/**
+ * \enum MemFd::Seal
+ * \brief Seals for the MemFd::create() function
+ * \var MemFd::Seal::None
+ * \brief No seals (used as default value)
+ * \var MemFd::Seal::Shrink
+ * \brief Prevent the memfd from shrinking
+ * \var MemFd::Seal::Grow
+ * \brief Prevent the memfd from growing
+ */
+
+/**
+ * \typedef MemFd::Seals
+ * \brief A bitwise combination of MemFd::Seal values
+ */
+
+/**
+ * \brief Create an anonymous file
+ * \param[in] name The file name (displayed in symbolic links in /proc/self/fd/)
+ * \param[in] size The file size
+ * \param[in] seals The file seals
+ *
+ * This function is a helper that wraps anonymous file (memfd) creation and
+ * sets the file size and optional seals.
+ *
+ * \return The descriptor of the anonymous file if creation succeeded, or an
+ * invalid UniqueFD otherwise
+ */
+UniqueFD MemFd::create(const char *name, std::size_t size, Seals seals)
+{
+ int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to allocate memfd storage for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ UniqueFD memfd(ret);
+
+ ret = ftruncate(memfd.get(), size);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to set memfd size for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ if (seals) {
+ int fileSeals = (seals & Seal::Shrink ? F_SEAL_SHRINK : 0)
+ | (seals & Seal::Grow ? F_SEAL_GROW : 0);
+
+ ret = fcntl(memfd.get(), F_ADD_SEALS, fileSeals);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to seal the memfd for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+ }
+
+ return memfd;
+}
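As a usage illustration, the returned UniqueFD can be mapped and then dropped, since the mapping keeps the memory alive independently of the descriptor. A minimal sketch, with mapScratchBuffer() being a hypothetical caller:

#include <cstddef>
#include <sys/mman.h>

#include <libcamera/base/memfd.h>
#include <libcamera/base/unique_fd.h>

/* Hypothetical caller: allocate and map a sealed scratch buffer. */
void *mapScratchBuffer(std::size_t size)
{
	libcamera::UniqueFD memfd =
		libcamera::MemFd::create("scratch", size,
					 libcamera::MemFd::Seal::Shrink |
					 libcamera::MemFd::Seal::Grow);
	if (!memfd.isValid())
		return nullptr;

	/* The mapping remains valid after the UniqueFD closes the fd. */
	void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 memfd.get(), 0);
	return mem != MAP_FAILED ? mem : nullptr;
}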
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
new file mode 100644
index 00000000..a742dfdf
--- /dev/null
+++ b/src/libcamera/base/meson.build
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_base_public_sources = files([
+ 'bound_method.cpp',
+ 'class.cpp',
+ 'flags.cpp',
+ 'object.cpp',
+ 'shared_fd.cpp',
+ 'signal.cpp',
+ 'unique_fd.cpp',
+])
+
+libcamera_base_internal_sources = files([
+ 'backtrace.cpp',
+ 'event_dispatcher.cpp',
+ 'event_dispatcher_poll.cpp',
+ 'event_notifier.cpp',
+ 'file.cpp',
+ 'log.cpp',
+ 'memfd.cpp',
+ 'message.cpp',
+ 'mutex.cpp',
+ 'semaphore.cpp',
+ 'thread.cpp',
+ 'timer.cpp',
+ 'utils.cpp',
+])
+
+libdw = dependency('libdw', required : false)
+libunwind = dependency('libunwind', required : false)
+
+if cc.has_header_symbol('execinfo.h', 'backtrace')
+ config_h.set('HAVE_BACKTRACE', 1)
+endif
+
+if libdw.found()
+ config_h.set('HAVE_DW', 1)
+endif
+
+if libunwind.found()
+ config_h.set('HAVE_UNWIND', 1)
+endif
+
+libcamera_base_deps = [
+ libatomic,
+ libdw,
+ libthreads,
+ libunwind,
+]
+
+# Internal components must use the libcamera_base_private dependency to enable
+# the use of headers which must not be exposed to the libcamera public API.
+libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
+
+libcamera_base_lib = shared_library('libcamera-base',
+ [
+ libcamera_base_public_sources,
+ libcamera_base_internal_sources,
+ libcamera_base_headers,
+ ],
+ version : libcamera_version,
+ soversion : libcamera_soversion,
+ name_prefix : '',
+ install : true,
+ cpp_args : libcamera_base_args,
+ include_directories : libcamera_includes,
+ dependencies : libcamera_base_deps)
+
+libcamera_base = declare_dependency(sources : [
+ libcamera_base_headers,
+ ],
+ include_directories : libcamera_includes,
+ link_with : libcamera_base_lib)
+
+pkg_mod = import('pkgconfig')
+pkg_mod.generate(libcamera_base_lib,
+ description : 'Camera support base utility library',
+ subdirs : 'libcamera')
+
+libcamera_base_private = declare_dependency(dependencies : libcamera_base,
+ compile_args : libcamera_base_args)
diff --git a/src/libcamera/message.cpp b/src/libcamera/base/message.cpp
index 77f2bdd5..098faac6 100644
--- a/src/libcamera/message.cpp
+++ b/src/libcamera/base/message.cpp
@@ -2,17 +2,16 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Message queue support
+ * Message queue support
*/
-#include "message.h"
+#include <libcamera/base/message.h>
-#include <libcamera/signal.h>
-
-#include "log.h"
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
/**
- * \file message.h
+ * \file base/message.h
* \brief Message queue support
*
* The messaging API enables inter-thread communication through message
@@ -25,7 +24,7 @@
* thus the message shall not store any temporary data.
*
* The message is delivered in the context of the object's thread, through the
- * Object::message() virtual method. After delivery the message is
+ * Object::message() virtual function. After delivery the message is
* automatically deleted.
*/
@@ -49,6 +48,8 @@ std::atomic_uint Message::nextUserType_{ Message::UserMessage };
* \brief Asynchronous method invocation across threads
* \var Message::ThreadMoveMessage
* \brief Object is being moved to a different thread
+ * \var Message::DeferredDelete
+ * \brief Object is scheduled for deletion
* \var Message::UserMessage
* \brief First value available for user-defined messages
*/
@@ -83,10 +84,10 @@ Message::~Message()
*
* Custom message types use values starting at Message::UserMessage. Assigning
* custom types manually may lead to accidental duplicated types. To avoid this
- * problem, this method reserves and returns the next available user-defined
+ * problem, this function reserves and returns the next available user-defined
* message type.
*
- * The recommended way to use this method is to subclass Message and provide a
+ * The recommended way to use this function is to subclass Message and provide a
* static accessor for the custom message type.
*
* \code{.cpp}
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
new file mode 100644
index 00000000..2a4542c4
--- /dev/null
+++ b/src/libcamera/base/mutex.cpp
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Mutex classes with clang thread safety annotation
+ */
+
+#include <libcamera/base/mutex.h>
+
+/**
+ * \file base/mutex.h
+ * \brief Mutex classes with clang thread safety annotation
+ */
+
+namespace libcamera {
+
+/**
+ * \class Mutex
+ * \brief std::mutex wrapper with clang thread safety annotation
+ *
+ * The Mutex class wraps a std::mutex instance to add clang thread safety
+ * annotation support. The class exposes the same interface as std::mutex and
+ * can be used as a transparent replacement. It integrates with the
+ * MutexLocker and ConditionVariable classes.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/mutex for the complete API
+ * documentation.
+ */
+
+/**
+ * \class MutexLocker
+ * \brief std::unique_lock wrapper with clang thread safety annotation
+ *
+ * The MutexLocker class wraps a std::unique_lock instance to add clang thread
+ * safety annotation support. The class exposes the same interface as
+ * std::unique_lock and can be used as a transparent replacement. It integrates
+ * with the Mutex and ConditionVariable classes.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/unique_lock for the complete API
+ * documentation.
+ */
+
+/**
+ * \class ConditionVariable
+ * \brief std::condition_variable wrapper integrating with MutexLocker
+ *
+ * The ConditionVariable class wraps a std::condition_variable instance to
+ * integrate with the MutexLocker class. The class exposes the same interface as
+ * std::condition_variable and can be used as a transparent replacement.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/condition_variable for the
+ * complete API documentation.
+ */
+
+} /* namespace libcamera */
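The three classes are designed to be used together so that clang's -Wthread-safety analysis can check locking at compile time. A minimal sketch, with Gate being a hypothetical class:

#include <libcamera/base/mutex.h>

using namespace libcamera;

/* Hypothetical one-shot gate built on Mutex and ConditionVariable. */
class Gate
{
public:
	void open()
	{
		MutexLocker locker(mutex_);
		open_ = true;
		cv_.notify_all();
	}

	void wait()
	{
		MutexLocker locker(mutex_);
		cv_.wait(locker, [this]() LIBCAMERA_TSA_REQUIRES(mutex_) {
			return open_;
		});
	}

private:
	Mutex mutex_;
	ConditionVariable cv_;
	bool open_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = false;
};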
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
new file mode 100644
index 00000000..745d2565
--- /dev/null
+++ b/src/libcamera/base/object.cpp
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Base object
+ */
+
+#include <libcamera/base/object.h>
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/semaphore.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file base/object.h
+ * \brief Base object to support automatic signal disconnection
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Object)
+
+/**
+ * \class Object
+ * \brief Base object to support automatic signal disconnection
+ *
+ * The Object class simplifies signal/slot handling for classes implementing
+ * slots. By inheriting from Object, an object is automatically disconnected
+ * from all connected signals when it gets destroyed.
+ *
+ * Object instances are bound to the thread of their parent, or the thread in
+ * which they're created when they have no parent. When a message is posted to
+ * an object, its handler will run in the object's thread. This allows
+ * implementing easy message passing between threads by inheriting from the
+ * Object class.
+ *
+ * Deleting an object from a thread other than the one the object is bound to is
+ * unsafe, unless the caller ensures that the object's thread is stopped and no
+ * parent or child of the object gets deleted concurrently. See
+ * Object::~Object() for more information.
+ *
+ * Object slots connected to signals will also run in the context of the
+ * object's thread, regardless of whether the signal is emitted in the same or
+ * in another thread.
+ *
+ * Objects can be connected to multiple signals, but they can only be connected
+ * to each signal once. Attempting to create multiple concurrent connections
+ * between the same signal and the same Object (to either the same or different
+ * slots of the object) will cause an assertion failure. While it would be
+ * possible to allow the implementation to let objects connect to the same
+ * signal multiple times, there are no expected use cases for this in libcamera
+ * and this behaviour is restricted to favour defensive programming.
+ *
+ * \sa Message, Signal, Thread
+ */
+
+/**
+ * \brief Construct an Object instance
+ * \param[in] parent The object parent
+ *
+ * The new Object instance is bound to the thread of its \a parent, or to the
+ * current thread if the \a parent is nullptr.
+ */
+Object::Object(Object *parent)
+ : parent_(parent), pendingMessages_(0)
+{
+ thread_ = parent ? parent->thread() : Thread::current();
+
+ if (parent)
+ parent->children_.push_back(this);
+}
+
+/**
+ * \brief Destroy an Object instance
+ *
+ * Deleting an Object automatically disconnects all signals from the Object's
+ * slots. All the Object's children are made orphan, but stay bound to their
+ * current thread.
+ *
+ * Object instances shall be destroyed from the thread they are bound to,
+ * otherwise undefined behaviour may occur. If deletion of an Object needs to
+ * be scheduled from a different thread, deleteLater() shall be used.
+ *
+ * As an exception to this rule, Object instances may be deleted from a
+ * different thread if the thread the instance is bound to is stopped through
+ * the whole duration of the object's destruction, *and* the parent and children
+ * of the object do not get deleted concurrently. The caller is responsible for
+ * fulfilling those requirements.
+ *
+ * In all cases Object instances shall be deleted before the Thread they are
+ * bound to.
+ */
+Object::~Object()
+{
+ ASSERT(Thread::current() == thread_ || !thread_->isRunning());
+
+ /*
+ * Move signals to a private list to avoid concurrent iteration and
+ * deletion of items from Signal::disconnect().
+ */
+ std::list<SignalBase *> signals(std::move(signals_));
+ for (SignalBase *signal : signals)
+ signal->disconnect(this);
+
+ if (pendingMessages_)
+ thread()->removeMessages(this);
+
+ if (parent_) {
+ auto it = std::find(parent_->children_.begin(),
+ parent_->children_.end(), this);
+ ASSERT(it != parent_->children_.end());
+ parent_->children_.erase(it);
+ }
+
+ for (auto child : children_)
+ child->parent_ = nullptr;
+}
+
+/**
+ * \brief Schedule deletion of the instance in the thread it belongs to
+ *
+ * This function schedules deletion of the Object when control returns to the
+ * event loop that the object belongs to. This ensures the object is destroyed
+ * from the right context, as required by the libcamera threading model.
+ *
+ * If this function is called before the thread's event loop is started or after
+ * it has stopped, the object will be deleted when the event loop (re)starts. If
+ * this never occurs, the object will be leaked.
+ *
+ * Deferred deletion can be used to control the destruction context with shared
+ * pointers. An object managed with shared pointers is deleted when the last
+ * reference is destroyed, which makes it difficult to ensure through software
+ * design in which context the deletion will take place. With a custom deleter
+ * for the shared pointer using deleteLater(), the deletion can be guaranteed to
+ * happen in the thread the object is bound to.
+ *
+ * \code{.cpp}
+ * std::shared_ptr<MyObject> createObject()
+ * {
+ * struct Deleter : std::default_delete<MyObject> {
+ * void operator()(MyObject *obj)
+ * {
+ * obj->deleteLater();
+ * }
+ * };
+ *
+ * MyObject *obj = new MyObject();
+ *
+ * return std::shared_ptr<MyObject>(obj, Deleter());
+ * }
+ * \endcode
+ *
+ * \context This function is \threadsafe.
+ */
+void Object::deleteLater()
+{
+ postMessage(std::make_unique<Message>(Message::DeferredDelete));
+}
+
+/**
+ * \brief Post a message to the object's thread
+ * \param[in] msg The message
+ *
+ * This function posts the message \a msg to the message queue of the object's
+ * thread, to be delivered to the object through the message() function in the
+ * context of its thread. Message ownership is passed to the thread, and the
+ * message will be deleted after being delivered.
+ *
+ * Messages are delivered through the thread's event loop. If the thread is not
+ * running its event loop the message will not be delivered until the event
+ * loop gets started.
+ *
+ * Due to their asynchronous nature, threads do not provide any guarantee that
+ * all posted messages are delivered before the thread is stopped. See
+ * \ref thread-stop for additional information.
+ *
+ * \context This function is \threadsafe.
+ */
+void Object::postMessage(std::unique_ptr<Message> msg)
+{
+ thread()->postMessage(std::move(msg), this);
+}
+
+/**
+ * \brief Message handler for the object
+ * \param[in] msg The message
+ *
+ * This virtual function receives messages for the object. It is called in the
+ * context of the object's thread, and can be overridden to process custom
+ * messages. The parent Object::message() function shall be called for any
+ * message not handled by the override function.
+ *
+ * The message \a msg is valid only for the duration of the call; no reference
+ * to it shall be kept after this function returns.
+ */
+void Object::message(Message *msg)
+{
+ switch (msg->type()) {
+ case Message::InvokeMessage: {
+ /*
+ * A static_cast should be enough, but gcc 10 and 11 choke on
+ * it in release mode (with -O2 or -O3).
+ */
+ InvokeMessage *iMsg = dynamic_cast<InvokeMessage *>(msg);
+ Semaphore *semaphore = iMsg->semaphore();
+ iMsg->invoke();
+
+ if (semaphore)
+ semaphore->release();
+
+ break;
+ }
+
+ case Message::DeferredDelete:
+ delete this;
+ break;
+
+ default:
+ break;
+ }
+}
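A subclass that defines custom messages would typically override message() along these lines; a minimal sketch, where using Message::UserMessage directly as the custom type is illustrative only:

#include <libcamera/base/message.h>
#include <libcamera/base/object.h>

/* Hypothetical subclass handling a custom message type. */
class Worker : public libcamera::Object
{
protected:
	void message(libcamera::Message *msg) override
	{
		if (msg->type() == libcamera::Message::UserMessage) {
			/* Handle the custom message. */
			return;
		}

		/* Forward unhandled messages to the base implementation. */
		libcamera::Object::message(msg);
	}
};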
+
+/**
+ * \fn Object::assertThreadBound()
+ * \brief Check if the caller complies with thread-bound constraints
+ * \param[in] message The message to be printed on error
+ *
+ * This function verifies the calling constraints required by the \threadbound
+ * definition. It shall be called at the beginning of member functions of an
+ * Object subclass that are explicitly marked as thread-bound in their
+ * documentation.
+ *
+ * If the thread-bound constraints are not met, the function prints \a message
+ * as an error message. For debug builds, it additionally causes an assertion
+ * error.
+ *
+ * \todo Verify the thread-bound requirements for functions marked as
+ * thread-bound at the class level.
+ *
+ * \return True if the call is thread-bound compliant, false otherwise
+ */
+bool Object::assertThreadBound(const char *message)
+{
+ if (Thread::current() == thread_)
+ return true;
+
+ LOG(Object, Error) << message;
+ ASSERT(false);
+ return false;
+}
+
+/**
+ * \fn R Object::invokeMethod()
+ * \brief Invoke a method asynchronously on an Object instance
+ * \param[in] func The object method to invoke
+ * \param[in] type Connection type for method invocation
+ * \param[in] args The method arguments
+ *
+ * This function invokes the member method \a func with arguments \a args, based
+ * on the connection \a type. Depending on the type, the method will be called
+ * synchronously in the same thread or asynchronously in the object's thread.
+ *
+ * Arguments \a args passed by value or reference are copied, while pointers
+ * are passed untouched. The caller shall ensure that any pointer argument
+ * remains valid until the method is invoked.
+ *
+ * Due to the asynchronous nature of threads, functions invoked asynchronously
+ * with the ConnectionTypeQueued type are not guaranteed to be called before
+ * the thread is stopped. See \ref thread-stop for additional information.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \return For connection types ConnectionTypeDirect and
+ * ConnectionTypeBlocking, return the return value of the invoked method. For
+ * connection type ConnectionTypeQueued, return a default-constructed R value.
+ */
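A minimal sketch of an asynchronous invocation, with Processor being a hypothetical Object subclass:

#include <libcamera/base/object.h>

class Processor : public libcamera::Object
{
public:
	void process(int frame)
	{
		/* Runs in the thread the Processor is bound to. */
	}
};

void queueFrame(Processor *processor, int frame)
{
	/* Queued: returns immediately, process(frame) runs later. */
	processor->invokeMethod(&Processor::process,
				libcamera::ConnectionTypeQueued, frame);
}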
+
+/**
+ * \fn Object::thread()
+ * \brief Retrieve the thread the object is bound to
+ * \context This function is \threadsafe.
+ * \return The thread the object is bound to
+ */
+
+/**
+ * \brief Move the object and all its children to a different thread
+ * \param[in] thread The target thread
+ *
+ * This function moves the object and all its children from the current thread
+ * to the new \a thread.
+ *
+ * Before the object is moved, a Message::ThreadMoveMessage message is sent to
+ * it. The message() function can be reimplemented in derived classes to be
+ * notified of the upcoming thread move and perform any required processing.
+ *
+ * Moving an object that has a parent is not allowed, and causes undefined
+ * behaviour.
+ *
+ * \context This function is \threadbound.
+ */
+void Object::moveToThread(Thread *thread)
+{
+ if (!assertThreadBound("Object can't be moved from another thread"))
+ return;
+
+ if (thread_ == thread)
+ return;
+
+ if (parent_) {
+ LOG(Object, Error)
+ << "Moving object to thread with a parent is not permitted";
+ return;
+ }
+
+ notifyThreadMove();
+
+ thread->moveObject(this);
+}
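A minimal sketch of the common pattern of binding a freshly created, parentless object to a dedicated thread before starting it:

#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>

/* Hypothetical setup code, called from the thread that created worker. */
void startWorker(libcamera::Object *worker, libcamera::Thread *thread)
{
	worker->moveToThread(thread);
	thread->start();
}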
+
+void Object::notifyThreadMove()
+{
+ Message msg(Message::ThreadMoveMessage);
+ message(&msg);
+
+ for (auto child : children_)
+ child->notifyThreadMove();
+}
+
+/**
+ * \fn Object::parent()
+ * \brief Retrieve the object's parent
+ * \return The object's parent
+ */
+
+void Object::connect(SignalBase *signal)
+{
+ /*
+ * Connecting the same signal to an object multiple times is not
+ * supported.
+ */
+ ASSERT(std::find(signals_.begin(), signals_.end(), signal) == signals_.end());
+
+ signals_.push_back(signal);
+}
+
+void Object::disconnect(SignalBase *signal)
+{
+ for (auto iter = signals_.begin(); iter != signals_.end(); ) {
+ if (*iter == signal)
+ iter = signals_.erase(iter);
+ else
+ iter++;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/semaphore.cpp b/src/libcamera/base/semaphore.cpp
index ce1eae49..862f3b31 100644
--- a/src/libcamera/semaphore.cpp
+++ b/src/libcamera/base/semaphore.cpp
@@ -2,14 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.cpp - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
-#include "semaphore.h"
-#include "thread.h"
+#include <libcamera/base/semaphore.h>
/**
- * \file semaphore.h
+ * \file base/semaphore.h
* \brief General-purpose counting semaphore
*/
@@ -21,10 +20,10 @@ namespace libcamera {
*
* A semaphore is a locking primitive that protects resources. It is created
* with an initial number of resources (which may be 0), and offers two
- * primitives to acquire and release resources. The acquire() method tries to
+ * primitives to acquire and release resources. The acquire() function tries to
* acquire a number of resources, and blocks if not enough resources are
- * available until they get released. The release() method releases a number of
- * resources, waking up any consumer blocked on an acquire() call.
+ * available until they get released. The release() function releases a number
+ * of resources, waking up any consumer blocked on an acquire() call.
*/
/**
@@ -50,14 +49,16 @@ unsigned int Semaphore::available()
* \brief Acquire \a n resources
* \param[in] n The resource count
*
- * This method attempts to acquire \a n resources. If \a n is higher than the
+ * This function attempts to acquire \a n resources. If \a n is higher than the
* number of available resources, the call will block until enough resources
* become available.
*/
void Semaphore::acquire(unsigned int n)
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return available_ >= n; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return available_ >= n;
+ });
available_ -= n;
}
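A minimal sketch of using the semaphore to bound concurrency, with the request-submission context being hypothetical:

#include <libcamera/base/semaphore.h>

/* Allow at most four requests in flight at any time. */
libcamera::Semaphore slots(4);

void startRequest()
{
	slots.acquire();	/* blocks while four requests are in flight */
	/* ... submit the request; its completion handler calls slots.release() */
}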
@@ -65,7 +66,7 @@ void Semaphore::acquire(unsigned int n)
* \brief Try to acquire \a n resources without blocking
* \param[in] n The resource count
*
- * This method attempts to acquire \a n resources. If \a n is higher than the
+ * This function attempts to acquire \a n resources. If \a n is higher than the
* number of available resources, it returns false immediately without
* acquiring any resource. Otherwise it acquires the resources and returns
* true.
@@ -86,9 +87,9 @@ bool Semaphore::tryAcquire(unsigned int n)
* \brief Release \a n resources
* \param[in] n The resource count
*
- * This method releases \a n resources, increasing the available resource count
- * by \a n. If the number of available resources becomes large enough for any
- * consumer blocked on an acquire() call, those consumers get woken up.
+ * This function releases \a n resources, increasing the available resource
+ * count by \a n. If the number of available resources becomes large enough for
+ * any consumer blocked on an acquire() call, those consumers get woken up.
*/
void Semaphore::release(unsigned int n)
{
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
new file mode 100644
index 00000000..7afc8ca5
--- /dev/null
+++ b/src/libcamera/base/shared_fd.cpp
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * File descriptor wrapper with shared ownership
+ */
+
+#include <libcamera/base/shared_fd.h>
+
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/unique_fd.h>
+
+/**
+ * \file base/shared_fd.h
+ * \brief File descriptor wrapper
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SharedFD)
+
+/**
+ * \class SharedFD
+ * \brief RAII-style wrapper for file descriptors
+ *
+ * The SharedFD class provides RAII-style lifetime management of file
+ * descriptors with an efficient mechanism for ownership sharing. At its core,
+ * an internal Descriptor object wraps a file descriptor (expressed as a signed
+ * integer) with an RAII-style interface. The Descriptor is then implicitly
+ * shared with all SharedFD instances constructed as copies.
+ *
+ * When constructed from a numerical file descriptor, the SharedFD instance
+ * either duplicates or takes over the file descriptor:
+ *
+ * - The SharedFD(const int &) constructor duplicates the numerical file
+ * descriptor and wraps the duplicate in a Descriptor. The caller is
+ * responsible for closing the original file descriptor, and the value
+ *   returned by get() will be different from the value passed to the
+ * constructor.
+ *
+ * - The SharedFD(int &&) constructor takes over the numerical file descriptor
+ * and wraps it in a Descriptor. The caller shall not touch the original file
+ *   descriptor once the function returns, and the value returned by get() will
+ * be identical to the value passed to the constructor.
+ *
+ * The copy constructor and assignment operator create copies that share the
+ * Descriptor, while the move versions of those functions additionally make the
+ * other SharedFD invalid. When the last SharedFD that references a Descriptor
+ * is destroyed, the file descriptor is closed.
+ *
+ * The numerical file descriptor is available through the get() function. All
+ * SharedFD instances created as copies of a SharedFD will report the same
+ * get() value. Callers can perform operations on the file descriptor, but
+ * shall never close it manually.
+ */
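A minimal sketch contrasting the two numerical-fd constructors (error handling omitted):

#include <fcntl.h>
#include <utility>

#include <libcamera/base/shared_fd.h>

void example()
{
	int fd = open("/dev/null", O_RDONLY);

	/* Duplicates fd; the original remains open and owned by the caller. */
	libcamera::SharedFD duplicated(fd);

	/* Takes over fd and resets it to -1; no manual close() needed. */
	libcamera::SharedFD adopted(std::move(fd));

	/* duplicated.get() != adopted.get(); both close automatically. */
}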
+
+/**
+ * \brief Create a SharedFD copying a given \a fd
+ * \param[in] fd File descriptor
+ *
+ * Construct a SharedFD from a numerical file descriptor by duplicating the
+ * \a fd, and take ownership of the copy. The original \a fd is left untouched,
+ * and the caller is responsible for closing it when appropriate. The duplicated
+ * file descriptor will be closed automatically when all SharedFD instances that
+ * reference it are destroyed.
+ *
+ * If the \a fd is negative, the SharedFD is constructed as invalid and the
+ * get() function will return -1.
+ */
+SharedFD::SharedFD(const int &fd)
+{
+ if (fd < 0)
+ return;
+
+ fd_ = std::make_shared<Descriptor>(fd, true);
+ if (fd_->fd() < 0)
+ fd_.reset();
+}
+
+/**
+ * \brief Create a SharedFD taking ownership of a given \a fd
+ * \param[in] fd File descriptor
+ *
+ * Construct a SharedFD from a numerical file descriptor by taking ownership of
+ * the \a fd. The original \a fd is set to -1 and shall not be touched by the
+ * caller anymore. In particular, the caller shall not close the original \a fd
+ * manually. The wrapped file descriptor will be closed automatically when
+ * all SharedFD instances that reference it are destroyed.
+ *
+ * If the \a fd is negative, the SharedFD is constructed as invalid and the
+ * get() function will return -1.
+ */
+SharedFD::SharedFD(int &&fd)
+{
+ if (fd < 0)
+ return;
+
+ fd_ = std::make_shared<Descriptor>(fd, false);
+ /*
+ * The Descriptor constructor can't have failed here, as it took over
+ * the fd without duplicating it. Just set the original fd to -1 to
+ * implement move semantics.
+ */
+ fd = -1;
+}
+
+/**
+ * \brief Create a SharedFD taking ownership of a given UniqueFD \a fd
+ * \param[in] fd UniqueFD
+ *
+ * Construct a SharedFD from UniqueFD by taking ownership of the \a fd. The
+ * original \a fd becomes invalid.
+ */
+SharedFD::SharedFD(UniqueFD fd)
+ : SharedFD(fd.release())
+{
+}
+
+/**
+ * \brief Copy constructor, create a SharedFD from a copy of \a other
+ * \param[in] other The other SharedFD
+ *
+ * Copying a SharedFD implicitly shares ownership of the wrapped file
+ * descriptor. The original SharedFD is left untouched, and the caller is
+ * responsible for destroying it when appropriate. The wrapped file descriptor
+ * will be closed automatically when all SharedFD instances that reference it
+ * are destroyed.
+ */
+SharedFD::SharedFD(const SharedFD &other)
+ : fd_(other.fd_)
+{
+}
+
+/**
+ * \brief Move constructor, create a SharedFD by taking over \a other
+ * \param[in] other The other SharedFD
+ *
+ * Moving a SharedFD moves the reference to the wrapped descriptor owned by
+ * \a other to the new SharedFD. The \a other SharedFD is invalidated and its
+ * get() function will return -1. The wrapped file descriptor will be closed
+ * automatically when all SharedFD instances that reference it are destroyed.
+ */
+SharedFD::SharedFD(SharedFD &&other)
+ : fd_(std::move(other.fd_))
+{
+}
+
+/**
+ * \brief Destroy the SharedFD instance
+ *
+ * Destroying a SharedFD instance releases its reference to the wrapped
+ * descriptor, if any. When the last instance that references a wrapped
+ * descriptor is destroyed, the file descriptor is automatically closed.
+ */
+SharedFD::~SharedFD()
+{
+}
+
+/**
+ * \brief Copy assignment operator, replace the wrapped file descriptor with a
+ * copy of \a other
+ * \param[in] other The other SharedFD
+ *
+ * Copying a SharedFD creates a new reference to the wrapped file descriptor
+ * owned by \a other. If \a other is invalid, *this will also be invalid. The
+ * original SharedFD is left untouched, and the caller is responsible for
+ * destroying it when appropriate. The wrapped file descriptor will be closed
+ * automatically when all SharedFD instances that reference it are destroyed.
+ *
+ * \return A reference to this SharedFD
+ */
+SharedFD &SharedFD::operator=(const SharedFD &other)
+{
+ fd_ = other.fd_;
+
+ return *this;
+}
+
+/**
+ * \brief Move assignment operator, replace the wrapped file descriptor by
+ * taking over \a other
+ * \param[in] other The other SharedFD
+ *
+ * Moving a SharedFD moves the reference to the wrapped descriptor owned by
+ * \a other to the new SharedFD. If \a other is invalid, *this will also be
+ * invalid. The \a other SharedFD is invalidated and its get() function will
+ * return -1. The wrapped file descriptor will be closed automatically when
+ * all SharedFD instances that reference it are destroyed.
+ *
+ * \return A reference to this SharedFD
+ */
+SharedFD &SharedFD::operator=(SharedFD &&other)
+{
+ fd_ = std::move(other.fd_);
+
+ return *this;
+}
+
+/**
+ * \fn SharedFD::isValid()
+ * \brief Check if the SharedFD instance is valid
+ * \return True if the SharedFD is valid, false otherwise
+ */
+
+/**
+ * \fn SharedFD::get()
+ * \brief Retrieve the numerical file descriptor
+ * \return The numerical file descriptor, which may be -1 if the SharedFD
+ * instance is invalid
+ */
+
+/**
+ * \fn bool operator==(const SharedFD &lhs, const SharedFD &rhs)
+ * \brief Compare the owned file descriptors of two SharedFD for equality
+ * \param[in] lhs The first SharedFD
+ * \param[in] rhs The second SharedFD
+ *
+ * Two file descriptors are considered equal if they have the same numerical
+ * value. File descriptors with different values that both reference the same
+ * file (for instance obtained using dup()) are considered not equal.
+ *
+ * \return True if the two file descriptors are equal, false otherwise
+ */
+
+/**
+ * \fn bool operator!=(const SharedFD &lhs, const SharedFD &rhs)
+ * \brief Compare the owned file descriptors of two SharedFD for equality
+ * \param[in] lhs The first SharedFD
+ * \param[in] rhs The second SharedFD
+ *
+ * Two file descriptors are considered equal if they have the same numerical
+ * value. File descriptors with different values that both reference the same
+ * file (for instance obtained using dup()) are considered not equal.
+ *
+ * \return True if the two file descriptors are not equal, false otherwise
+ */
+
+/**
+ * \brief Duplicate a SharedFD
+ *
+ * Duplicating a SharedFD creates a duplicate of the wrapped file descriptor and
+ * returns a UniqueFD that owns the duplicate. The get() function of the original
+ * and the get() function of the duplicate will return different values. The
+ * duplicate instance will not be affected by destruction of the original
+ * instance or its copies.
+ *
+ * \return A UniqueFD owning a duplicate of the original file descriptor
+ */
+UniqueFD SharedFD::dup() const
+{
+ if (!isValid())
+ return {};
+
+ UniqueFD dupFd(::dup(get()));
+ if (!dupFd.isValid()) {
+ int ret = -errno;
+ LOG(SharedFD, Error)
+ << "Failed to dup() fd: " << strerror(-ret);
+ }
+
+ return dupFd;
+}
+
+SharedFD::Descriptor::Descriptor(int fd, bool duplicate)
+{
+ if (!duplicate) {
+ fd_ = fd;
+ return;
+ }
+
+ /* Failing to dup() a fd should not happen and is fatal. */
+ fd_ = ::dup(fd);
+ if (fd_ == -1) {
+ int ret = -errno;
+ LOG(SharedFD, Fatal)
+ << "Failed to dup() fd: " << strerror(-ret);
+ }
+}
+
+SharedFD::Descriptor::~Descriptor()
+{
+ if (fd_ != -1)
+ close(fd_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/signal.cpp b/src/libcamera/base/signal.cpp
index 6eab1fa7..b782e050 100644
--- a/src/libcamera/signal.cpp
+++ b/src/libcamera/base/signal.cpp
@@ -2,15 +2,16 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal & slot implementation
+ * Signal & slot implementation
*/
-#include <libcamera/signal.h>
+#include <libcamera/base/signal.h>
-#include "thread.h"
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
- * \file signal.h
+ * \file base/signal.h
* \brief Signal & slot implementation
*/
@@ -74,7 +75,7 @@ SignalBase::SlotList SignalBase::slots()
*
* Signals and slots are a language construct aimed at communication between
* objects through the observer pattern without the need for boilerplate code.
- * See http://doc.qt.io/qt-5/signalsandslots.html for more information.
+ * See http://doc.qt.io/qt-6/signalsandslots.html for more information.
*
* Signals model events that can be observed from objects unrelated to the event
* source. Slots are functions that are called in response to a signal. Signals
@@ -93,18 +94,19 @@ SignalBase::SlotList SignalBase::slots()
* Connecting a signal to a slot results in the slot being called with the
* arguments passed to the emit() function when the signal is emitted. Multiple
* slots can be connected to the same signal, and multiple signals can be connected
- * to the same slot. Duplicate connections between a signal and a slot are
- * allowed and result in the slot being called multiple times for the same
- * signal emission.
+ * to the same slot.
*
* When a slot belongs to an instance of the Object class, the slot is called
* in the context of the thread that the object is bound to. If the signal is
* emitted from the same thread, the slot will be called synchronously, before
* Signal::emit() returns. If the signal is emitted from a different thread,
* the slot will be called asynchronously from the object's thread's event
- * loop, after the Signal::emit() method returns, with a copy of the signal's
+ * loop, after the Signal::emit() function returns, with a copy of the signal's
* arguments. The emitter shall thus ensure that any pointer or reference
* passed through the signal will remain valid after the signal is emitted.
+ *
+ * Duplicate connections between a signal and a slot are not expected and use of
+ * the Object class to manage signals will enforce this restriction.
*/
/**
@@ -122,6 +124,30 @@ SignalBase::SlotList SignalBase::slots()
*/
/**
+ * \fn Signal::connect(T *object, Func func)
+ * \brief Connect the signal to a function object slot
+ * \param[in] object The slot object pointer
+ * \param[in] func The function object
+ *
+ * If the typename T inherits from Object, the signal will be automatically
+ * disconnected from the \a func slot of \a object when \a object is destroyed.
+ * Otherwise the caller shall disconnect signals manually before destroying \a
+ * object.
+ *
+ * The function object is typically a lambda function, but may be any object
+ * that satisfies the FunctionObject named requirements. The types of the
+ * function object arguments shall match the types of the signal arguments.
+ *
+ * No matching disconnect() function exists, as it wouldn't be possible to pass
+ * to a disconnect() function the same lambda that was passed to connect(). The
+ * connection created by this function cannot be removed selectively if the
+ * signal is connected to multiple slots of the same receiver, but may
+ * otherwise be removed using the disconnect(T *object) function.
+ *
+ * \context This function is \threadsafe.
+ */
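A minimal sketch of a lambda connection, with the signal and receiver names made up for illustration:

#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>

class Receiver : public libcamera::Object
{
};

void example()
{
	libcamera::Signal<int> frameCompleted;
	Receiver receiver;

	frameCompleted.connect(&receiver, [](int frame) {
		/* Runs in the receiver's thread context. */
	});

	frameCompleted.emit(42);
}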
+
+/**
* \fn Signal::connect(R (*func)(Args...))
* \brief Connect the signal to a static function slot
* \param[in] func The slot static function
diff --git a/src/libcamera/thread.cpp b/src/libcamera/base/thread.cpp
index 85293c18..319bfda9 100644
--- a/src/libcamera/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -2,23 +2,24 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.cpp - Thread support
+ * Thread support
*/
-#include "thread.h"
+#include <libcamera/base/thread.h>
#include <atomic>
-#include <condition_variable>
#include <list>
+#include <optional>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
-#include <libcamera/event_dispatcher.h>
-
-#include "event_dispatcher_poll.h"
-#include "log.h"
-#include "message.h"
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/event_dispatcher_poll.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \page thread Thread Support
@@ -29,6 +30,13 @@
* interactions with application threads. Careful compliance with the threading
* model will ensure avoidance of race conditions.
*
+ * Every thread created by libcamera is associated with an instance of the
+ * Thread class. Those threads run an internal event loop by default to
+ * dispatch events to objects. Additionally, the main thread of the application
+ * (defined as the thread that calls CameraManager::start()) is also associated
+ * with a Thread instance, but has no event loop accessible to libcamera. Other
+ * application threads are not visible to libcamera.
+ *
* \section thread-objects Threads and Objects
*
* Instances of the Object class and all its derived classes are thread-aware
@@ -40,13 +48,12 @@
* explicitly connected with ConnectionTypeDirect, will also be delivered from
* the object thread's event loop.
*
- * All Object instances created by libcamera are bound to an internal thread,
- * and applications don't need to provide an event loop to support them. Object
- * instances created by applications require an event loop. It is the
- * responsibility of applications to provide that event loop, either explicitly
- * through CameraManager::setEventDispatcher(), or by running the default event
- * loop provided by CameraManager::eventDispatcher() in their main thread. The
- * main thread of an application is the one that calls CameraManager::start().
+ * All Object instances created internally by libcamera are bound to internal
+ * threads. As objects interact with thread event loops for proper operation,
+ * creating an Object instance in a thread that has no internal event loop (such
+ * as the main application thread, or libcamera threads that have a custom main
+ * loop) prevents some features of the Object class from being used. See
+ * Thread::exec() for more details.
*
* \section thread-signals Threads and Signals
*
@@ -58,54 +65,10 @@
* receiver's event loop, running in the receiver's thread. This mechanism can
* be overridden by selecting a different connection type when calling
* Signal::connect().
- *
- * Asynchronous signal delivery is used internally in libcamera, but is also
- * available to applications if desired. To use this feature, applications
- * shall create receiver classes that inherit from the Object class, and
- * provide an event loop to the CameraManager as explained above. Note that
- * Object instances created by the application are limited to living in the
- * application's main thread. Creating Object instances from another thread of
- * an application causes undefined behaviour.
- *
- * \section thread-reentrancy Reentrancy and Thread-Safety
- *
- * Through the documentation, several terms are used to define how classes and
- * their member functions can be used from multiple threads.
- *
- * - A **reentrant** function may be called simultaneously from multiple
- * threads if and only if each invocation uses a different instance of the
- * class. This is the default for all member functions not explictly marked
- * otherwise.
- *
- * - \anchor thread-safe A **thread-safe** function may be called
- * simultaneously from multiple threads on the same instance of a class. A
- * thread-safe function is thus reentrant. Thread-safe functions may also be
- * called simultaneously with any other reentrant function of the same class
- * on the same instance.
- *
- * - \anchor thread-bound A **thread-bound** function may be called only from
- * the thread that the class instances lives in (see section \ref
- * thread-objects). For instances of classes that do not derive from the
- * Object class, this is the thread in which the instance was created. A
- * thread-bound function is not thread-safe, and may or may not be reentrant.
- *
- * Neither reentrancy nor thread-safety, in this context, mean that a function
- * may be called simultaneously from the same thread, for instance from a
- * callback invoked by the function. This may deadlock and isn't allowed unless
- * separately documented.
- *
- * A class is defined as reentrant, thread-safe or thread-bound if all its
- * member functions are reentrant, thread-safe or thread-bound respectively.
- * Some member functions may additionally be documented as having additional
- * thread-related attributes.
- *
- * Most classes are reentrant but not thread-safe, as making them fully
- * thread-safe would incur locking costs considered prohibitive for the
- * expected use cases.
*/
/**
- * \file thread.h
+ * \file base/thread.h
* \brief Thread support
*/
@@ -129,6 +92,11 @@ public:
* \brief Protects the \ref list_
*/
Mutex mutex_;
+ /**
+ * \brief The recursion level for recursive Thread::dispatchMessages()
+ * calls
+ */
+ unsigned int recursion_ = 0;
};
/**
@@ -149,18 +117,20 @@ private:
friend class ThreadMain;
Thread *thread_;
- bool running_;
+ bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
pid_t tid_;
Mutex mutex_;
std::atomic<EventDispatcher *> dispatcher_;
- std::condition_variable cv_;
+ ConditionVariable cv_;
std::atomic<bool> exit_;
int exitCode_;
MessageQueue messages_;
+
+ std::optional<cpu_set_t> cpuset_;
};
/**
@@ -204,28 +174,57 @@ ThreadData *ThreadData::current()
}
/**
- * \typedef Mutex
- * \brief An alias for std::mutex
- */
-
-/**
- * \typedef MutexLocker
- * \brief An alias for std::unique_lock<std::mutex>
- */
-
-/**
* \class Thread
* \brief A thread of execution
*
* The Thread class is a wrapper around std::thread that handles integration
* with the Object, Signal and EventDispatcher classes.
*
- * Thread instances by default run an event loop until the exit() method is
- * called. A custom event dispatcher may be installed with
- * setEventDispatcher(), otherwise a poll-based event dispatcher is used. This
- * behaviour can be overriden by overloading the run() method.
- *
- * \context This class is \threadsafe.
+ * Thread instances by default run an event loop until the exit() function is
+ * called. The event loop dispatches events (messages, notifiers and timers)
+ * sent to the objects living in the thread. This behaviour can be modified by
+ * overriding the run() function.
+ *
+ * \section thread-stop Stopping Threads
+ *
+ * Threads can't be forcibly stopped. Instead, a thread user first requests the
+ * thread to exit and then waits for the thread's main function to react to the
+ * request and return, at which point the thread will stop.
+ *
+ * For threads running exec(), the exit() function is used to request the thread
+ * to exit. For threads subclassing the Thread class and implementing a custom
+ * run() function, a subclass-specific mechanism shall be provided. In either
+ * case, the wait() function shall be called to wait for the thread to stop.
+ *
+ * Due to their asynchronous nature, threads are subject to race conditions when
+ * they stop. This is of particular importance for messages posted to the thread
+ * with postMessage() (and the other mechanisms that rely on it, such as
+ * Object::invokeMethod() or asynchronous signal delivery). To understand the
+ * issues, three contexts need to be considered:
+ *
+ * - The worker is the Thread performing work and being instructed to stop.
+ * - The controller is the context which instructs the worker thread to stop.
+ * - The other contexts are any threads other than the worker and controller
+ * that interact with the worker thread.
+ *
+ * Messages posted to the worker thread from the controller context before
+ * calling exit() are queued to the thread's message queue, and the Thread class
+ * offers no guarantee that those messages will be processed before the thread
+ * stops. This allows threads to stop quickly.
+ *
+ * A thread that requires delivery of messages posted from the controller
+ * context before exit() should reimplement the run() function and call
+ * dispatchMessages() after exec().
+ *
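+ * A minimal sketch of such a run() reimplementation (the subclass name is
+ * illustrative only):
+ *
+ * \code{.cpp}
+ * class DrainingThread : public Thread
+ * {
+ * protected:
+ * 	void run() override
+ * 	{
+ * 		exec();
+ * 		// Deliver messages posted before exit() was called.
+ * 		dispatchMessages();
+ * 	}
+ * };
+ * \endcode
+ *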
+ * Messages posted to the worker thread from the other contexts are asynchronous
+ * with respect to the exit() call from the controller context. There is no
+ * guarantee as to whether those messages will be processed or not before the
+ * thread stops.
+ *
+ * Messages that are not processed will stay in the queue, in the exact same way
+ * as messages posted after the thread has stopped. They will be processed when
+ * the thread is restarted. If the thread is never restarted, they will be
+ * deleted without being processed when the Thread instance is destroyed.
*/
/**
@@ -258,6 +257,8 @@ void Thread::start()
data_->exit_.store(false, std::memory_order_relaxed);
thread_ = std::thread(&Thread::startThread, this);
+
+ setThreadAffinityInternal();
}
void Thread::startThread()
@@ -277,7 +278,7 @@ void Thread::startThread()
};
/*
- * Make sure the thread is cleaned up even if the run method exits
+ * Make sure the thread is cleaned up even if the run() function exits
* abnormally (for instance via a direct call to pthread_cancel()).
*/
thread_local ThreadCleaner cleaner(this, &Thread::finishThread);
@@ -291,12 +292,12 @@ void Thread::startThread()
/**
* \brief Enter the event loop
*
- * This method enter an event loop based on the event dispatcher instance for
- * the thread, and blocks until the exit() method is called. It is meant to be
- * called within the thread from the run() method and shall not be called
+ * This function enters an event loop based on the event dispatcher instance for
+ * the thread, and blocks until the exit() function is called. It is meant to be
+ * called within the thread from the run() function and shall not be called
* outside of the thread.
*
- * \return The exit code passed to the exit() method
+ * \return The exit code passed to the exit() function
*/
int Thread::exec()
{
@@ -315,12 +316,20 @@ int Thread::exec()
}
/**
- * \brief Main method of the thread
+ * \brief Main function of the thread
+ *
+ * When the thread is started with start(), it calls this function in the
+ * context of the new thread. The run() function can be overridden to perform
+ * custom work, either custom initialization and cleanup before and after
+ * calling the Thread::exec() function, or a custom thread loop altogether. When
+ * this function returns the thread execution is stopped, and the \ref finished
+ * signal is emitted.
*
- * When the thread is started with start(), it calls this method in the context
- * of the new thread. The run() method can be overloaded to perform custom
- * work. When this method returns the thread execution is stopped, and the \ref
- * finished signal is emitted.
+ * Note that if this function is overridden and doesn't call Thread::exec(), no
+ * events will be dispatched to the objects living in the thread. These objects
+ * will not be able to use the EventNotifier, Timer or Message facilities. This
+ * includes functions that rely on message dispatching, such as
+ * Object::deleteLater().
*
* The base implementation just calls exec().
*/
@@ -331,11 +340,17 @@ void Thread::run()
void Thread::finishThread()
{
+ /*
+ * Objects may have been scheduled for deletion right before the thread
+ * exited. Ensure they get deleted now, before the thread stops.
+ */
+ dispatchMessages(Message::Type::DeferredDelete);
+
data_->mutex_.lock();
data_->running_ = false;
data_->mutex_.unlock();
- finished.emit(this);
+ finished.emit();
data_->cv_.notify_all();
}
@@ -343,11 +358,13 @@ void Thread::finishThread()
* \brief Stop the thread's event loop
* \param[in] code The exit code
*
- * This method interrupts the event loop started by the exec() method, causing
- * exec() to return \a code.
+ * This function interrupts the event loop started by the exec() function,
+ * causing exec() to return \a code.
*
- * Calling exit() on a thread that reimplements the run() method and doesn't
+ * Calling exit() on a thread that reimplements the run() function and doesn't
* call exec() will likely have no effect.
+ *
+ * \context This function is \threadsafe.
*/
void Thread::exit(int code)
{
@@ -370,34 +387,84 @@ void Thread::exit(int code)
* utils::duration::max(), the wait never times out. If the thread is not
* running the function returns immediately.
*
+ * \context This function is \threadsafe.
+ *
* \return True if the thread has finished, or false if the wait timed out
*/
bool Thread::wait(utils::duration duration)
{
- bool finished = true;
+ bool hasFinished = true;
{
MutexLocker locker(data_->mutex_);
+ auto hasStopped = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
+ return !data_->running_;
+ });
+
if (duration == utils::duration::max())
- data_->cv_.wait(locker, [&]() { return !data_->running_; });
+ data_->cv_.wait(locker, hasStopped);
else
- finished = data_->cv_.wait_for(locker, duration,
- [&]() { return !data_->running_; });
+ hasFinished = data_->cv_.wait_for(locker, duration,
+ hasStopped);
}
if (thread_.joinable())
thread_.join();
- return finished;
+ return hasFinished;
+}
+
+/**
+ * \brief Set the CPU affinity mask of the thread
+ * \param[in] cpus The list of CPU indices to which the thread's affinity is set
+ *
+ * The CPU indices should be within [0, std::thread::hardware_concurrency()).
+ * If any index is invalid, this function won't modify the thread affinity and
+ * will return an error.
+ *
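+ * A usage sketch, pinning a thread to CPUs 0 and 1 (assuming both indices are
+ * valid on the target system):
+ *
+ * \code{.cpp}
+ * Thread thread;
+ * const unsigned int cpus[] = { 0, 1 };
+ *
+ * thread.setThreadAffinity(cpus);
+ * thread.start();
+ * \endcode
+ *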
+ * \return 0 if all indices are valid, -EINVAL otherwise
+ */
+int Thread::setThreadAffinity(const Span<const unsigned int> &cpus)
+{
+ const unsigned int numCpus = std::thread::hardware_concurrency();
+
+ MutexLocker locker(data_->mutex_);
+ data_->cpuset_ = cpu_set_t();
+ CPU_ZERO(&data_->cpuset_.value());
+
+ for (const unsigned int &cpu : cpus) {
+ if (cpu >= numCpus) {
+ LOG(Thread, Error) << "Invalid CPU " << cpu << " for thread affinity";
+ return -EINVAL;
+ }
+
+ CPU_SET(cpu, &data_->cpuset_.value());
+ }
+
+ if (data_->running_)
+ setThreadAffinityInternal();
+
+ return 0;
+}
+
+void Thread::setThreadAffinityInternal()
+{
+ if (!data_->cpuset_)
+ return;
+
+ const cpu_set_t &cpuset = data_->cpuset_.value();
+ pthread_setaffinity_np(thread_.native_handle(), sizeof(cpuset), &cpuset);
}
/**
* \brief Check if the thread is running
*
* A Thread instance is considered as running once the underlying thread has
- * started. This method guarantees that it returns true after the start()
- * method returns, and false after the wait() method returns.
+ * started. This function guarantees that it returns true after the start()
+ * function returns, and false after the wait() function returns.
+ *
+ * \context This function is \threadsafe.
*
* \return True if the thread is running, false otherwise
*/
@@ -414,6 +481,7 @@ bool Thread::isRunning()
/**
* \brief Retrieve the Thread instance for the current thread
+ * \context This function is \threadsafe.
* \return The Thread instance for the current thread
*/
Thread *Thread::current()
@@ -428,6 +496,8 @@ Thread *Thread::current()
* The thread ID corresponds to the Linux thread ID (TID) as returned by the
* gettid system call.
*
+ * \context This function is \threadsafe.
+ *
* \return The ID of the current thread
*/
pid_t Thread::currentId()
@@ -437,37 +507,12 @@ pid_t Thread::currentId()
}
/**
- * \brief Set the event dispatcher
- * \param[in] dispatcher Pointer to the event dispatcher
- *
- * Threads that run an event loop require an event dispatcher to integrate
- * event notification and timers with the loop. Users that want to provide
- * their own event dispatcher shall call this method once and only once before
- * the thread is started with start(). If no event dispatcher is provided, a
- * default poll-based implementation will be used.
- *
- * The Thread takes ownership of the event dispatcher and will delete it when
- * the thread is destroyed.
- */
-void Thread::setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher)
-{
- if (data_->dispatcher_.load(std::memory_order_relaxed)) {
- LOG(Thread, Warning) << "Event dispatcher is already set";
- return;
- }
-
- data_->dispatcher_.store(dispatcher.release(),
- std::memory_order_relaxed);
-}
-
-/**
* \brief Retrieve the event dispatcher
*
- * This method retrieves the event dispatcher set with setEventDispatcher().
- * If no dispatcher has been set, a default poll-based implementation is created
- * and returned, and no custom event dispatcher may be installed anymore.
+ * This function retrieves the internal event dispatcher for the thread. The
+ * returned event dispatcher is valid until the thread is destroyed.
*
- * The returned event dispatcher is valid until the thread is destroyed.
+ * \context This function is \threadsafe.
*
* \return Pointer to the event dispatcher
*/
@@ -485,16 +530,21 @@ EventDispatcher *Thread::eventDispatcher()
* \param[in] msg The message
* \param[in] receiver The receiver
*
- * This method stores the message \a msg in the message queue of the thread for
- * the \a receiver and wake up the thread's event loop. Message ownership is
+ * This function stores the message \a msg in the message queue of the thread
+ * for the \a receiver and wakes up the thread's event loop. Message ownership is
* passed to the thread, and the message will be deleted after being delivered.
*
* Messages are delivered through the thread's event loop. If the thread is not
* running its event loop the message will not be delivered until the event
* loop gets started.
*
+ * When the thread is stopped, posted messages may not have all been processed.
+ * See \ref thread-stop for additional information.
+ *
* If the \a receiver is not bound to this thread the behaviour is undefined.
*
+ * \context This function is \threadsafe.
+ *
* \sa exec()
*/
void Thread::postMessage(std::unique_ptr<Message> msg, Object *receiver)
@@ -552,27 +602,68 @@ void Thread::removeMessages(Object *receiver)
}
/**
- * \brief Dispatch all posted messages for this thread
+ * \brief Dispatch posted messages for this thread
+ * \param[in] type The message type
+ *
+ * This function immediately dispatches all the messages previously posted for
+ * this thread with postMessage() that match the message \a type. If the \a type
+ * is Message::Type::None, all messages are dispatched.
+ *
+ * Messages shall only be dispatched from the current thread, typically from
+ * within the run() function. Calling this function outside of the
+ * thread results in undefined behaviour.
+ *
+ * This function is not thread-safe, but it may be called recursively in the
+ * same thread from an object's message handler. It guarantees delivery of
+ * messages in the order they have been posted in all cases.
*/
-void Thread::dispatchMessages()
+void Thread::dispatchMessages(Message::Type type)
{
+ ASSERT(data_ == ThreadData::current());
+
+ ++data_->messages_.recursion_;
+
MutexLocker locker(data_->messages_.mutex_);
- while (!data_->messages_.list_.empty()) {
- std::unique_ptr<Message> msg = std::move(data_->messages_.list_.front());
- data_->messages_.list_.pop_front();
+ std::list<std::unique_ptr<Message>> &messages = data_->messages_.list_;
+
+ for (std::unique_ptr<Message> &msg : messages) {
if (!msg)
continue;
- Object *receiver = msg->receiver_;
- ASSERT(data_ == receiver->thread()->data_);
+ if (type != Message::Type::None && msg->type() != type)
+ continue;
+ /*
+ * Move the message, setting the entry in the list to null. It
+ * will cause recursive calls to ignore the entry, and the erase
+ * loop at the end of the function to delete it from the list.
+ */
+ std::unique_ptr<Message> message = std::move(msg);
+
+ Object *receiver = message->receiver_;
+ ASSERT(data_ == receiver->thread()->data_);
receiver->pendingMessages_--;
locker.unlock();
- receiver->message(msg.get());
+ receiver->message(message.get());
+ message.reset();
locker.lock();
}
+
+ /*
+ * If the recursion level is 0, erase all null messages in the list. We
+ * can't do so during recursion, as it would invalidate the iterator of
+ * the outer calls.
+ */
+ if (!--data_->messages_.recursion_) {
+ for (auto iter = messages.begin(); iter != messages.end(); ) {
+ if (!*iter)
+ iter = messages.erase(iter);
+ else
+ ++iter;
+ }
+ }
}
/**
diff --git a/src/libcamera/timer.cpp b/src/libcamera/base/timer.cpp
index 24da5152..7b0f3725 100644
--- a/src/libcamera/timer.cpp
+++ b/src/libcamera/base/timer.cpp
@@ -2,23 +2,23 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Generic timer
+ * Generic timer
*/
-#include <libcamera/timer.h>
+#include <libcamera/base/timer.h>
#include <chrono>
-#include <libcamera/camera_manager.h>
-#include <libcamera/event_dispatcher.h>
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
-#include "log.h"
-#include "message.h"
-#include "thread.h"
-#include "utils.h"
+#include <libcamera/camera_manager.h>
/**
- * \file timer.h
+ * \file base/timer.h
* \brief Generic timer
*/
@@ -63,16 +63,6 @@ Timer::~Timer()
}
/**
- * \fn Timer::start(unsigned int msec)
- * \brief Start or restart the timer with a timeout of \a msec
- * \param[in] msec The timer duration in milliseconds
- *
- * If the timer is already running it will be stopped and restarted.
- *
- * \context This function is \threadbound.
- */
-
-/**
* \brief Start or restart the timer with a timeout of \a duration
* \param[in] duration The timer duration in milliseconds
*
@@ -95,10 +85,8 @@ void Timer::start(std::chrono::milliseconds duration)
*/
void Timer::start(std::chrono::steady_clock::time_point deadline)
{
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer can't be started from another thread";
+ if (!assertThreadBound("Timer can't be started from another thread"))
return;
- }
deadline_ = deadline;
@@ -124,13 +112,11 @@ void Timer::start(std::chrono::steady_clock::time_point deadline)
*/
void Timer::stop()
{
- if (!isRunning())
+ if (!assertThreadBound("Timer can't be stopped from another thread"))
return;
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer can't be stopped from another thread";
+ if (!isRunning())
return;
- }
unregisterTimer();
}
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
new file mode 100644
index 00000000..d0649e4d
--- /dev/null
+++ b/src/libcamera/base/unique_fd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * File descriptor wrapper that owns a file descriptor
+ */
+
+#include <libcamera/base/unique_fd.h>
+
+#include <unistd.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/unique_fd.h
+ * \brief File descriptor wrapper that owns a file descriptor
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(UniqueFD)
+
+/**
+ * \class UniqueFD
+ * \brief unique_ptr-like wrapper for a file descriptor
+ *
+ * The UniqueFD is a wrapper that owns and manages the lifetime of a file
+ * descriptor. It is constructed from a numerical file descriptor, and takes
+ * over its ownership. The file descriptor is closed when the UniqueFD is
+ * destroyed, or when it is assigned another file descriptor with operator=()
+ * or reset().
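+ *
+ * A minimal usage sketch (the device path is illustrative only):
+ *
+ * \code{.cpp}
+ * UniqueFD fd(open("/dev/video0", O_RDWR));
+ * if (!fd.isValid())
+ * 	return -errno;
+ *
+ * // The file descriptor is closed when fd goes out of scope.
+ * \endcode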
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD()
+ * \brief Construct a UniqueFD that owns no file descriptor
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD(int fd)
+ * \brief Construct a UniqueFD that owns \a fd
+ * \param[in] fd A file descriptor to manage
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD(UniqueFD &&other)
+ * \brief Move constructor, create a UniqueFD by taking over \a other
+ * \param[in] other The other UniqueFD
+ *
+ * Create a UniqueFD by transferring ownership of the file descriptor owned by
+ * \a other. Upon return, the \a other UniqueFD is invalid.
+ */
+
+/**
+ * \fn UniqueFD::~UniqueFD()
+ * \brief Destroy the UniqueFD instance
+ *
+ * If a file descriptor is owned, it is closed.
+ */
+
+/**
+ * \fn UniqueFD::operator=(UniqueFD &&other)
+ * \brief Move assignment operator, replace a UniqueFD by taking over \a other
+ * \param[in] other The other UniqueFD
+ *
+ * If this UniqueFD owns a file descriptor, the file descriptor is closed
+ * first. The file descriptor is then replaced by the one of \a other. Upon
+ * return, \a other is invalid.
+ *
+ * \return A reference to this UniqueFD
+ */
+
+/**
+ * \fn UniqueFD::release()
+ * \brief Release ownership of the file descriptor without closing it
+ *
+ * This function releases and returns the owned file descriptor without closing
+ * it. The caller owns the returned value and must manage its lifetime to
+ * avoid file descriptor leaks. Upon return this UniqueFD is invalid.
+ *
+ * \return The managed file descriptor, or -1 if no file descriptor was owned
+ */
+
+/**
+ * \brief Replace the managed file descriptor
+ * \param[in] fd The new file descriptor to manage
+ *
+ * Close the managed file descriptor, if any, and replace it with the new \a fd.
+ *
+ * Self-resetting (passing an \a fd already managed by this instance) is invalid
+ * and results in undefined behaviour.
+ */
+void UniqueFD::reset(int fd)
+{
+ ASSERT(!isValid() || fd != fd_);
+
+ std::swap(fd, fd_);
+
+ if (fd >= 0)
+ close(fd);
+}
+
+/**
+ * \fn UniqueFD::swap(UniqueFD &other)
+ * \brief Swap the managed file descriptors with another UniqueFD
+ * \param[in] other Another UniqueFD to swap the file descriptor with
+ */
+
+/**
+ * \fn UniqueFD::get()
+ * \brief Retrieve the managed file descriptor
+ * \return The managed file descriptor, or -1 if no file descriptor is owned
+ */
+
+/**
+ * \fn UniqueFD::isValid()
+ * \brief Check if the UniqueFD owns a valid file descriptor
+ * \return True if the UniqueFD owns a valid file descriptor, false otherwise
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
new file mode 100644
index 00000000..bcfc1941
--- /dev/null
+++ b/src/libcamera/base/utils.cpp
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Miscellaneous utility functions
+ */
+
+#include <libcamera/base/utils.h>
+
+#include <iomanip>
+#include <locale.h>
+#include <sstream>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/**
+ * \file base/utils.h
+ * \brief Miscellaneous utility functions
+ */
+
+namespace libcamera {
+
+namespace utils {
+
+/**
+ * \brief Strip the directory prefix from the path
+ * \param[in] path The path to process
+ *
+ * basename is implemented differently across different C libraries. This
+ * implementation matches the one provided by the GNU libc, and does not
+ * modify its input parameter.
+ *
+ * \return A pointer within the given path without any leading directory
+ * components.
+ */
+const char *basename(const char *path)
+{
+ const char *base = strrchr(path, '/');
+ return base ? base + 1 : path;
+}
+
+/**
+ * \brief Get an environment variable
+ * \param[in] name The name of the variable to return
+ *
+ * The environment list is searched to find the variable 'name', and the
+ * corresponding string is returned.
+ *
+ * If 'secure execution' is required then this function always returns NULL to
+ * avoid vulnerabilities that could occur if set-user-ID or set-group-ID
+ * programs accidentally trust the environment.
+ *
+ * \note Not all platforms may support the features required to implement the
+ * secure execution check, in which case this function behaves as getenv(). A
+ * notable example of this is Android.
+ *
+ * \return A pointer to the value in the environment or NULL if the requested
+ * environment variable doesn't exist or if secure execution is required.
+ */
+char *secure_getenv(const char *name)
+{
+#if HAVE_SECURE_GETENV
+ return ::secure_getenv(name);
+#else
+#if HAVE_ISSETUGID
+ if (issetugid())
+ return NULL;
+#endif
+ return getenv(name);
+#endif
+}
+
+/**
+ * \brief Identify the dirname portion of a path
+ * \param[in] path The full path to parse
+ *
+ * This function conforms with the behaviour of the %dirname() function as
+ * defined by POSIX.
+ *
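+ * For example, following POSIX %dirname() semantics:
+ *
+ * \code{.cpp}
+ * utils::dirname("/usr/lib/libcamera.so");  // "/usr/lib"
+ * utils::dirname("libcamera.so");           // "."
+ * utils::dirname("///");                    // "/"
+ * \endcode
+ *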
+ * \return A string of the directory component of the path
+ */
+std::string dirname(const std::string &path)
+{
+ if (path.empty())
+ return ".";
+
+ /*
+ * Skip all trailing slashes. If the path is only made of slashes,
+ * return "/".
+ */
+ size_t pos = path.size() - 1;
+ while (path[pos] == '/') {
+ if (!pos)
+ return "/";
+ pos--;
+ }
+
+ /*
+ * Find the previous slash. If the path contains no non-trailing slash,
+ * return ".".
+ */
+ while (path[pos] != '/') {
+ if (!pos)
+ return ".";
+ pos--;
+ }
+
+ /*
+ * Return the directory name up to (but not including) any trailing
+ * slash. If this would result in an empty string, return "/".
+ */
+ while (path[pos] == '/') {
+ if (!pos)
+ return "/";
+ pos--;
+ }
+
+ return path.substr(0, pos + 1);
+}
+
+/**
+ * \fn std::vector<typename T::key_type> map_keys(const T &map)
+ * \brief Retrieve the keys of a std::map<>
+ * \param[in] map The map whose keys to retrieve
+ * \return A std::vector<> containing the keys of \a map
+ */
+
+/**
+ * \fn libcamera::utils::set_overlap(InputIt1 first1, InputIt1 last1,
+ * InputIt2 first2, InputIt2 last2)
+ * \brief Count the number of elements in the intersection of two ranges
+ *
+ * Count the number of elements in the intersection of the sorted ranges [\a
+ * first1, \a last1) and [\a first2, \a last2). Elements are compared using
+ * operator< and the ranges must be sorted with respect to the same ordering.
+ *
+ * \return The number of elements in the intersection of the two ranges
+ */
+
+/**
+ * \typedef clock
+ * \brief The libcamera clock (monotonic)
+ */
+
+/**
+ * \typedef duration
+ * \brief The libcamera duration related to libcamera::utils::clock
+ */
+
+/**
+ * \typedef time_point
+ * \brief The libcamera time point related to libcamera::utils::clock
+ */
+
+/**
+ * \brief Convert a duration to a timespec
+ * \param[in] value The duration
+ * \return A timespec expressing the duration
+ */
+struct timespec duration_to_timespec(const duration &value)
+{
+ uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(value).count();
+ struct timespec ts;
+ ts.tv_sec = nsecs / 1000000000ULL;
+ ts.tv_nsec = nsecs % 1000000000ULL;
+ return ts;
+}
+
+/**
+ * \brief Convert a time point to a string representation
+ * \param[in] time The time point
+ * \return A string representing the time point in hh:mm:ss.nanoseconds format
+ */
+std::string time_point_to_string(const time_point &time)
+{
+ uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch()).count();
+ unsigned int secs = nsecs / 1000000000ULL;
+
+ std::ostringstream ossTimestamp;
+ ossTimestamp.fill('0');
+ ossTimestamp << secs / (60 * 60) << ":"
+ << std::setw(2) << (secs / 60) % 60 << ":"
+ << std::setw(2) << secs % 60 << "."
+ << std::setw(9) << nsecs % 1000000000ULL;
+ return ossTimestamp.str();
+}
+
+std::basic_ostream<char, std::char_traits<char>> &
+operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h)
+{
+ stream << "0x";
+
+ std::ostream::fmtflags flags = stream.setf(std::ios_base::hex,
+ std::ios_base::basefield);
+ std::streamsize width = stream.width(h.w);
+ char fill = stream.fill('0');
+
+ stream << h.v;
+
+ stream.flags(flags);
+ stream.width(width);
+ stream.fill(fill);
+
+ return stream;
+}
+
+/**
+ * \fn hex(T value, unsigned int width)
+ * \brief Write a hexadecimal value to an output stream
+ * \param value The value
+ * \param width The width
+ *
+ * Return an object of unspecified type such that, if \a os is the name of an
+ * output stream of type std::ostream, and T is an integer type, then the
+ * expression
+ *
+ * \code{.cpp}
+ * os << utils::hex(value)
+ * \endcode
+ *
+ * will output the \a value to the stream in hexadecimal form with the base
+ * prefix and the filling character set to '0'. The field width is set to \a
+ * width if specified to a non-zero value, or to the native width of type T
+ * otherwise. The \a os stream configuration is not modified.
+ */
+
+/**
+ * \brief Copy a string with a size limit
+ * \param[in] dst The destination string
+ * \param[in] src The source string
+ * \param[in] size The size of the destination string
+ *
+ * This function copies the null-terminated string \a src to \a dst with a limit
+ * of \a size - 1 characters, and null-terminates the result if \a size is
+ * larger than 0. If \a src is larger than \a size - 1, \a dst is truncated.
+ *
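+ * The return value makes truncation easy to detect, as in this sketch:
+ *
+ * \code{.cpp}
+ * const char *src = "a fairly long source string";
+ * char buf[8];
+ *
+ * if (utils::strlcpy(buf, src, sizeof(buf)) >= sizeof(buf)) {
+ * 	// The copy was truncated; buf holds "a fairl".
+ * }
+ * \endcode
+ *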
+ * \return The length of \a src, excluding the terminating null byte
+ */
+size_t strlcpy(char *dst, const char *src, size_t size)
+{
+ if (size) {
+ strncpy(dst, src, size);
+ dst[size - 1] = '\0';
+ }
+
+ return strlen(src);
+}
+
+details::StringSplitter::StringSplitter(const std::string &str, const std::string &delim)
+ : str_(str), delim_(delim)
+{
+}
+
+details::StringSplitter::iterator::iterator(const details::StringSplitter *ss, std::string::size_type pos)
+ : ss_(ss), pos_(pos)
+{
+ next_ = ss_->str_.find(ss_->delim_, pos_);
+}
+
+details::StringSplitter::iterator &details::StringSplitter::iterator::operator++()
+{
+ pos_ = next_;
+ if (pos_ != std::string::npos) {
+ pos_ += ss_->delim_.length();
+ next_ = ss_->str_.find(ss_->delim_, pos_);
+ }
+
+ return *this;
+}
+
+std::string details::StringSplitter::iterator::operator*() const
+{
+ std::string::size_type count;
+ count = next_ != std::string::npos ? next_ - pos_ : next_;
+ return ss_->str_.substr(pos_, count);
+}
+
+/**
+ * \fn template<typename Container, typename UnaryOp> \
+ * std::string utils::join(const Container &items, const std::string &sep, UnaryOp op)
+ * \brief Join elements of a container in a string with a separator
+ * \param[in] items The container
+ * \param[in] sep The separator to add between elements
+ * \param[in] op A function that converts individual elements to strings
+ *
+ * This function joins all elements in the \a items container into a string and
+ * returns it. The \a sep separator is added between elements. If the container
+ * elements are not implicitly convertible to std::string, the \a op function
+ * shall be provided to perform conversion of elements to std::string.
+ *
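+ * For example, with elements that need explicit conversion (a usage sketch):
+ *
+ * \code{.cpp}
+ * std::vector<int> values{ 1, 2, 3 };
+ * std::string s = utils::join(values, ", ",
+ * 			       [](int v) { return std::to_string(v); });
+ * // s == "1, 2, 3"
+ * \endcode
+ *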
+ * \return A string that concatenates all elements in the container
+ */
+
+/**
+ * \fn split(const std::string &str, const std::string &delim)
+ * \brief Split a string based on a delimiter
+ * \param[in] str The string to split
+ * \param[in] delim The delimiter string
+ *
+ * This function splits the string \a str into substrings based on the
+ * delimiter \a delim. It returns an object of unspecified type that can be
+ * used in a range-based for loop and yields the substrings in sequence.
+ *
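+ * For example (a usage sketch):
+ *
+ * \code{.cpp}
+ * std::vector<std::string> parts;
+ *
+ * for (const std::string &part : utils::split("a,b,c", ","))
+ * 	parts.push_back(part);  // Yields "a", "b" and "c" in sequence
+ * \endcode
+ *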
+ * \return An object that can be used in a range-based for loop to iterate over
+ * the substrings
+ */
+details::StringSplitter split(const std::string &str, const std::string &delim)
+{
+ /** \todo Try to avoid copies of str and delim */
+ return details::StringSplitter(str, delim);
+}
+
+/**
+ * \brief Remove any non-ASCII characters from a string
+ * \param[in] str The string to strip
+ *
+ * Remove all non-ASCII characters from a string.
+ *
+ * \return A string equal to \a str stripped out of all non-ASCII characters
+ */
+std::string toAscii(const std::string &str)
+{
+ std::string ret;
+ for (const char &c : str)
+ if (!(c & 0x80))
+ ret += c;
+ return ret;
+}
+
+/**
+ * \fn alignDown(unsigned int value, unsigned int alignment)
+ * \brief Align \a value down to \a alignment
+ * \param[in] value The value to align
+ * \param[in] alignment The alignment
+ * \return The value rounded down to the nearest multiple of \a alignment
+ */
+
+/**
+ * \fn alignUp(unsigned int value, unsigned int alignment)
+ * \brief Align \a value up to \a alignment
+ * \param[in] value The value to align
+ * \param[in] alignment The alignment
+ * \return The value rounded up to the nearest multiple of \a alignment
+ */
+
+/**
+ * \fn reverse(T &&iterable)
+ * \brief Wrap an iterable to reverse iteration in a range-based loop
+ * \param[in] iterable The iterable
+ * \return A value of unspecified type that, when used in a range-based for
+ * loop, will cause the loop to iterate over the \a iterable in reverse order
+ */
+
+/**
+ * \fn enumerate(T &iterable)
+ * \brief Wrap an iterable to enumerate index and value in a range-based loop
+ * \param[in] iterable The iterable
+ *
+ * Range-based for loops are handy and widely preferred in C++, but are limited
+ * in their ability to replace for loops that require access to a loop counter.
+ * The enumerate() function solves this problem by wrapping the \a iterable in
+ * an adapter that, when used as a range-expression, will provide iterators
+ * whose value_type is a pair of index and value reference.
+ *
+ * The iterable must support std::begin() and std::end(). This includes all
+ * containers provided by the standard C++ library, as well as C-style arrays.
+ *
+ * A typical usage pattern would use structured binding to store the index and
+ * value in two separate variables:
+ *
+ * \code{.cpp}
+ * std::vector<int> values = ...;
+ *
+ * for (auto [index, value] : utils::enumerate(values)) {
+ * ...
+ * }
+ * \endcode
+ *
+ * Note that the argument to enumerate() has to be an lvalue, as the lifetime
+ * of any rvalue would not be extended to the whole for loop. The compiler will
+ * complain if an rvalue is passed to the function, in which case it should be
+ * stored in a local variable before the loop.
+ *
+ * \return A value of unspecified type that, when used in a range-based for
+ * loop, iterates over an indexed view of the \a iterable
+ */
+
+/**
+ * \class Duration
+ * \brief Helper class from std::chrono::duration that represents a time
+ * duration in nanoseconds with double precision
+ */
+
+/**
+ * \fn Duration::Duration(const Rep &r)
+ * \brief Construct a Duration with \a r ticks
+ * \param[in] r The number of ticks
+ *
+ * The constructed \a Duration object is internally represented in double
+ * precision with \a r nanoseconds ticks.
+ */
+
+/**
+ * \fn Duration::Duration(const std::chrono::duration<Rep, Period> &d)
+ * \brief Construct a Duration by converting an arbitrary std::chrono::duration
+ * \param[in] d The std::chrono::duration object to convert from
+ *
+ * The constructed \a Duration object is internally represented in double
+ * precision with nanoseconds ticks.
+ */
+
+/**
+ * \fn Duration::get<Period>()
+ * \brief Retrieve the tick count, converted to the timebase provided by the
+ * template argument Period of type \a std::ratio
+ *
+ * A typical usage example is given below:
+ *
+ * \code{.cpp}
+ * utils::Duration d = 5s;
+ * double d_in_ms = d.get<std::milli>();
+ * \endcode
+ *
+ * \return The tick count of the Duration expressed in \a Period
+ */
+
+/**
+ * \fn Duration::operator bool()
+ * \brief Boolean operator to test if a \a Duration holds a non-zero time value
+ *
+ * \return True if \a Duration is a non-zero time value, false otherwise
+ */
+
+/**
+ * \fn abs_diff(const T& a, const T& b)
+ * \brief Calculates the absolute value of the difference between two elements
+ * \param[in] a The first element
+ * \param[in] b The second element
+ *
+ * This function calculates the absolute value of the difference between two
+ * elements of the same type, in such a way that a negative value will never
+ * occur during the calculation.
+ *
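+ * For example, with unsigned operands (a usage sketch):
+ *
+ * \code{.cpp}
+ * unsigned int a = 2, b = 5;
+ * unsigned int d = utils::abs_diff(a, b);  // 3, with no unsigned underflow
+ * \endcode
+ *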
+ * This is inspired by the std::abs_diff() candidate proposed in N4318
+ * (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4318.pdf).
+ *
+ * \return The absolute value of the difference of the two parameters \a a and
+ * \a b
+ */
+
+#if HAVE_LOCALE_T
+
+namespace {
+
+/*
+ * RAII wrapper around locale_t instances, to support global locale instances
+ * without leaking memory.
+ */
+class Locale
+{
+public:
+ Locale(const char *locale)
+ {
+ locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
+ }
+
+ ~Locale()
+ {
+ freelocale(locale_);
+ }
+
+ locale_t locale() { return locale_; }
+
+private:
+ locale_t locale_;
+};
+
+Locale cLocale("C");
+
+} /* namespace */
+
+#endif /* HAVE_LOCALE_T */
+
+/**
+ * \brief Convert a string to a double independently of the current locale
+ * \param[in] nptr The string to convert
+ * \param[out] endptr Pointer to trailing portion of the string after conversion
+ *
+ * This function is a locale-independent version of the std::strtod() function.
+ * It behaves as the standard function, but uses the "C" locale instead of the
+ * current locale.
+ *
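+ * For example, the string "1.5" converts to 1.5 even under a locale that uses
+ * a comma as the decimal separator (a usage sketch):
+ *
+ * \code{.cpp}
+ * char *end;
+ * double v = utils::strtod("1.5", &end);  // v == 1.5 in any locale
+ * \endcode
+ *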
+ * \return The converted value, if any, or 0.0 if the conversion failed.
+ */
+double strtod(const char *__restrict nptr, char **__restrict endptr)
+{
+#if HAVE_LOCALE_T
+ return strtod_l(nptr, endptr, cLocale.locale());
+#else
+ /*
+ * If the libc implementation doesn't provide locale object support,
+ * assume that strtod() is locale-independent.
+ */
+ return ::strtod(nptr, endptr);
+#endif
+}
+
+/**
+ * \fn to_underlying(Enum e)
+ * \brief Convert an enumeration to its underlying type
+ * \param[in] e Enumeration value to convert
+ *
+ * This function is equivalent to the C++23 std::to_underlying().
+ *
+ * \return The value of e converted to its underlying type
+ */
+
+/**
+ * \class ScopeExitActions
+ * \brief An object that performs actions upon destruction
+ *
+ * The ScopeExitActions class is a simple object that performs user-provided
+ * actions upon destruction. It is meant to simplify cleanup tasks in error
+ * handling paths.
+ *
+ * When the code flow performs multiple sequential actions that each need a
+ * corresponding cleanup action, error handling quickly becomes tedious:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret) {
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * ret = startConsumer();
+ * if (ret) {
+ * stopProducer();
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * return 0;
+ * }
+ * \endcode
+ *
+ * This is prone to programming mistakes, as cleanup actions can easily be
+ * forgotten or ordered incorrectly. One strategy to simplify error handling is
+ * to use goto statements:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret)
+ * goto error_free;
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * goto error_stop;
+ *
+ * return 0;
+ *
+ * error_stop:
+ * stopProducer();
+ * error_free:
+ * freeMemory();
+ * return ret;
+ * }
+ * \endcode
+ *
+ * While this may be considered better, this solution is still quite
+ * error-prone. Besides the risk of picking the wrong error label, the error
+ * handling logic is separated from the normal code flow, which increases the
+ * risk of error when refactoring the code. Additionally, C++ doesn't allow
+ * goto statements to jump over local variable declarations, which can make
+ * usage of this pattern more difficult.
+ *
+ * The ScopeExitActions class solves these issues by allowing code that
+ * requires cleanup actions to be grouped with its corresponding error handling
+ * code:
+ *
+ * \code{.cpp}
+ * {
+ * ScopeExitActions actions;
+ *
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { freeMemory(); };
+ *
+ * ret = startProducer();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { stopProducer(); };
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * return ret;
+ *
+ * actions.release();
+ * return 0;
+ * }
+ * \endcode
+ *
+ * Error handlers are executed when the ScopeExitActions instance is destroyed,
+ * in the reverse order of their addition.
+ */
+
+ScopeExitActions::~ScopeExitActions()
+{
+ for (const auto &action : utils::reverse(actions_))
+ action();
+}
+
+/**
+ * \brief Add an exit action
+ * \param[in] action The action
+ *
+ * Add an exit action to the ScopeExitActions. Actions will be called upon
+ * destruction in the reverse order of their addition.
+ */
+void ScopeExitActions::operator+=(std::function<void()> &&action)
+{
+ actions_.push_back(std::move(action));
+}
+
+/**
+ * \brief Remove all exit actions
+ *
+ * This function should be called in scope exit paths that don't need the
+ * actions to be executed, such as success return paths from a function when
+ * the ScopeExitActions is used for error cleanup.
+ */
+void ScopeExitActions::release()
+{
+ actions_.clear();
+}
+
+} /* namespace utils */
+
+#ifndef __DOXYGEN__
+template<class CharT, class Traits>
+std::basic_ostream<CharT, Traits> &operator<<(std::basic_ostream<CharT, Traits> &os,
+ const utils::Duration &d)
+{
+ std::basic_ostringstream<CharT, Traits> s;
+
+ s.flags(os.flags());
+ s.imbue(os.getloc());
+ s.setf(std::ios_base::fixed, std::ios_base::floatfield);
+ s.precision(2);
+ s << d.get<std::micro>() << "us";
+ return os << s.str();
+}
+
+template
+std::basic_ostream<char, std::char_traits<char>> &
+operator<< <char, std::char_traits<char>>(std::basic_ostream<char, std::char_traits<char>> &os,
+ const utils::Duration &d);
+#endif
+
+} /* namespace libcamera */
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
new file mode 100644
index 00000000..3dab91fc
--- /dev/null
+++ b/src/libcamera/bayer_format.cpp
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Class to represent Bayer formats
+ */
+
+#include "libcamera/internal/bayer_format.h"
+
+#include <algorithm>
+#include <map>
+#include <sstream>
+#include <unordered_map>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/transform.h>
+
+/**
+ * \file bayer_format.h
+ * \brief Class to represent Bayer formats and manipulate them
+ */
+
+namespace libcamera {
+
+/**
+ * \class BayerFormat
+ * \brief Class to represent a raw image Bayer format
+ *
+ * This class encodes the different Bayer formats in such a way that they can
+ * be easily manipulated. For example, the bit depth or Bayer order can be
+ * easily altered - the Bayer order can even be "transformed" in the same
+ * manner as happens in many sensors when their horizontal or vertical "flip"
+ * controls are set.
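+ *
+ * A usage sketch (the chosen format values are illustrative only):
+ *
+ * \code{.cpp}
+ * BayerFormat bayer(BayerFormat::BGGR, 10, BayerFormat::Packing::CSI2);
+ * PixelFormat pixelFormat = bayer.toPixelFormat();  // formats::SBGGR10_CSI2P
+ * \endcode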
+ */
+
+/**
+ * \enum BayerFormat::Order
+ * \brief The order of the colour channels in the Bayer pattern
+ *
+ * \var BayerFormat::BGGR
+ * \brief B then G on the first row, G then R on the second row.
+ * \var BayerFormat::GBRG
+ * \brief G then B on the first row, R then G on the second row.
+ * \var BayerFormat::GRBG
+ * \brief G then R on the first row, B then G on the second row.
+ * \var BayerFormat::RGGB
+ * \brief R then G on the first row, G then B on the second row.
+ * \var BayerFormat::MONO
+ * \brief Monochrome image data, there is no colour filter array.
+ */
+
+/**
+ * \enum BayerFormat::Packing
+ * \brief Different types of packing that can be applied to a BayerFormat
+ *
+ * \var BayerFormat::Packing::None
+ * \brief No packing
+ * \var BayerFormat::Packing::CSI2
+ * \brief Format uses MIPI CSI-2 style packing
+ * \var BayerFormat::Packing::IPU3
+ * \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
+ */
+
+namespace {
+
+/* Define a slightly arbitrary ordering so that we can use a std::map. */
+struct BayerFormatComparator {
+ constexpr bool operator()(const BayerFormat &lhs, const BayerFormat &rhs) const
+ {
+ if (lhs.bitDepth < rhs.bitDepth)
+ return true;
+ else if (lhs.bitDepth > rhs.bitDepth)
+ return false;
+
+ if (lhs.order < rhs.order)
+ return true;
+ else if (lhs.order > rhs.order)
+ return false;
+
+ if (lhs.packing < rhs.packing)
+ return true;
+ else
+ return false;
+ }
+};
+
+struct Formats {
+ PixelFormat pixelFormat;
+ V4L2PixelFormat v4l2Format;
+};
+
+const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
+ { { BayerFormat::BGGR, 8, BayerFormat::Packing::None },
+ { formats::SBGGR8, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8) } },
+ { { BayerFormat::GBRG, 8, BayerFormat::Packing::None },
+ { formats::SGBRG8, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8) } },
+ { { BayerFormat::GRBG, 8, BayerFormat::Packing::None },
+ { formats::SGRBG8, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8) } },
+ { { BayerFormat::RGGB, 8, BayerFormat::Packing::None },
+ { formats::SRGGB8, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::None },
+ { formats::SBGGR10, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::None },
+ { formats::SGBRG10, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::None },
+ { formats::SGRBG10, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::None },
+ { formats::SRGGB10, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::IPU3 },
+ { formats::SBGGR10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::IPU3 },
+ { formats::SGBRG10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::IPU3 },
+ { formats::SGRBG10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::IPU3 },
+ { formats::SRGGB10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10) } },
+ { { BayerFormat::BGGR, 12, BayerFormat::Packing::None },
+ { formats::SBGGR12, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12) } },
+ { { BayerFormat::GBRG, 12, BayerFormat::Packing::None },
+ { formats::SGBRG12, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12) } },
+ { { BayerFormat::GRBG, 12, BayerFormat::Packing::None },
+ { formats::SGRBG12, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12) } },
+ { { BayerFormat::RGGB, 12, BayerFormat::Packing::None },
+ { formats::SRGGB12, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12) } },
+ { { BayerFormat::BGGR, 12, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P) } },
+ { { BayerFormat::GBRG, 12, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P) } },
+ { { BayerFormat::GRBG, 12, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P) } },
+ { { BayerFormat::RGGB, 12, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::None },
+ { formats::SBGGR14, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::None },
+ { formats::SGBRG14, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::None },
+ { formats::SGRBG14, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::None },
+ { formats::SRGGB14, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::None },
+ { formats::SBGGR16, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::None },
+ { formats::SGBRG16, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::None },
+ { formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
+ { formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
+ { { BayerFormat::MONO, 8, BayerFormat::Packing::None },
+ { formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
+ { { BayerFormat::MONO, 10, BayerFormat::Packing::None },
+ { formats::R10, V4L2PixelFormat(V4L2_PIX_FMT_Y10) } },
+ { { BayerFormat::MONO, 10, BayerFormat::Packing::CSI2 },
+ { formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::None },
+ { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::CSI2 },
+ { formats::R12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y12P) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::None },
+ { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
+};
+
+const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
+ { MEDIA_BUS_FMT_SBGGR8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, { BayerFormat::GBRG, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, { BayerFormat::GRBG, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, { BayerFormat::RGGB, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, { BayerFormat::BGGR, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, { BayerFormat::GBRG, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, { BayerFormat::GRBG, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, { BayerFormat::RGGB, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, { BayerFormat::BGGR, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, { BayerFormat::GBRG, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, { BayerFormat::GRBG, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, { BayerFormat::RGGB, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, { BayerFormat::BGGR, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, { BayerFormat::GBRG, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, { BayerFormat::GRBG, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, { BayerFormat::BGGR, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, { BayerFormat::GBRG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, { BayerFormat::GRBG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, { BayerFormat::RGGB, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y16_1X16, { BayerFormat::MONO, 16, BayerFormat::Packing::None } },
+};
+
+} /* namespace */
+
+/**
+ * \fn BayerFormat::BayerFormat()
+ * \brief Construct an empty (and invalid) BayerFormat
+ */
+
+/**
+ * \fn BayerFormat::BayerFormat(Order o, uint8_t b, Packing p)
+ * \brief Construct a BayerFormat from explicit values
+ * \param[in] o The order of the Bayer pattern
+ * \param[in] b The bit depth of the Bayer samples
+ * \param[in] p The type of packing applied to the pixel values
+ */
+
+/**
+ * \brief Retrieve the BayerFormat associated with a media bus code
+ * \param[in] mbusCode The media bus code to convert into a BayerFormat
+ *
+ * The media bus code numeric identifiers are defined by the V4L2 specification.
+ *
+ * \return The BayerFormat corresponding to \a mbusCode, or an invalid
+ * BayerFormat if the media bus code is not supported
+ */
+const BayerFormat &BayerFormat::fromMbusCode(unsigned int mbusCode)
+{
+ static BayerFormat empty;
+
+ const auto it = mbusCodeToBayer.find(mbusCode);
+ if (it == mbusCodeToBayer.end())
+ return empty;
+ else
+ return it->second;
+}
+
+/**
+ * \fn BayerFormat::isValid()
+ * \brief Return whether a BayerFormat is valid
+ */
+
+/**
+ * \brief Assemble and return a readable string representation of the
+ * BayerFormat
+ * \return A string describing the BayerFormat
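+ *
+ * For example, a 10-bit CSI-2-packed RGGB format is rendered as
+ * "RGGB-10-CSI2P":
+ *
+ * \code
+ * BayerFormat fmt(BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2);
+ * std::string s = fmt.toString();
+ * \endcode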
+ */
+std::string BayerFormat::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Compare two BayerFormats for equality
+ * \return True if order, bitDepth and packing are equal, or false otherwise
+ */
+bool operator==(const BayerFormat &lhs, const BayerFormat &rhs)
+{
+ return lhs.order == rhs.order && lhs.bitDepth == rhs.bitDepth &&
+ lhs.packing == rhs.packing;
+}
+
+/**
+ * \brief Insert a text representation of a BayerFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The BayerFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
+{
+ static const char *orderStrings[] = {
+ "BGGR-",
+ "GBRG-",
+ "GRBG-",
+ "RGGB-",
+ "MONO-"
+ };
+
+ if (!f.isValid() || f.order > BayerFormat::MONO) {
+ out << "INVALID";
+ return out;
+ }
+
+ /* The cast is required to avoid bitDepth being interpreted as a char. */
+ out << orderStrings[f.order] << static_cast<unsigned int>(f.bitDepth);
+
+ if (f.packing == BayerFormat::Packing::CSI2)
+ out << "-CSI2P";
+ else if (f.packing == BayerFormat::Packing::IPU3)
+ out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
+
+ return out;
+}
+
+/**
+ * \fn bool operator!=(const BayerFormat &lhs, const BayerFormat &rhs)
+ * \brief Compare two BayerFormats for inequality
+ * \return True if order, bitDepth or packing are not equal, or false
+ * otherwise
+ */
+
+/**
+ * \brief Convert a BayerFormat into the corresponding V4L2PixelFormat
+ * \return The V4L2PixelFormat corresponding to this BayerFormat
+ */
+V4L2PixelFormat BayerFormat::toV4L2PixelFormat() const
+{
+ const auto it = bayerToFormat.find(*this);
+ if (it != bayerToFormat.end())
+ return it->second.v4l2Format;
+
+ return V4L2PixelFormat();
+}
+
+/**
+ * \brief Convert \a v4l2Format to the corresponding BayerFormat
+ * \param[in] v4l2Format The raw format to convert into a BayerFormat
+ * \return The BayerFormat corresponding to \a v4l2Format
+ */
+BayerFormat BayerFormat::fromV4L2PixelFormat(V4L2PixelFormat v4l2Format)
+{
+ auto it = std::find_if(bayerToFormat.begin(), bayerToFormat.end(),
+ [v4l2Format](const auto &i) {
+ return i.second.v4l2Format == v4l2Format;
+ });
+ if (it != bayerToFormat.end())
+ return it->first;
+
+ return BayerFormat();
+}
+
+/**
+ * \brief Convert a BayerFormat into the corresponding PixelFormat
+ * \return The PixelFormat corresponding to this BayerFormat
+ */
+PixelFormat BayerFormat::toPixelFormat() const
+{
+ const auto it = bayerToFormat.find(*this);
+ if (it != bayerToFormat.end())
+ return it->second.pixelFormat;
+
+ return PixelFormat();
+}
+
+/**
+ * \brief Convert a PixelFormat into the corresponding BayerFormat
+ * \return The BayerFormat corresponding to this PixelFormat
+ */
+BayerFormat BayerFormat::fromPixelFormat(PixelFormat format)
+{
+ const auto it = std::find_if(bayerToFormat.begin(), bayerToFormat.end(),
+ [format](const auto &i) {
+ return i.second.pixelFormat == format;
+ });
+ if (it != bayerToFormat.end())
+ return it->first;
+
+ return BayerFormat();
+}
+
+/**
+ * \brief Apply a transform to this BayerFormat
+ * \param[in] t The transform to apply
+ *
+ * Applying a transform to an image stored in a Bayer format affects the Bayer
+ * order. For example, performing a horizontal flip on the Bayer pattern RGGB
+ * causes the RG rows of pixels to become GR, and the GB rows to become BG. The
+ * transformed image would have a GRBG order. Performing a vertical flip on the
+ * Bayer pattern RGGB causes the GB rows to come before the RG ones and the
+ * transformed image would have GBRG order. Applying both vertical and
+ * horizontal flips on the Bayer pattern RGGB results in transformed images with
+ * BGGR order. The bit depth and modifiers are not affected.
+ *
+ * Horizontal and vertical flips are applied before transpose.
+ *
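+ * For instance, a plain horizontal flip turns RGGB into GRBG, as in this
+ * sketch:
+ *
+ * \code
+ * BayerFormat rggb(BayerFormat::RGGB, 10, BayerFormat::Packing::None);
+ * BayerFormat flipped = rggb.transform(Transform::HFlip);
+ * // flipped.order is now BayerFormat::GRBG
+ * \endcode
+ *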
+ * \return The transformed Bayer format
+ */
+BayerFormat BayerFormat::transform(Transform t) const
+{
+ BayerFormat result = *this;
+
+ if (order == MONO)
+ return result;
+
+ /*
+ * Observe that flipping bit 0 of the Order enum performs a horizontal
+ * mirror on the Bayer pattern (e.g. RG/GB goes to GR/BG). Similarly,
+ * flipping bit 1 performs a vertical mirror operation on it (e.g. RG/GB
+ * goes to GB/RG). Applying both vertical and horizontal flips
+ * combines vertical and horizontal mirroring on the Bayer pattern
+ * (e.g. RG/GB goes to BG/GR). Hence:
+ */
+ if (!!(t & Transform::HFlip))
+ result.order = static_cast<Order>(result.order ^ 1);
+ if (!!(t & Transform::VFlip))
+ result.order = static_cast<Order>(result.order ^ 2);
+
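+	/* Transpose swaps GBRG (1) and GRBG (2); BGGR and RGGB are symmetric. */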
+ if (!!(t & Transform::Transpose) && result.order == 1)
+ result.order = static_cast<Order>(2);
+ else if (!!(t & Transform::Transpose) && result.order == 2)
+ result.order = static_cast<Order>(1);
+
+ return result;
+}
+
+/**
+ * \var BayerFormat::order
+ * \brief The order of the colour channels in the Bayer pattern
+ */
+
+/**
+ * \var BayerFormat::bitDepth
+ * \brief The bit depth of the samples in the Bayer pattern
+ */
+
+/**
+ * \var BayerFormat::packing
+ * \brief Any packing scheme applied to this BayerFormat
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/buffer.cpp b/src/libcamera/buffer.cpp
deleted file mode 100644
index 673a63d3..00000000
--- a/src/libcamera/buffer.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * buffer.cpp - Buffer handling
- */
-
-#include <libcamera/buffer.h>
-
-#include <errno.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "log.h"
-
-/**
- * \file buffer.h
- * \brief Buffer handling
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Buffer)
-
-/**
- * \struct FrameMetadata
- * \brief Metadata related to a captured frame
- *
- * The FrameMetadata structure stores all metadata related to a captured frame,
- * as stored in a FrameBuffer, such as capture status, timestamp and bytesused.
- */
-
-/**
- * \enum FrameMetadata::Status
- * \brief Define the frame completion status
- * \var FrameMetadata::FrameSuccess
- * The frame has been captured with success and contains valid data. All fields
- * of the FrameMetadata structure are valid.
- * \var FrameMetadata::FrameError
- * An error occurred during capture of the frame. The frame data may be partly
- * or fully invalid. The sequence and timestamp fields of the FrameMetadata
- * structure is valid, the other fields may be invalid.
- * \var FrameMetadata::FrameCancelled
- * Capture stopped before the frame completed. The frame data is not valid. All
- * fields of the FrameMetadata structure but the status field are invalid.
- */
-
-/**
- * \struct FrameMetadata::Plane
- * \brief Per-plane frame metadata
- *
- * Frames are stored in memory in one or multiple planes. The
- * FrameMetadata::Plane structure stores per-plane metadata.
- */
-
-/**
- * \var FrameMetadata::Plane::bytesused
- * \brief Number of bytes occupied by the data in the plane, including line
- * padding
- *
- * This value may vary per frame for compressed formats. For uncompressed
- * formats it will be constant for all frames, but may be smaller than the
- * FrameBuffer size.
- */
-
-/**
- * \var FrameMetadata::status
- * \brief Status of the frame
- *
- * The validity of other fields of the FrameMetadata structure depends on the
- * status value.
- */
-
-/**
- * \var FrameMetadata::sequence
- * \brief Frame sequence number
- *
- * The sequence number is a monotonically increasing number assigned to the
- * frames captured by the stream. The value is increased by one for each frame.
- * Gaps in the sequence numbers indicate dropped frames.
- */
-
-/**
- * \var FrameMetadata::timestamp
- * \brief Time when the frame was captured
- *
- * The timestamp is expressed as a number of nanoseconds relative to the system
- * clock since an unspecified time point.
- *
- * \todo Be more precise on what timestamps refer to.
- */
-
-/**
- * \var FrameMetadata::planes
- * \brief Array of per-plane metadata
- */
-
-/**
- * \class FrameBuffer
- * \brief Frame buffer data and its associated dynamic metadata
- *
- * The FrameBuffer class is the primary interface for applications, IPAs and
- * pipeline handlers to interact with frame memory. It contains all the static
- * and dynamic information to manage the whole life cycle of a frame capture,
- * from buffer creation to consumption.
- *
- * The static information describes the memory planes that make a frame. The
- * planes are specified when creating the FrameBuffer and are expressed as a set
- * of dmabuf file descriptors and length.
- *
- * The dynamic information is grouped in a FrameMetadata instance. It is updated
- * during the processing of a queued capture request, and is valid from the
- * completion of the buffer as signaled by Camera::bufferComplete() until the
- * FrameBuffer is either reused in a new request or deleted.
- *
- * The creator of a FrameBuffer (application, IPA or pipeline handler) may
- * associate to it an integer cookie for any private purpose. The cookie may be
- * set when creating the FrameBuffer, and updated at any time with setCookie().
- * The cookie is transparent to the libcamera core and shall only be set by the
- * creator of the FrameBuffer. This mechanism supplements the Request cookie.
- */
-
-/**
- * \struct FrameBuffer::Plane
- * \brief A memory region to store a single plane of a frame
- *
- * Planar pixel formats use multiple memory regions to store the different
- * colour components of a frame. The Plane structure describes such a memory
- * region by a dmabuf file descriptor and a length. A FrameBuffer then
- * contains one or multiple planes, depending on the pixel format of the
- * frames it is meant to store.
- *
- * To support DMA access, planes are associated with dmabuf objects represented
- * by FileDescriptor handles. The Plane class doesn't handle mapping of the
- * memory to the CPU, but applications and IPAs may use the dmabuf file
- * descriptors to map the plane memory with mmap() and access its contents.
- *
- * \todo Once we have a Kernel API which can express offsets within a plane
- * this structure shall be extended to contain this information. See commit
- * 83148ce8be55e for initial documentation of this feature.
- */
-
-/**
- * \var FrameBuffer::Plane::fd
- * \brief The dmabuf file descriptor
- */
-
-/**
- * \var FrameBuffer::Plane::length
- * \brief The plane length in bytes
- */
-
-/**
- * \brief Construct a FrameBuffer with an array of planes
- * \param[in] planes The frame memory planes
- * \param[in] cookie Cookie
- */
-FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
- : planes_(planes), request_(nullptr), cookie_(cookie)
-{
-}
-
-/**
- * \fn FrameBuffer::planes()
- * \brief Retrieve the static plane descriptors
- * \return Array of plane descriptors
- */
-
-/**
- * \fn FrameBuffer::request()
- * \brief Retrieve the request this buffer belongs to
- *
- * The intended callers of this method are buffer completion handlers that
- * need to associate a buffer to the request it belongs to.
- *
- * A Buffer is associated to a request by Request::addBuffer() and the
- * association is valid until the buffer completes. The returned request
- * pointer is valid only during that interval.
- *
- * \return The Request the Buffer belongs to, or nullptr if the buffer is
- * not associated with a request
- */
-
-/**
- * \fn FrameBuffer::metadata()
- * \brief Retrieve the dynamic metadata
- * \return Dynamic metadata for the frame contained in the buffer
- */
-
-/**
- * \fn FrameBuffer::cookie()
- * \brief Retrieve the cookie
- *
- * The cookie belongs to the creator of the FrameBuffer, which controls its
- * lifetime and value.
- *
- * \sa setCookie()
- *
- * \return The cookie
- */
-
-/**
- * \fn FrameBuffer::setCookie()
- * \brief Set the cookie
- * \param[in] cookie Cookie to set
- *
- * The cookie belongs to the creator of the FrameBuffer. Its value may be
- * modified at any time with this method. Applications and IPAs shall not modify
- * the cookie value of buffers they haven't created themselves. The libcamera
- * core never modifies the buffer cookie.
- */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 20d6a655..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,25 +2,25 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
-#include "byte_stream_buffer.h"
+#include "libcamera/internal/byte_stream_buffer.h"
#include <stdint.h>
#include <string.h>
-#include "log.h"
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Serialization);
+#include <libcamera/base/log.h>
/**
* \file byte_stream_buffer.h
* \brief Managed memory container for serialized data
*/
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Serialization)
+
/**
* \class ByteStreamBuffer
* \brief Wrap a memory buffer and provide sequential data read and write
@@ -40,7 +40,8 @@ LOG_DEFINE_CATEGORY(Serialization);
* respectively. Access is strictly sequential, the buffer keeps track of the
* current access location and advances it automatically. Reading or writing
* the same location multiple times is thus not possible. Bytes may also be
- * skipped with the skip() method.
+ * skipped with the skip() function.
*
* The ByteStreamBuffer also supports carving out pieces of memory into other
* ByteStreamBuffer instances. Like a read or write operation, a carveOut()
@@ -52,7 +53,7 @@ LOG_DEFINE_CATEGORY(Serialization);
* the buffer being marked as having overflown. If the buffer has been carved
* out from a parent buffer, the parent buffer is also marked as having
* overflown. Any later access on an overflown buffer is blocked. The buffer
- * overflow status can be checked with the overflow() method.
+ * overflow status can be checked with the overflow() function.
*/
/**
@@ -155,7 +156,7 @@ void ByteStreamBuffer::setOverflow()
* \brief Carve out an area of \a size bytes into a new ByteStreamBuffer
* \param[in] size The size of the newly created memory buffer
*
- * This method carves out an area of \a size bytes from the buffer into a new
+ * This function carves out an area of \a size bytes from the buffer into a new
* ByteStreamBuffer, and returns the new buffer. It operates identically to a
* read or write access from the point of view of the current buffer, but allows
* the new buffer to be read or written at a later time after other read or
@@ -194,7 +195,7 @@ ByteStreamBuffer ByteStreamBuffer::carveOut(size_t size)
* \brief Skip \a size bytes from the buffer
* \param[in] size The number of bytes to skip
*
- * This method skips the next \a size bytes from the buffer.
+ * This function skips the next \a size bytes from the buffer.
*
* \return 0 on success, a negative error code otherwise
* \retval -ENOSPC no more space is available in the managed memory buffer
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index 8c3bb2c2..69a7ee53 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,42 +2,128 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
+#include <array>
#include <atomic>
-#include <iomanip>
+#include <ios>
+#include <memory>
+#include <optional>
+#include <set>
+#include <sstream>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/color_space.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_controls.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
/**
- * \file camera.h
+ * \file libcamera/camera.h
* \brief Camera device handling
*
- * At the core of libcamera is the camera device, combining one image source
- * with processing hardware able to provide one or multiple image streams. The
- * Camera class represents a camera device.
- *
- * A camera device contains a single image source, and separate camera device
- * instances relate to different image sources. For instance, a phone containing
- * front and back image sensors will be modelled with two camera devices, one
- * for each sensor. When multiple streams can be produced from the same image
- * source, all those streams are guaranteed to be part of the same camera
- * device.
- *
- * While not sharing image sources, separate camera devices can share other
- * system resources, such as an ISP. For this reason camera device instances may
- * not be fully independent, in which case usage restrictions may apply. For
- * instance, a phone with a front and a back camera device may not allow usage
- * of the two devices simultaneously.
+ * \page camera-model Camera Model
+ *
+ * libcamera acts as a middleware between applications and camera hardware. It
+ * provides a solution to an unsolvable problem: reconciling applications,
+ * which need to run on different systems without dealing with device-specific
+ * details, and camera hardware, which exhibits a wide variety of features,
+ * limitations and architecture variations. In order to do so, it creates an
+ * abstract camera model that hides the camera hardware from applications. The
+ * model is designed to strike the right balance between genericity, to please
+ * generic applications, and flexibility, to expose even the most specific
+ * hardware features to the most demanding applications.
+ *
+ * In libcamera, a Camera is defined as a device that can capture frames
+ * continuously from a camera sensor and store them in memory. If supported by
+ * the device and desired by the application, the camera may store each
+ * captured frame in multiple copies, possibly in different formats and sizes.
+ * Each of these memory outputs of the camera is called a Stream.
+ *
+ * A camera contains a single image source, and separate camera instances
+ * relate to different image sources. For instance, a phone containing front
+ * and back image sensors will be modelled with two cameras, one for each
+ * sensor. When multiple streams can be produced from the same image source,
+ * all those streams are guaranteed to be part of the same camera.
+ *
+ * While not sharing image sources, separate cameras can share other system
+ * resources, such as ISPs. For this reason camera instances may not be fully
+ * independent, in which case usage restrictions may apply. For instance, a
+ * phone with a front and a back camera may not allow usage of the two cameras
+ * simultaneously.
+ *
+ * The camera model defines an implicit pipeline, whose input is the camera
+ * sensor, and whose outputs are the streams. Along the pipeline, the frames
+ * produced by the camera sensor are transformed by the camera into a format
+ * suitable for applications, with image processing that improves the quality
+ * of the captured frames. The camera exposes a set of controls that
+ * applications may use to manually control the processing steps. This
+ * high-level camera model is the minimum baseline that all cameras must
+ * conform to.
+ *
+ * \section camera-pipeline-model Pipeline Model
+ *
+ * Camera hardware differs in the supported image processing operations and the
+ * order in which they are applied. The libcamera pipelines abstract the
+ * hardware differences and expose a logical view of the processing operations
+ * with a fixed order. This offers low-level control of those operations to
+ * applications, while keeping application code generic.
+ *
+ * Starting from the camera sensor, a pipeline applies the following
+ * operations, in that order.
+ *
+ * - Pixel exposure
+ * - Analog to digital conversion and readout
+ * - Black level subtraction
+ * - Defective pixel correction
+ * - Lens shading correction
+ * - Spatial noise filtering
+ * - Per-channel gains (white balance)
+ * - Demosaicing (color filter array interpolation)
+ * - Color correction matrix (typically RGB to RGB)
+ * - Gamma correction
+ * - Color space transformation (typically RGB to YUV)
+ * - Cropping
+ * - Scaling
+ *
+ * Not all cameras implement all operations, and they are not necessarily
+ * implemented in the above order at the hardware level. The libcamera pipeline
+ * handlers translate the pipeline model to the real hardware configuration.
+ *
+ * \subsection camera-sensor-model Camera Sensor Model
+ *
+ * By default, libcamera configures the camera sensor automatically based on the
+ * configuration of the streams. Applications may instead specify a manual
+ * configuration for the camera sensor. This allows precise control of the frame
+ * geometry and frame rate delivered by the sensor.
+ *
+ * More details about the camera sensor model implemented by libcamera are
+ * available in the libcamera camera-sensor-model documentation page.
+ *
+ * \subsection digital-zoom Digital Zoom
+ *
+ * Digital zoom is implemented as a combination of the cropping and scaling
+ * stages of the pipeline. Cropping is controlled explicitly through the
+ * controls::ScalerCrop control, while scaling is controlled implicitly based
+ * on the crop rectangle and the output stream size. The crop rectangle is
+ * expressed relative to the full pixel array size and indicates how the field
+ * of view is affected by the pipeline.
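+ *
+ * As a sketch, an application could zoom in on the centre quarter of a
+ * hypothetical 4056x3040 pixel array by setting, on each queued request:
+ *
+ * \code
+ * request->controls().set(controls::ScalerCrop,
+ *                         Rectangle(1014, 760, 2028, 1520));
+ * \endcode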
+ */
+
+/**
+ * \internal
+ * \file libcamera/internal/camera.h
+ * \brief Internal camera device handling
*/
namespace libcamera {
@@ -45,15 +131,136 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
+ * \class SensorConfiguration
+ * \brief Camera sensor configuration
+ *
+ * The SensorConfiguration class collects parameters to control the operations
+ * of the camera sensor, according to the abstract camera sensor model
+ * implemented by libcamera.
+ *
+ * \todo Applications shall fully populate all fields of the
+ * CameraConfiguration::sensorConfig class members before validating the
+ * CameraConfiguration. If the SensorConfiguration is not fully populated, or if
+ * any of its parameters cannot be applied to the sensor in use, the
+ * CameraConfiguration validation process will fail and return
+ * CameraConfiguration::Status::Invalid.
+ *
+ * Applications that populate the SensorConfiguration class members are
+ * expected to be highly-specialized applications that know what sensor
+ * they are operating with and what parameters are valid for the sensor in use.
+ *
+ * A detailed description of the abstract camera sensor model implemented by
+ * libcamera and the description of its configuration parameters is available
+ * in the libcamera documentation camera-sensor-model file.
+ */
+
+/**
+ * \var SensorConfiguration::bitDepth
+ * \brief The sensor image format bit depth
+ *
+ * The number of bits (resolution) used to represent a pixel sample.
+ */
+
+/**
+ * \var SensorConfiguration::analogCrop
+ * \brief The analog crop rectangle
+ *
+ * The selected portion of the active pixel array used to produce the image
+ * frame.
+ */
+
+/**
+ * \var SensorConfiguration::binning
+ * \brief Sensor binning configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the binning operations. Binning is disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::binX
+ * \brief Horizontal binning factor
+ *
+ * The horizontal binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::binY
+ * \brief Vertical binning factor
+ *
+ * The vertical binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::skipping
+ * \brief The sensor skipping configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the skipping operations.
+ *
+ * If no skipping is performed, all the structure fields should be
+ * set to 1. Skipping is disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::xOddInc
+ * \brief Horizontal increment for odd rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::xEvenInc
+ * \brief Horizontal increment for even rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yOddInc
+ * \brief Vertical increment for odd columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yEvenInc
+ * \brief Vertical increment for even columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::outputSize
+ * \brief The frame output (visible) size
+ *
+ * The size of the data frame as received by the host processor.
+ */
+
+/**
+ * \brief Check if the sensor configuration is valid
+ *
+ * A sensor configuration is valid if it's fully populated.
+ *
+ * \todo For now allow applications to populate the bitDepth and the outputSize
+ * only, as skipping and binning factors are initialized to 1 and the analog
+ * crop is ignored.
+ *
+ * \return True if the sensor configuration is valid, false otherwise
+ */
+bool SensorConfiguration::isValid() const
+{
+ if (bitDepth && binning.binX && binning.binY &&
+ skipping.xOddInc && skipping.yOddInc &&
+ skipping.xEvenInc && skipping.yEvenInc &&
+ !outputSize.isNull())
+ return true;
+
+ return false;
+}
+
+/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
* The CameraConfiguration holds an ordered list of stream configurations. It
* supports iterators and operates as a vector of StreamConfiguration instances.
* The stream configurations are inserted by addConfiguration(), and the
- * operator[](int) returns a reference to the StreamConfiguration based on its
- * insertion index. Accessing a stream configuration with an invalid index
- * results in undefined behaviour.
+ * at() function or operator[] return a reference to the StreamConfiguration
+ * based on its insertion index. Accessing a stream configuration with an
+ * invalid index results in undefined behaviour.
*
* CameraConfiguration instances are retrieved from the camera with
* Camera::generateConfiguration(). Applications may then inspect the
@@ -93,7 +300,7 @@ LOG_DECLARE_CATEGORY(Camera)
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
- : config_({})
+ : orientation(Orientation::Rotate0), config_({})
{
}
@@ -114,22 +321,22 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* \fn CameraConfiguration::validate()
* \brief Validate and possibly adjust the camera configuration
*
- * This method adjusts the camera configuration to the closest valid
+ * This function adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
- * \todo: Define exactly when to return each status code. Should stream
+ * \todo Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
- * \todo: Handle validation of buffers count when refactoring the buffers API.
+ * \todo Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfiguration::Invalid The configuration is invalid and can't
* be adjusted. This may only occur in extreme cases such as when the
* configuration is empty.
- * \retval CameraConfigutation::Adjusted The configuration has been adjusted
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
* and is now valid. Parameters may have changed for any stream, and stream
* configurations may have been removed. The caller shall check the
* configuration carefully.
@@ -143,7 +350,7 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -158,7 +365,7 @@ StreamConfiguration &CameraConfiguration::at(unsigned int index)
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -174,7 +381,7 @@ const StreamConfiguration &CameraConfiguration::at(unsigned int index) const
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -186,7 +393,7 @@ const StreamConfiguration &CameraConfiguration::at(unsigned int index) const
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -251,44 +458,135 @@ std::size_t CameraConfiguration::size() const
}
/**
- * \var CameraConfiguration::config_
- * \brief The vector of stream configurations
+ * \enum CameraConfiguration::ColorSpaceFlag
+ * \brief Specify the behaviour of validateColorSpaces
+ * \var CameraConfiguration::ColorSpaceFlag::None
+ * \brief No extra validation of color spaces is required
+ * \var CameraConfiguration::ColorSpaceFlag::StreamsShareColorSpace
+ * \brief Non-raw output streams must share the same color space
+ */
+
+/**
+ * \typedef CameraConfiguration::ColorSpaceFlags
+ * \brief A bitwise combination of ColorSpaceFlag values
*/
-class Camera::Private
+/**
+ * \brief Check the color spaces requested for each stream
+ * \param[in] flags Flags to control the behaviour of this function
+ *
+ * This function performs certain consistency checks on the color spaces of
+ * the streams and may adjust them so that:
+ *
+ * - Any raw streams have the Raw color space
+ * - If the StreamsShareColorSpace flag is set, all output streams are forced
+ * to share the same color space (this may be a constraint on some platforms).
+ *
+ * It is optional for a pipeline handler to use this function.
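+ *
+ * A pipeline handler whose hardware requires a single output color space
+ * could, for instance, call it from its CameraConfiguration::validate()
+ * implementation:
+ *
+ * \code
+ * Status status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+ * \endcode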
+ *
+ * \return A CameraConfiguration::Status value that describes the validation
+ * status.
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
+ * and is now valid. The color space of some or all of the streams may have
+ * been changed. The caller shall check the color spaces carefully.
+ * \retval CameraConfiguration::Valid The configuration was already valid and
+ * hasn't been adjusted.
+ */
+CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceFlags flags)
{
-public:
- enum State {
- CameraAvailable,
- CameraAcquired,
- CameraConfigured,
- CameraRunning,
- };
+ Status status = Valid;
+
+ /*
+ * Set all raw streams to the Raw color space, and make a note of the
+ * largest non-raw stream with a defined color space (if there is one).
+ */
+ std::optional<ColorSpace> colorSpace;
+ Size size;
- Private(PipelineHandler *pipe, const std::string &name,
- const std::set<Stream *> &streams);
- ~Private();
+ for (StreamConfiguration &cfg : config_) {
+ if (!cfg.colorSpace)
+ continue;
- int isAccessAllowed(State state, bool allowDisconnected = false) const;
- int isAccessAllowed(State low, State high,
- bool allowDisconnected = false) const;
+ if (cfg.colorSpace->adjust(cfg.pixelFormat))
+ status = Adjusted;
- void disconnect();
- void setState(State state);
+ if (cfg.colorSpace != ColorSpace::Raw && cfg.size > size) {
+ colorSpace = cfg.colorSpace;
+ size = cfg.size;
+ }
+ }
- std::shared_ptr<PipelineHandler> pipe_;
- std::string name_;
- std::set<Stream *> streams_;
- std::set<Stream *> activeStreams_;
+ if (!colorSpace || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
+ return status;
-private:
- bool disconnected_;
- std::atomic<State> state_;
-};
+ /* Make all output color spaces the same, if requested. */
+ for (auto &cfg : config_) {
+ if (cfg.colorSpace != ColorSpace::Raw &&
+ cfg.colorSpace != colorSpace) {
+ cfg.colorSpace = colorSpace;
+ status = Adjusted;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * \var CameraConfiguration::sensorConfig
+ * \brief The camera sensor configuration
+ *
+ * The sensorConfig member allows manual control of the configuration of the
+ * camera sensor. By default, if sensorConfig is not set, the camera will
+ * configure the sensor automatically based on the configuration of the streams.
+ * Applications can override this by manually specifying the full sensor
+ * configuration.
+ *
+ * Refer to the camera-sensor-model documentation and to the SensorConfiguration
+ * class documentation for details about the sensor configuration process.
+ *
+ * The camera sensor configuration applies to all streams produced by a camera
+ * from the same image source.
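+ *
+ * A sketch of a manual sensor configuration, with illustrative,
+ * sensor-dependent values:
+ *
+ * \code
+ * SensorConfiguration sensorConfig;
+ * sensorConfig.bitDepth = 10;
+ * sensorConfig.outputSize = Size(1920, 1080);
+ * config->sensorConfig = sensorConfig;
+ * \endcode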
+ */
+
+/**
+ * \var CameraConfiguration::orientation
+ * \brief The desired orientation of the images produced by the camera
+ *
+ * The orientation field is a user-specified 2D plane transformation that
+ * specifies how the application wants the camera images to be rotated in
+ * the memory buffers.
+ *
+ * If the orientation requested by the application cannot be obtained, the
+ * camera will not rotate or flip the images, and the validate() function will
+ * adjust this value to the native image orientation produced by the camera.
+ *
+ * By default the orientation field is set to Orientation::Rotate0.
+ */
+
+/**
+ * \var CameraConfiguration::config_
+ * \brief The vector of stream configurations
+ */
+
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class Camera::Private
+ * \brief Base class for camera private data
+ *
+ * The Camera::Private class stores all private data associated with a camera.
+ * In addition to hiding core Camera data from the public API, it is expected to
+ * be subclassed by pipeline handlers to store pipeline-specific data.
+ *
+ * Pipeline handlers can obtain the Camera::Private instance associated with a
+ * camera by calling Camera::_d().
+ */
-Camera::Private::Private(PipelineHandler *pipe, const std::string &name,
- const std::set<Stream *> &streams)
- : pipe_(pipe->shared_from_this()), name_(name), streams_(streams),
+/**
+ * \brief Construct a Camera::Private instance
+ * \param[in] pipe The pipeline handler responsible for the camera device
+ */
+Camera::Private::Private(PipelineHandler *pipe)
+ : requestSequence_(0), pipe_(pipe->shared_from_this()),
disconnected_(false), state_(CameraAvailable)
{
}
@@ -299,14 +597,86 @@ Camera::Private::~Private()
LOG(Camera, Error) << "Removing camera while still in use";
}
+/**
+ * \fn Camera::Private::pipe()
+ * \brief Retrieve the pipeline handler related to this camera
+ * \return The pipeline handler that created this camera
+ */
+
+/**
+ * \fn Camera::Private::pipe() const
+ * \copydoc Camera::Private::pipe()
+ */
+
+/**
+ * \fn Camera::Private::validator()
+ * \brief Retrieve the control validator related to this camera
+ * \return The control validator associated with this camera
+ */
+
+/**
+ * \var Camera::Private::queuedRequests_
+ * \brief The list of queued and not yet completed requests
+ *
+ * This list tracks requests queued in order to ensure completion of all
+ * requests when the pipeline handler is stopped.
+ *
+ * \sa PipelineHandler::queueRequest(), PipelineHandler::stop(),
+ * PipelineHandler::completeRequest()
+ */
+
+/**
+ * \var Camera::Private::controlInfo_
+ * \brief The set of controls supported by the camera
+ *
+ * The control information shall be initialised by the pipeline handler when
+ * creating the camera.
+ *
+ * \todo This member was initially meant to stay constant after the camera is
+ * created. Several pipeline handlers are already updating it when the camera
+ * is configured. Update the documentation accordingly, and possibly the API as
+ * well, when implementing official support for control info updates.
+ */
+
+/**
+ * \var Camera::Private::properties_
+ * \brief The list of properties supported by the camera
+ *
+ * The list of camera properties shall be initialised by the pipeline handler
+ * when creating the camera, and shall not be modified afterwards.
+ */
+
+/**
+ * \var Camera::Private::requestSequence_
+ * \brief The queuing sequence number of the request
+ *
+ * When requests are queued, they are given a per-camera sequence number to
+ * facilitate debugging of internal request usage.
+ *
+ * The requestSequence_ tracks the number of requests queued to a camera
+ * over a single capture session.
+ */
+
static const char *const camera_state_names[] = {
"Available",
"Acquired",
"Configured",
+ "Stopping",
"Running",
};
-int Camera::Private::isAccessAllowed(State state, bool allowDisconnected) const
+bool Camera::Private::isAcquired() const
+{
+ return state_.load(std::memory_order_acquire) != CameraAvailable;
+}
+
+bool Camera::Private::isRunning() const
+{
+ return state_.load(std::memory_order_acquire) == CameraRunning;
+}
+
+int Camera::Private::isAccessAllowed(State state, bool allowDisconnected,
+ const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
@@ -315,17 +685,18 @@ int Camera::Private::isAccessAllowed(State state, bool allowDisconnected) const
if (currentState == state)
return 0;
- ASSERT(static_cast<unsigned int>(state) < ARRAY_SIZE(camera_state_names));
+ ASSERT(static_cast<unsigned int>(state) < std::size(camera_state_names));
- LOG(Camera, Debug) << "Camera in " << camera_state_names[currentState]
- << " state trying operation requiring state "
+ LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
+ << " state trying " << from << "() requiring state "
<< camera_state_names[state];
return -EACCES;
}
int Camera::Private::isAccessAllowed(State low, State high,
- bool allowDisconnected) const
+ bool allowDisconnected,
+ const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
@@ -334,11 +705,12 @@ int Camera::Private::isAccessAllowed(State low, State high,
if (currentState >= low && currentState <= high)
return 0;
- ASSERT(static_cast<unsigned int>(low) < ARRAY_SIZE(camera_state_names) &&
- static_cast<unsigned int>(high) < ARRAY_SIZE(camera_state_names));
+ ASSERT(static_cast<unsigned int>(low) < std::size(camera_state_names) &&
+ static_cast<unsigned int>(high) < std::size(camera_state_names));
- LOG(Camera, Debug) << "Camera in " << camera_state_names[currentState]
- << " state trying operation requiring state between "
+ LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
+ << " state trying " << from
+ << "() requiring state between "
<< camera_state_names[low] << " and "
<< camera_state_names[high];
@@ -362,6 +734,7 @@ void Camera::Private::setState(State state)
{
state_.store(state, std::memory_order_release);
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \class Camera
@@ -409,6 +782,7 @@ void Camera::Private::setState(State state)
* node [shape = doublecircle ]; Available;
* node [shape = circle ]; Acquired;
* node [shape = circle ]; Configured;
+ * node [shape = circle ]; Stopping;
* node [shape = circle ]; Running;
*
* Available -> Available [label = "release()"];
@@ -421,7 +795,8 @@ void Camera::Private::setState(State state)
* Configured -> Configured [label = "configure(), createRequest()"];
* Configured -> Running [label = "start()"];
*
- * Running -> Configured [label = "stop()"];
+ * Running -> Stopping [label = "stop()"];
+ * Stopping -> Configured;
* Running -> Running [label = "createRequest(), queueRequest()"];
* }
* \enddot
@@ -441,6 +816,12 @@ void Camera::Private::setState(State state)
* release() the camera and to get back to the Available state or start()
* it to progress to the Running state.
*
+ * \subsubsection Stopping
+ * The camera has been asked to stop. Pending requests are being completed or
+ * cancelled, and no new requests are permitted to be queued. The camera will
+ * transition to the Configured state when all queued requests have been
+ * returned to the application.
+ *
* \subsubsection Running
* The camera is running and ready to process requests queued by the
* application. The camera remains in this state until it is stopped and moved
@@ -448,39 +829,66 @@ void Camera::Private::setState(State state)
*/
/**
+ * \internal
* \brief Create a camera instance
- * \param[in] name The name of the camera device
- * \param[in] pipe The pipeline handler responsible for the camera device
+ * \param[in] d Camera private data
+ * \param[in] id The ID of the camera device
* \param[in] streams Array of streams the camera provides
*
- * The caller is responsible for guaranteeing unicity of the camera name.
+ * The caller is responsible for guaranteeing a stable and unique camera ID
+ * matching the constraints described by Camera::id(). Parameters that are
+ * allocated dynamically at system startup, such as bus numbers that may be
+ * enumerated differently, are therefore not suitable to use in the ID.
+ *
+ * Pipeline handlers that use a CameraSensor may use the CameraSensor::id() to
+ * generate an ID that satisfies the criteria of a stable and unique camera ID.
*
* \return A shared pointer to the newly created camera object
*/
-std::shared_ptr<Camera> Camera::create(PipelineHandler *pipe,
- const std::string &name,
+std::shared_ptr<Camera> Camera::create(std::unique_ptr<Private> d,
+ const std::string &id,
const std::set<Stream *> &streams)
{
+ ASSERT(d);
+
struct Deleter : std::default_delete<Camera> {
void operator()(Camera *camera)
{
- delete camera;
+ if (Thread::current() == camera->thread())
+ delete camera;
+ else
+ camera->deleteLater();
}
};
- Camera *camera = new Camera(pipe, name, streams);
+ Camera *camera = new Camera(std::move(d), id, streams);
return std::shared_ptr<Camera>(camera, Deleter());
}
/**
- * \brief Retrieve the name of the camera
+ * \brief Retrieve the ID of the camera
+ *
+ * The camera ID is a free-form string that identifies a camera in the system.
+ * IDs are guaranteed to be unique and stable: the same camera, when connected
+ * to the system in the same way (e.g. in the same USB port), will have the same
+ * ID across both unplug/replug and system reboots.
+ *
+ * Applications may store the camera ID and use it later to acquire the same
+ * camera. They shall treat the ID as an opaque identifier, without interpreting
+ * its value.
+ *
+ * Camera IDs may change when the system hardware or firmware is modified, for
+ * instance when replacing a PCI USB controller or moving it to another PCI
+ * slot, or updating the ACPI tables or Device Tree.
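+ *
+ * For instance, an application may persist the ID and use it to retrieve the
+ * same camera at the next run (a sketch, assuming a started CameraManager
+ * \a cm and a previously saved \a savedId string):
+ *
+ * \code
+ * std::shared_ptr<Camera> camera = cm->get(savedId);
+ * \endcode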
+ *
* \context This function is \threadsafe.
- * \return Name of the camera device
+ *
+ * \return ID of the camera device
*/
-const std::string &Camera::name() const
+const std::string &Camera::id() const
{
- return p_->name_;
+ return _d()->id_;
}
/**
@@ -506,10 +914,13 @@ const std::string &Camera::name() const
* application API calls by returning errors immediately.
*/
-Camera::Camera(PipelineHandler *pipe, const std::string &name,
+Camera::Camera(std::unique_ptr<Private> d, const std::string &id,
const std::set<Stream *> &streams)
- : p_(new Private(pipe, name, streams))
+ : Extensible(std::move(d))
{
+ _d()->id_ = id;
+ _d()->streams_ = streams;
+ _d()->validator_ = std::make_unique<CameraControlValidator>(this);
}
Camera::~Camera()
@@ -519,7 +930,7 @@ Camera::~Camera()
/**
* \brief Notify camera disconnection
*
- * This method is used to notify the camera instance that the underlying
+ * This function is used to notify the camera instance that the underlying
* hardware has been unplugged. In response to the disconnection the camera
* instance notifies the application by emitting the #disconnected signal, and
* ensures that all new calls to the application-facing Camera API return an
@@ -530,28 +941,30 @@ Camera::~Camera()
*/
void Camera::disconnect()
{
- LOG(Camera, Debug) << "Disconnecting camera " << name();
+ LOG(Camera, Debug) << "Disconnecting camera " << id();
- p_->disconnect();
- disconnected.emit(this);
+ _d()->disconnect();
+ disconnected.emit();
}
int Camera::exportFrameBuffers(Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured);
if (ret < 0)
return ret;
if (streams().find(stream) == streams().end())
return -EINVAL;
- if (p_->activeStreams_.find(stream) == p_->activeStreams_.end())
+ if (d->activeStreams_.find(stream) == d->activeStreams_.end())
return -EINVAL;
- return p_->pipe_->invokeMethod(&PipelineHandler::exportFrameBuffers,
- ConnectionTypeBlocking, this, stream,
- buffers);
+ return d->pipe_->invokeMethod(&PipelineHandler::exportFrameBuffers,
+ ConnectionTypeBlocking, this, stream,
+ buffers);
}
/**
@@ -562,7 +975,7 @@ int Camera::exportFrameBuffers(Stream *stream,
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
- * Acquiring a camera will limit usage of any other camera(s) provided by the
+ * Acquiring a camera may limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
@@ -580,21 +993,24 @@ int Camera::exportFrameBuffers(Stream *stream,
*/
int Camera::acquire()
{
+ Private *const d = _d();
+
/*
* No manual locking is required as PipelineHandler::lock() is
* thread-safe.
*/
- int ret = p_->isAccessAllowed(Private::CameraAvailable);
+ int ret = d->isAccessAllowed(Private::CameraAvailable);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!p_->pipe_->lock()) {
+ if (!d->pipe_->invokeMethod(&PipelineHandler::acquire,
+ ConnectionTypeBlocking, this)) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
}
- p_->setState(Private::CameraAcquired);
+ d->setState(Private::CameraAcquired);
return 0;
}
@@ -615,14 +1031,18 @@ int Camera::acquire()
*/
int Camera::release()
{
- int ret = p_->isAccessAllowed(Private::CameraAvailable,
- Private::CameraConfigured, true);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAvailable,
+ Private::CameraConfigured, true);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- p_->pipe_->unlock();
+ if (d->isAcquired())
+ d->pipe_->invokeMethod(&PipelineHandler::release,
+ ConnectionTypeBlocking, this);
- p_->setState(Private::CameraAvailable);
+ d->setState(Private::CameraAvailable);
return 0;
}
@@ -637,9 +1057,9 @@ int Camera::release()
*
* \return A ControlInfoMap listing the controls supported by the camera
*/
-const ControlInfoMap &Camera::controls()
+const ControlInfoMap &Camera::controls() const
{
- return p_->pipe_->controls(this);
+ return _d()->controlInfo_;
}
/**
@@ -650,9 +1070,9 @@ const ControlInfoMap &Camera::controls()
*
* \return A ControlList of properties supported by the camera
*/
-const ControlList &Camera::properties()
+const ControlList &Camera::properties() const
{
- return p_->pipe_->properties(this);
+ return _d()->properties_;
}
/**
@@ -664,11 +1084,11 @@ const ControlList &Camera::properties()
*
* \context This function is \threadsafe.
*
- * \return An array of all the camera's streams.
+ * \return An array of all the camera's streams
*/
const std::set<Stream *> &Camera::streams() const
{
- return p_->streams_;
+ return _d()->streams_;
}
/**
@@ -684,20 +1104,22 @@ const std::set<Stream *> &Camera::streams() const
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
- * null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * null pointer otherwise.
*/
-std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(Span<const StreamRole> roles)
{
- int ret = p_->isAccessAllowed(Private::CameraAvailable,
- Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAvailable,
+ Private::CameraRunning);
if (ret < 0)
return nullptr;
if (roles.size() > streams().size())
return nullptr;
- CameraConfiguration *config = p_->pipe_->generateConfiguration(this, roles);
+ std::unique_ptr<CameraConfiguration> config =
+ d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
@@ -714,10 +1136,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
LOG(Camera, Debug) << msg.str();
- return std::unique_ptr<CameraConfiguration>(config);
+ return config;
}
/**
+ * \fn std::unique_ptr<CameraConfiguration> \
+ * Camera::generateConfiguration(std::initializer_list<StreamRole> roles)
+ * \overload
+ */
+
+/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
@@ -727,7 +1155,7 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
* by populating \a config.
*
* The configuration is created by generateConfiguration(), and adjusted by the
- * caller with CameraConfiguration::validate(). This method only accepts fully
+ * caller with CameraConfiguration::validate(). This function only accepts fully
* valid configurations and returns an error if \a config is not valid.
*
* Exclusive access to the camera shall be ensured by a call to acquire() prior
@@ -748,11 +1176,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
*/
int Camera::configure(CameraConfiguration *config)
{
- int ret = p_->isAccessAllowed(Private::CameraAcquired,
- Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAcquired,
+ Private::CameraConfigured);
if (ret < 0)
return ret;
+ for (auto &cfg : *config)
+ cfg.setStream(nullptr);
+
if (config->validate() != CameraConfiguration::Valid) {
LOG(Camera, Error)
<< "Can't configure camera with invalid configuration";
@@ -763,29 +1196,31 @@ int Camera::configure(CameraConfiguration *config)
for (unsigned int index = 0; index < config->size(); ++index) {
StreamConfiguration &cfg = config->at(index);
- cfg.setStream(nullptr);
msg << " (" << index << ") " << cfg.toString();
}
LOG(Camera, Info) << msg.str();
- ret = p_->pipe_->invokeMethod(&PipelineHandler::configure,
- ConnectionTypeBlocking, this, config);
+ ret = d->pipe_->invokeMethod(&PipelineHandler::configure,
+ ConnectionTypeBlocking, this, config);
if (ret)
return ret;
- p_->activeStreams_.clear();
+ d->activeStreams_.clear();
for (const StreamConfiguration &cfg : *config) {
Stream *stream = cfg.stream();
- if (!stream)
+ if (!stream) {
LOG(Camera, Fatal)
<< "Pipeline handler failed to update stream configuration";
+ d->activeStreams_.clear();
+ return -EINVAL;
+ }
stream->configuration_ = cfg;
- p_->activeStreams_.insert(stream);
+ d->activeStreams_.insert(stream);
}
- p_->setState(Private::CameraConfigured);
+ d->setState(Private::CameraConfigured);
return 0;
}
@@ -794,37 +1229,45 @@ int Camera::configure(CameraConfiguration *config)
* \brief Create a request object for the camera
* \param[in] cookie Opaque cookie for application use
*
- * This method creates an empty request for the application to fill with
+ * This function creates an empty request for the application to fill with
* buffers and parameters, and queue for capture.
*
* The \a cookie is stored in the request and is accessible through the
- * Request::cookie() method at any time. It is typically used by applications
+ * Request::cookie() function at any time. It is typically used by applications
* to map the request to an external resource in the request completion
* handler, and is completely opaque to libcamera.
*
* The ownership of the returned request is passed to the caller, which is
- * responsible for either queueing the request or deleting it.
+ * responsible for deleting it. The request may be deleted in the completion
+ * handler, or reused after resetting its state with Request::reuse().
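+ *
+ * A sketch of a typical request lifetime, assuming a configured \a stream and
+ * an allocated \a buffer:
+ *
+ * \code
+ * std::unique_ptr<Request> request = camera->createRequest();
+ * request->addBuffer(stream, buffer);
+ * camera->queueRequest(request.get());
+ * // Once the request completes, reset it and queue it again.
+ * request->reuse(Request::ReuseBuffers);
+ * \endcode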
*
* \context This function is \threadsafe. It may only be called when the camera
* is in the Configured or Running state as defined in \ref camera_operation.
*
* \return A pointer to the newly created request, or nullptr on error
*/
-Request *Camera::createRequest(uint64_t cookie)
+std::unique_ptr<Request> Camera::createRequest(uint64_t cookie)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured,
- Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured,
+ Private::CameraRunning);
if (ret < 0)
return nullptr;
- return new Request(this, cookie);
+ std::unique_ptr<Request> request = std::make_unique<Request>(this, cookie);
+
+ /* Associate the request with the pipeline handler. */
+ d->pipe_->registerRequest(request.get());
+
+ return request;
}
/**
* \brief Queue a request to the camera
* \param[in] request The request to queue to the camera
*
- * This method queues a \a request to the camera for capture.
+ * This function queues a \a request to the camera for capture.
*
* After allocating the request with createRequest(), the application shall
* fill it with at least one capture buffer before queuing it. Requests that
@@ -833,26 +1276,37 @@ Request *Camera::createRequest(uint64_t cookie)
* Once the request has been queued, the camera will notify its completion
* through the \ref requestCompleted signal.
*
- * Ownership of the request is transferred to the camera. It will be deleted
- * automatically after it completes.
- *
* \context This function is \threadsafe. It may only be called when the camera
* is in the Running state as defined in \ref camera_operation.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not running so requests can't be queued
+ * \retval -EXDEV The request does not belong to this camera
* \retval -EINVAL The request is invalid
* \retval -ENOMEM No buffer memory was available to handle the request
*/
int Camera::queueRequest(Request *request)
{
- int ret = p_->isAccessAllowed(Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraRunning);
if (ret < 0)
return ret;
+ /* Requests can only be queued to the camera that created them. */
+ if (request->_d()->camera() != this) {
+ LOG(Camera, Error) << "Request was not created by this camera";
+ return -EXDEV;
+ }
+
+ if (request->status() != Request::RequestPending) {
+ LOG(Camera, Error) << request->toString() << " is not valid";
+ return -EINVAL;
+ }
+
/*
- * The camera state may chance until the end of the function. No locking
+ * The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
* this.
*/
@@ -863,24 +1317,28 @@ int Camera::queueRequest(Request *request)
}
for (auto const &it : request->buffers()) {
- Stream *stream = it.first;
+ const Stream *stream = it.first;
- if (p_->activeStreams_.find(stream) == p_->activeStreams_.end()) {
+ if (d->activeStreams_.find(stream) == d->activeStreams_.end()) {
LOG(Camera, Error) << "Invalid request";
return -EINVAL;
}
}
- return p_->pipe_->invokeMethod(&PipelineHandler::queueRequest,
- ConnectionTypeQueued, this, request);
+ d->pipe_->invokeMethod(&PipelineHandler::queueRequest,
+ ConnectionTypeQueued, request);
+
+ return 0;
}
/**
* \brief Start capture from camera
+ * \param[in] controls Controls to be applied before starting the Camera
*
- * Start the camera capture session. Once the camera is started the application
- * can queue requests to the camera to process and return to the application
- * until the capture session is terminated with \a stop().
+ * Start the camera capture session, optionally providing a list of controls to
+ * apply before starting. Once the camera is started the application can queue
+ * requests to the camera to process and return to the application until the
+ * capture session is terminated with \a stop().
*
* \context This function may only be called when the camera is in the
* Configured state as defined in \ref camera_operation, and shall be
@@ -891,20 +1349,24 @@ int Camera::queueRequest(Request *request)
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not in a state where it can be started
*/
-int Camera::start()
+int Camera::start(const ControlList *controls)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured);
if (ret < 0)
return ret;
LOG(Camera, Debug) << "Starting capture";
- ret = p_->pipe_->invokeMethod(&PipelineHandler::start,
- ConnectionTypeBlocking, this);
+ ASSERT(d->requestSequence_ == 0);
+
+ ret = d->pipe_->invokeMethod(&PipelineHandler::start,
+ ConnectionTypeBlocking, this, controls);
if (ret)
return ret;
- p_->setState(Private::CameraRunning);
+ d->setState(Private::CameraRunning);
return 0;
}
@@ -912,12 +1374,13 @@ int Camera::start()
/**
* \brief Stop capture from camera
*
- * This method stops capturing and processing requests immediately. All pending
- * requests are cancelled and complete synchronously in an error state.
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete synchronously in an error state.
*
- * \context This function may only be called when the camera is in the Running
- * state as defined in \ref camera_operation, and shall be synchronized by the
- * caller with other functions that affect the camera state.
+ * \context This function may be called in any camera state as defined in \ref
+ * camera_operation, and shall be synchronized by the caller with other
+ * functions that affect the camera state. If called when the camera isn't
+ * running, it is a no-op.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
@@ -925,16 +1388,29 @@ int Camera::start()
*/
int Camera::stop()
{
- int ret = p_->isAccessAllowed(Private::CameraRunning);
+ Private *const d = _d();
+
+ /*
+ * \todo Make calling stop() when not in 'Running' part of the state
+ * machine rather than taking this shortcut
+ */
+ if (!d->isRunning())
+ return 0;
+
+ int ret = d->isAccessAllowed(Private::CameraRunning);
if (ret < 0)
return ret;
LOG(Camera, Debug) << "Stopping capture";
- p_->setState(Private::CameraConfigured);
+ d->setState(Private::CameraStopping);
+
+ d->pipe_->invokeMethod(&PipelineHandler::stop, ConnectionTypeBlocking,
+ this);
- p_->pipe_->invokeMethod(&PipelineHandler::stop, ConnectionTypeBlocking,
- this);
+ ASSERT(!d->pipe_->hasPendingRequests(this));
+
+ d->setState(Private::CameraConfigured);
return 0;
}
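
With the new signature, a capture session can pass initial controls atomically at start time. A hedged sketch (the control choice and value are illustrative only, and `using namespace libcamera;` is assumed):

	ControlList initCtrls(camera->controls());
	initCtrls.set(controls::Brightness, 0.5f);	/* hypothetical value */

	if (camera->start(&initCtrls))
		return;

	/* ... queue requests, process completions ... */

	camera->stop();	/* Now a no-op if the camera isn't running. */
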
@@ -944,13 +1420,16 @@ int Camera::stop()
* \param[in] request The request that has completed
*
* This function is called by the pipeline handler to notify the camera that
- * the request has completed. It emits the requestCompleted signal and deletes
- * the request.
+ * the request has completed. It emits the requestCompleted signal.
*/
void Camera::requestComplete(Request *request)
{
+ /* Disconnected cameras are still able to complete requests. */
+ if (_d()->isAccessAllowed(Private::CameraStopping, Private::CameraRunning,
+ true))
+ LOG(Camera, Fatal) << "Trying to complete a request when stopped";
+
requestCompleted.emit(request);
- delete request;
}
} /* namespace libcamera */
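
Since completed requests are no longer deleted by the camera, applications typically recycle them from the completion handler. An illustrative sketch (handler name hypothetical; Request::reuse() is the recycling API in the current tree):

	static void onRequestCompleted(Request *request)
	{
		if (request->status() == Request::RequestCancelled)
			return;

		/* ... consume the buffers ... */

		/* Recycle the request and its buffers for the next frame. */
		request->reuse(Request::ReuseBuffers);
	}

	camera->requestCompleted.connect(&onRequestCompleted);
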
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index 59dcede2..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
-#include "camera_controls.h"
+#include "libcamera/internal/camera_controls.h"
#include <libcamera/camera.h>
#include <libcamera/controls.h>
@@ -36,7 +36,7 @@ CameraControlValidator::CameraControlValidator(Camera *camera)
const std::string &CameraControlValidator::name() const
{
- return camera_->name();
+ return camera_->id();
}
/**
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
new file mode 100644
index 00000000..ccc2a6a6
--- /dev/null
+++ b/src/libcamera/camera_lens.cpp
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * A camera lens
+ */
+
+#include "libcamera/internal/camera_lens.h"
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+
+/**
+ * \file camera_lens.h
+ * \brief A camera lens controller
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraLens)
+
+/**
+ * \class CameraLens
+ * \brief A camera lens based on V4L2 subdevices
+ *
+ * The CameraLens class eases handling of lenses for pipeline handlers by
+ * hiding the details of the V4L2 subdevice kernel API and caching lens
+ * information.
+ */
+
+/**
+ * \brief Construct a CameraLens
+ * \param[in] entity The media entity backing the camera lens controller
+ *
+ * Once constructed the instance must be initialized with init().
+ */
+CameraLens::CameraLens(const MediaEntity *entity)
+ : entity_(entity)
+{
+}
+
+/**
+ * \brief Destroy a CameraLens
+ */
+CameraLens::~CameraLens() = default;
+
+/**
+ * \brief Initialize the camera lens instance
+ *
+ * This function performs the initialisation steps of the CameraLens that may
+ * fail. It shall be called once and only once after constructing the instance.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int CameraLens::init()
+{
+ if (entity_->function() != MEDIA_ENT_F_LENS) {
+ LOG(CameraLens, Error)
+ << "Invalid lens function "
+ << utils::hex(entity_->function());
+ return -EINVAL;
+ }
+
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret < 0)
+ return ret;
+
+ ret = validateLensDriver();
+ if (ret)
+ return ret;
+
+ model_ = subdev_->model();
+ return 0;
+}
+
+/**
+ * \brief Set the focal point of the lens to a specific position
+ * \param[in] position The focal point of the lens
+ *
+ * This function sets the focal point of the lens to the value of \a position.
+ *
+ * \return 0 on success or -EINVAL otherwise
+ */
+int CameraLens::setFocusPosition(int32_t position)
+{
+ ControlList lensCtrls(subdev_->controls());
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, static_cast<int32_t>(position));
+
+ if (subdev_->setControls(&lensCtrls))
+ return -EINVAL;
+
+ return 0;
+}
+
+int CameraLens::validateLensDriver()
+{
+ int ret = 0;
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_FOCUS_ABSOLUTE,
+ };
+
+ const ControlInfoMap &controls = subdev_->controls();
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraLens, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraLens, Error)
+ << "The lens kernel driver needs to be fixed";
+ LOG(CameraLens, Error)
+ << "See Documentation/lens_driver_requirements.rst in"
+ << " the libcamera sources for more information";
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * \fn CameraLens::model()
+ * \brief Retrieve the lens model name
+ *
+ * The lens model name is a free-formed string that uniquely identifies the
+ * lens model.
+ *
+ * \return The lens model name
+ */
+
+std::string CameraLens::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+/**
+ * \fn CameraLens::controls()
+ * \brief Retrieve the V4L2 controls of the lens' subdev
+ *
+ * \return A map of the V4L2 controls supported by the lens' driver
+ */
+const ControlInfoMap &CameraLens::controls() const
+{
+ return subdev_->controls();
+}
+
+} /* namespace libcamera */
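
How a pipeline handler would be expected to drive this new class, as a hedged sketch (`entity` stands for the lens MediaEntity found during matching):

	auto lens = std::make_unique<CameraLens>(entity);
	if (lens->init() < 0)
		return;	/* e.g. missing V4L2_CID_FOCUS_ABSOLUTE */

	/* Position units are driver-specific; 512 is an arbitrary example. */
	lens->setFocusPosition(512);
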
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index fddf7349..87e6717e 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,72 +2,45 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
-#include <libcamera/camera_manager.h>
+#include "libcamera/internal/camera_manager.h"
-#include <condition_variable>
-#include <map>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/event_dispatcher.h>
+#include <libcamera/property_ids.h>
-#include "device_enumerator.h"
-#include "event_dispatcher_poll.h"
-#include "log.h"
-#include "pipeline_handler.h"
-#include "thread.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
- * \file camera_manager.h
+ * \file libcamera/camera_manager.h
* \brief The camera manager
*/
+/**
+ * \internal
+ * \file libcamera/internal/camera_manager.h
+ * \brief Internal camera manager support
+ */
+
+/**
+ * \brief Top-level libcamera namespace
+ */
namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
-class CameraManager::Private : public Thread
-{
-public:
- Private(CameraManager *cm);
-
- int start();
- void addCamera(std::shared_ptr<Camera> &camera, dev_t devnum);
- void removeCamera(Camera *camera);
-
- /*
- * This mutex protects
- *
- * - initialized_ and status_ during initialization
- * - cameras_ and camerasByDevnum_ after initialization
- */
- Mutex mutex_;
- std::vector<std::shared_ptr<Camera>> cameras_;
- std::map<dev_t, std::weak_ptr<Camera>> camerasByDevnum_;
-
-protected:
- void run() override;
-
-private:
- int init();
- void cleanup();
-
- CameraManager *cm_;
-
- std::condition_variable cv_;
- bool initialized_;
- int status_;
-
- std::vector<std::shared_ptr<PipelineHandler>> pipes_;
- std::unique_ptr<DeviceEnumerator> enumerator_;
-};
-
-CameraManager::Private::Private(CameraManager *cm)
- : cm_(cm), initialized_(false)
+#ifndef __DOXYGEN_PUBLIC__
+CameraManager::Private::Private()
+ : initialized_(false)
{
+ ipaManager_ = std::make_unique<IPAManager>();
}
int CameraManager::Private::start()
@@ -79,7 +52,9 @@ int CameraManager::Private::start()
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return initialized_; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return initialized_;
+ });
status = status_;
}
@@ -105,8 +80,10 @@ void CameraManager::Private::run()
mutex_.unlock();
cv_.notify_one();
- if (ret < 0)
+ if (ret < 0) {
+ cleanup();
return;
+ }
/* Now start processing events and messages. */
exec();
@@ -120,97 +97,173 @@ int CameraManager::Private::init()
if (!enumerator_ || enumerator_->enumerate())
return -ENODEV;
+ createPipelineHandlers();
+ enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
+
+ return 0;
+}
+
+void CameraManager::Private::createPipelineHandlers()
+{
/*
- * TODO: Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+ * \todo Try to read the handlers and their order from a configuration
+ * file, and only fall back on the environment variable or on all
+ * handlers if there is no configuration file.
*/
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
-
- for (PipelineHandlerFactory *factory : factories) {
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
/*
- * Try each pipeline handler until it exhaust
- * all pipelines it can provide.
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(cm_);
- if (!pipe->match(enumerator_.get()))
- break;
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- pipes_.push_back(std::move(pipe));
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
}
+
+ return;
}
- /* TODO: register hot-plug callback here */
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
- return 0;
+ /* Match all the registered pipeline handlers. */
+ for (const PipelineHandlerFactoryBase *factory : factories) {
+ LOG(Camera, Debug)
+ << "Found registered pipeline handler '"
+ << factory->name() << "'";
+ /*
+ * Try each pipeline handler until it exhausts
+ * all pipelines it can provide.
+ */
+ pipelineFactoryMatch(factory);
+ }
+}
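
For example, setting LIBCAMERA_PIPELINES_MATCH_LIST="rkisp1,uvcvideo" (handler names illustrative) would match the enumerated devices against the rkisp1 factory first and then uvcvideo, skipping all other registered handlers.
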
+
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
+ }
}
void CameraManager::Private::cleanup()
{
- /* TODO: unregister hot-plug callback here */
+ enumerator_->devicesAdded.disconnect(this);
/*
- * Release all references to cameras and pipeline handlers to ensure
- * they all get destroyed before the device enumerator deletes the
- * media devices.
+ * Release all references to cameras to ensure they all get destroyed
+ * before the device enumerator deletes the media devices. Cameras are
+ * destroyed via the Object::deleteLater() API, hence we need to
+ * explicitly process deletion requests from the thread's message queue
+ * as the event loop is not running here.
*/
- pipes_.clear();
- cameras_.clear();
+ {
+ MutexLocker locker(mutex_);
+ cameras_.clear();
+ }
+
+ dispatchMessages(Message::Type::DeferredDelete);
enumerator_.reset(nullptr);
}
-void CameraManager::Private::addCamera(std::shared_ptr<Camera> &camera,
- dev_t devnum)
+/**
+ * \brief Add a camera to the camera manager
+ * \param[in] camera The camera to be added
+ *
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager. Registered cameras are immediately made
+ * available to the system.
+ *
+ * Device numbers from the SystemDevices property are used by the V4L2
+ * compatibility layer to map V4L2 device nodes to Camera instances.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
- for (std::shared_ptr<Camera> c : cameras_) {
- if (c->name() == camera->name()) {
- LOG(Camera, Warning)
- << "Registering camera with duplicate name '"
- << camera->name() << "'";
- break;
+ for (const std::shared_ptr<Camera> &c : cameras_) {
+ if (c->id() == camera->id()) {
+ LOG(Camera, Fatal)
+ << "Trying to register a camera with a duplicated ID '"
+ << camera->id() << "'";
+ return;
}
}
cameras_.push_back(std::move(camera));
- if (devnum) {
- unsigned int index = cameras_.size() - 1;
- camerasByDevnum_[devnum] = cameras_[index];
- }
+ unsigned int index = cameras_.size() - 1;
+
+ /* Report the addition to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraAdded.emit(cameras_[index]);
}
-void CameraManager::Private::removeCamera(Camera *camera)
+/**
+ * \brief Remove a camera from the camera manager
+ * \param[in] camera The camera to be removed
+ *
+ * This function is called by pipeline handlers to unregister cameras from the
+ * camera manager. Unregistered cameras won't be reported anymore by the
+ * cameras() and get() calls, but references may still exist in applications.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
auto iter = std::find_if(cameras_.begin(), cameras_.end(),
[camera](std::shared_ptr<Camera> &c) {
- return c.get() == camera;
+ return c.get() == camera.get();
});
if (iter == cameras_.end())
return;
LOG(Camera, Debug)
- << "Unregistering camera '" << camera->name() << "'";
-
- auto iter_d = std::find_if(camerasByDevnum_.begin(), camerasByDevnum_.end(),
- [camera](const std::pair<dev_t, std::weak_ptr<Camera>> &p) {
- return p.second.lock().get() == camera;
- });
- if (iter_d != camerasByDevnum_.end())
- camerasByDevnum_.erase(iter_d);
+ << "Unregistering camera '" << camera->id() << "'";
cameras_.erase(iter);
+
+ /* Report the removal to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraRemoved.emit(camera);
}
/**
+ * \fn CameraManager::Private::ipaManager() const
+ * \brief Retrieve the IPAManager
+ * \context This function is \threadsafe.
+ * \return The IPAManager for this CameraManager
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
* \class CameraManager
* \brief Provide access and manage all cameras in the system
*
@@ -224,12 +277,8 @@ void CameraManager::Private::removeCamera(Camera *camera)
* a time. Attempting to create a second instance without first deleting the
* existing instance results in undefined behaviour.
*
- * The manager is initially stopped, and shall be configured before being
- * started. In particular a custom event dispatcher shall be installed if
- * needed with CameraManager::setEventDispatcher().
- *
- * Once the camera manager is configured, it shall be started with start().
- * This will enumerate all the cameras present in the system, which can then be
+ * The manager is initially stopped, and shall be started with start(). This
+ * will enumerate all the cameras present in the system, which can then be
* listed with list() and retrieved with get().
*
* Cameras are shared through std::shared_ptr<>, ensuring that a camera will
@@ -237,16 +286,12 @@ void CameraManager::Private::removeCamera(Camera *camera)
* action from the application. Once the application has released all the
* references it held to cameras, the camera manager can be stopped with
* stop().
- *
- * \todo Add interface to register a notification callback to the user to be
- * able to inform it new cameras have been hot-plugged or cameras have been
- * removed due to hot-unplug.
*/
CameraManager *CameraManager::self_ = nullptr;
CameraManager::CameraManager()
- : p_(new CameraManager::Private(this))
+ : Extensible(std::make_unique<CameraManager::Private>())
{
if (self_)
LOG(Camera, Fatal)
@@ -255,6 +300,11 @@ CameraManager::CameraManager()
self_ = this;
}
+/**
+ * \brief Destroy the camera manager
+ *
+ * Destroying the camera manager stops it if it is currently running.
+ */
CameraManager::~CameraManager()
{
stop();
@@ -276,7 +326,7 @@ int CameraManager::start()
{
LOG(Camera, Info) << "libcamera " << version_;
- int ret = p_->start();
+ int ret = _d()->start();
if (ret)
LOG(Camera, Error) << "Failed to start camera manager: "
<< strerror(-ret);
@@ -296,8 +346,9 @@ int CameraManager::start()
*/
void CameraManager::stop()
{
- p_->exit();
- p_->wait();
+ Private *const d = _d();
+ d->exit();
+ d->wait();
}
/**
@@ -313,14 +364,16 @@ void CameraManager::stop()
*/
std::vector<std::shared_ptr<Camera>> CameraManager::cameras() const
{
- MutexLocker locker(p_->mutex_);
+ const Private *const d = _d();
- return p_->cameras_;
+ MutexLocker locker(d->mutex_);
+
+ return d->cameras_;
}
/**
- * \brief Get a camera based on name
- * \param[in] name Name of camera to get
+ * \brief Get a camera based on ID
+ * \param[in] id ID of camera to get
*
* Before calling this function the caller is responsible for ensuring that
* the camera manager is running.
@@ -329,12 +382,14 @@ std::vector<std::shared_ptr<Camera>> CameraManager::cameras() const
*
* \return Shared pointer to Camera object or nullptr if camera not found
*/
-std::shared_ptr<Camera> CameraManager::get(const std::string &name)
+std::shared_ptr<Camera> CameraManager::get(const std::string &id)
{
- MutexLocker locker(p_->mutex_);
+ Private *const d = _d();
+
+ MutexLocker locker(d->mutex_);
- for (std::shared_ptr<Camera> camera : p_->cameras_) {
- if (camera->name() == name)
+ for (const std::shared_ptr<Camera> &camera : d->cameras_) {
+ if (camera->id() == id)
return camera;
}
@@ -342,69 +397,32 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &name)
}
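
With the rename from name() to id(), lookup code becomes (a sketch assuming the manager has been started and holds at least one camera):

	CameraManager cm;
	cm.start();

	/* Cameras are now identified by a unique ID string. */
	std::shared_ptr<Camera> cam = cm.get(cm.cameras()[0]->id());
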
/**
- * \brief Retrieve a camera based on device number
- * \param[in] devnum Device number of camera to get
+ * \var CameraManager::cameraAdded
+ * \brief Notify of a new camera added to the system
*
- * This method is meant solely for the use of the V4L2 compatibility
- * layer, to map device nodes to Camera instances. Applications shall
- * not use it and shall instead retrieve cameras by name.
+ * This signal is emitted when a new camera is detected and successfully handled
+ * by the camera manager. The notification occurs both for cameras detected
+ * when the manager is started with start() and for cameras that are later
+ * connected to the system. When the signal is emitted the new camera is already
+ * available from the list of cameras().
*
- * Before calling this function the caller is responsible for ensuring that
- * the camera manager is running.
- *
- * \context This function is \threadsafe.
- *
- * \return Shared pointer to Camera object, which is empty if the camera is
- * not found
+ * The signal is emitted from the CameraManager thread. Applications shall
+ * minimize the time spent in the signal handler and shall in particular not
+ * perform any blocking operation.
*/
-std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
-{
- MutexLocker locker(p_->mutex_);
-
- auto iter = p_->camerasByDevnum_.find(devnum);
- if (iter == p_->camerasByDevnum_.end())
- return nullptr;
-
- return iter->second.lock();
-}
/**
- * \brief Add a camera to the camera manager
- * \param[in] camera The camera to be added
- * \param[in] devnum The device number to associate with \a camera
- *
- * This function is called by pipeline handlers to register the cameras they
- * handle with the camera manager. Registered cameras are immediately made
- * available to the system.
+ * \var CameraManager::cameraRemoved
+ * \brief Notify of a camera removed from the system
*
- * \a devnum is used by the V4L2 compatibility layer to map V4L2 device nodes
- * to Camera instances.
+ * This signal is emitted when a camera is removed from the system. When the
+ * signal is emitted the camera is not available from the list of cameras()
+ * anymore.
*
- * \context This function shall be called from the CameraManager thread.
+ * The signal is emitted from the CameraManager thread. Applications shall
+ * minimize the time spent in the signal handler and shall in particular not
+ * perform any blocking operation.
*/
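
Together the two signals give applications basic hotplug support. A sketch (handler names hypothetical; both run in the CameraManager thread, so they must not block):

	static void onCameraAdded(std::shared_ptr<Camera> cam)
	{
		/* Keep this short: it runs in the CameraManager thread. */
	}

	static void onCameraRemoved(std::shared_ptr<Camera> cam)
	{
	}

	cm->cameraAdded.connect(&onCameraAdded);
	cm->cameraRemoved.connect(&onCameraRemoved);
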
-void CameraManager::addCamera(std::shared_ptr<Camera> camera, dev_t devnum)
-{
- ASSERT(Thread::current() == p_.get());
-
- p_->addCamera(camera, devnum);
-}
-
-/**
- * \brief Remove a camera from the camera manager
- * \param[in] camera The camera to be removed
- *
- * This function is called by pipeline handlers to unregister cameras from the
- * camera manager. Unregistered cameras won't be reported anymore by the
- * cameras() and get() calls, but references may still exist in applications.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::removeCamera(Camera *camera)
-{
- ASSERT(Thread::current() == p_.get());
-
- p_->removeCamera(camera);
-}
/**
* \fn const std::string &CameraManager::version()
@@ -413,38 +431,4 @@ void CameraManager::removeCamera(Camera *camera)
* \return The libcamera version string
*/
-/**
- * \brief Set the event dispatcher
- * \param[in] dispatcher Pointer to the event dispatcher
- *
- * libcamera requires an event dispatcher to integrate event notification and
- * timers with the application event loop. Applications that want to provide
- * their own event dispatcher shall call this function once and only once before
- * the camera manager is started with start(). If no event dispatcher is
- * provided, a default poll-based implementation will be used.
- *
- * The CameraManager takes ownership of the event dispatcher and will delete it
- * when the application terminates.
- */
-void CameraManager::setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher)
-{
- thread()->setEventDispatcher(std::move(dispatcher));
-}
-
-/**
- * \brief Retrieve the event dispatcher
- *
- * This function retrieves the event dispatcher set with setEventDispatcher().
- * If no dispatcher has been set, a default poll-based implementation is created
- * and returned, and no custom event dispatcher may be installed anymore.
- *
- * The returned event dispatcher is valid until the camera manager is destroyed.
- *
- * \return Pointer to the event dispatcher
- */
-EventDispatcher *CameraManager::eventDispatcher()
-{
- return thread()->eventDispatcher();
-}
-
} /* namespace libcamera */
diff --git a/src/libcamera/camera_sensor.cpp b/src/libcamera/camera_sensor.cpp
deleted file mode 100644
index 2219a430..00000000
--- a/src/libcamera/camera_sensor.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_sensor.cpp - A camera sensor
- */
-
-#include "camera_sensor.h"
-
-#include <algorithm>
-#include <float.h>
-#include <iomanip>
-#include <limits.h>
-#include <math.h>
-
-#include <libcamera/property_ids.h>
-
-#include "formats.h"
-#include "utils.h"
-#include "v4l2_subdevice.h"
-
-/**
- * \file camera_sensor.h
- * \brief A camera sensor
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(CameraSensor);
-
-/**
- * \class CameraSensor
- * \brief A camera sensor based on V4L2 subdevices
- *
- * The CameraSensor class eases handling of sensors for pipeline handlers by
- * hiding the details of the V4L2 subdevice kernel API and caching sensor
- * information.
- *
- * The implementation is currently limited to sensors that expose a single V4L2
- * subdevice with a single pad, and support the same frame sizes for all
- * supported media bus codes. It will be extended to support more complex
- * devices as the needs arise.
- */
-
-/**
- * \brief Construct a CameraSensor
- * \param[in] entity The media entity backing the camera sensor
- *
- * Once constructed the instance must be initialized with init().
- */
-CameraSensor::CameraSensor(const MediaEntity *entity)
- : entity_(entity), properties_(properties::properties)
-{
- subdev_ = new V4L2Subdevice(entity);
-}
-
-/**
- * \brief Destroy a CameraSensor
- */
-CameraSensor::~CameraSensor()
-{
- delete subdev_;
-}
-
-/**
- * \brief Initialize the camera sensor instance
- *
- * This method performs the initialisation steps of the CameraSensor that may
- * fail. It shall be called once and only once after constructing the instance.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::init()
-{
- int ret;
-
- if (entity_->pads().size() != 1) {
- LOG(CameraSensor, Error)
- << "Sensors with more than one pad are not supported";
- return -EINVAL;
- }
-
- if (entity_->function() != MEDIA_ENT_F_CAM_SENSOR) {
- LOG(CameraSensor, Error)
- << "Invalid sensor function "
- << utils::hex(entity_->function());
- return -EINVAL;
- }
-
- ret = subdev_->open();
- if (ret < 0)
- return ret;
-
- /* Retrieve and store the camera sensor properties. */
- const ControlInfoMap &controls = subdev_->controls();
- int32_t propertyValue;
-
- /* Camera Location: default is front location. */
- const auto &locationControl = controls.find(V4L2_CID_CAMERA_SENSOR_LOCATION);
- if (locationControl != controls.end()) {
- int32_t v4l2Location =
- locationControl->second.def().get<int32_t>();
-
- switch (v4l2Location) {
- default:
- LOG(CameraSensor, Warning)
- << "Unsupported camera location "
- << v4l2Location << ", setting to Front";
- /* Fall-through */
- case V4L2_LOCATION_FRONT:
- propertyValue = properties::CameraLocationFront;
- break;
- case V4L2_LOCATION_BACK:
- propertyValue = properties::CameraLocationBack;
- break;
- case V4L2_LOCATION_EXTERNAL:
- propertyValue = properties::CameraLocationExternal;
- break;
- }
- } else {
- propertyValue = properties::CameraLocationFront;
- }
- properties_.set(properties::Location, propertyValue);
-
- /* Camera Rotation: default is 0 degrees. */
- const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
- if (rotationControl != controls.end())
- propertyValue = rotationControl->second.def().get<int32_t>();
- else
- propertyValue = 0;
- properties_.set(properties::Rotation, propertyValue);
-
- /* Enumerate and cache media bus codes and sizes. */
- const ImageFormats formats = subdev_->formats(0);
- if (formats.isEmpty()) {
- LOG(CameraSensor, Error) << "No image format found";
- return -EINVAL;
- }
-
- mbusCodes_ = formats.formats();
-
- /*
- * Extract the supported sizes from the first format as we only support
- * sensors that offer the same frame sizes for all media bus codes.
- * Verify this assumption and reject the sensor if it isn't true.
- */
- const std::vector<SizeRange> &sizes = formats.sizes(mbusCodes_[0]);
- std::transform(sizes.begin(), sizes.end(), std::back_inserter(sizes_),
- [](const SizeRange &range) { return range.max; });
-
- for (unsigned int code : mbusCodes_) {
- if (formats.sizes(code) != sizes) {
- LOG(CameraSensor, Error)
- << "Frame sizes differ between media bus codes";
- return -EINVAL;
- }
- }
-
- /* Sort the media bus codes and sizes. */
- std::sort(mbusCodes_.begin(), mbusCodes_.end());
- std::sort(sizes_.begin(), sizes_.end());
-
- return 0;
-}
-
-/**
- * \fn CameraSensor::entity()
- * \brief Retrieve the sensor media entity
- * \return The sensor media entity
- */
-
-/**
- * \fn CameraSensor::mbusCodes()
- * \brief Retrieve the media bus codes supported by the camera sensor
- * \return The supported media bus codes sorted in increasing order
- */
-
-/**
- * \fn CameraSensor::sizes()
- * \brief Retrieve the frame sizes supported by the camera sensor
- * \return The supported frame sizes sorted in increasing order
- */
-
-/**
- * \brief Retrieve the camera sensor resolution
- * \return The camera sensor resolution in pixels
- */
-const Size &CameraSensor::resolution() const
-{
- /*
- * The sizes_ vector is sorted in ascending order, the resolution is
- * thus the last element of the vector.
- */
- return sizes_.back();
-}
-
-/**
- * \brief Retrieve the best sensor format for a desired output
- * \param[in] mbusCodes The list of acceptable media bus codes
- * \param[in] size The desired size
- *
- * Media bus codes are selected from \a mbusCodes, which lists all acceptable
- * codes in decreasing order of preference. This method selects the first code
- * from the list that is supported by the sensor. If none of the desired codes
- * is supported, it returns an error.
- *
- * \a size indicates the desired size at the output of the sensor. This method
- * selects the best size supported by the sensor according to the following
- * criteria.
- *
- * - The desired \a size shall fit in the sensor output size to avoid the need
- * to up-scale.
- * - The sensor output size shall match the desired aspect ratio to avoid the
- * need to crop the field of view.
- * - The sensor output size shall be as small as possible to lower the required
- * bandwidth.
- *
- * The use of this method is optional, as the above criteria may not match the
- * needs of all pipeline handlers. Pipeline handlers may implement custom
- * sensor format selection when needed.
- *
- * The returned sensor output format is guaranteed to be acceptable by the
- * setFormat() method without any modification.
- *
- * \return The best sensor output format matching the desired media bus codes
- * and size on success, or an empty format otherwise.
- */
-V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const
-{
- V4L2SubdeviceFormat format{};
-
- for (unsigned int code : mbusCodes) {
- if (std::any_of(mbusCodes_.begin(), mbusCodes_.end(),
- [code](unsigned int c) { return c == code; })) {
- format.mbus_code = code;
- break;
- }
- }
-
- if (!format.mbus_code) {
- LOG(CameraSensor, Debug) << "No supported format found";
- return format;
- }
-
- unsigned int desiredArea = size.width * size.height;
- unsigned int bestArea = UINT_MAX;
- float desiredRatio = static_cast<float>(size.width) / size.height;
- float bestRatio = FLT_MAX;
- const Size *bestSize = nullptr;
-
- for (const Size &sz : sizes_) {
- if (sz.width < size.width || sz.height < size.height)
- continue;
-
- float ratio = static_cast<float>(sz.width) / sz.height;
- float ratioDiff = fabsf(ratio - desiredRatio);
- unsigned int area = sz.width * sz.height;
- unsigned int areaDiff = area - desiredArea;
-
- if (ratioDiff > bestRatio)
- continue;
-
- if (ratioDiff < bestRatio || areaDiff < bestArea) {
- bestRatio = ratioDiff;
- bestArea = areaDiff;
- bestSize = &sz;
- }
- }
-
- if (!bestSize) {
- LOG(CameraSensor, Debug) << "No supported size found";
- return format;
- }
-
- format.size = *bestSize;
-
- return format;
-}
-
-/**
- * \brief Set the sensor output format
- * \param[in] format The desired sensor output format
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format)
-{
- return subdev_->setFormat(0, format);
-}
-
-/**
- * \brief Retrieve the supported V4L2 controls and their information
- * \return A map of the V4L2 controls supported by the sensor
- */
-const ControlInfoMap &CameraSensor::controls() const
-{
- return subdev_->controls();
-}
-
-/**
- * \brief Read controls from the sensor
- * \param[inout] ctrls The list of controls to read
- *
- * This method reads the value of all controls contained in \a ctrls, and stores
- * their values in the corresponding \a ctrls entry.
- *
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is a compound control, or if any
- * other error occurs during validation of the requested controls, no control is
- * read and this method returns -EINVAL.
- *
- * If an error occurs while reading the controls, the index of the first control
- * that couldn't be read is returned. The value of all controls below that index
- * are updated in \a ctrls, while the value of all the other controls are not
- * changed.
- *
- * \sa V4L2Device::getControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
- */
-int CameraSensor::getControls(ControlList *ctrls)
-{
- return subdev_->getControls(ctrls);
-}
-
-/**
- * \fn CameraSensor::properties()
- * \brief Retrieve the camera sensor properties
- * \return The list of camera sensor properties
- */
-
-/**
- * \brief Write controls to the sensor
- * \param[in] ctrls The list of controls to write
- *
- * This method writes the value of all controls contained in \a ctrls, and
- * stores the values actually applied to the device in the corresponding
- * \a ctrls entry.
- *
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, is a
- * compound control, or if any other error occurs during validation of
- * the requested controls, no control is written and this method returns
- * -EINVAL.
- *
- * If an error occurs while writing the controls, the index of the first
- * control that couldn't be written is returned. All controls below that index
- * are written and their values are updated in \a ctrls, while all other
- * controls are not written and their values are not changed.
- *
- * \sa V4L2Device::setControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
- */
-int CameraSensor::setControls(ControlList *ctrls)
-{
- return subdev_->setControls(ctrls);
-}
-
-std::string CameraSensor::logPrefix() const
-{
- return "'" + subdev_->entity()->name() + "'";
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
new file mode 100644
index 00000000..3d1c456c
--- /dev/null
+++ b/src/libcamera/color_space.cpp
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * Color spaces
+ */
+
+#include <libcamera/color_space.h>
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
+
+/**
+ * \file color_space.h
+ * \brief Class and enums to represent color spaces
+ */
+
+namespace libcamera {
+
+/**
+ * \class ColorSpace
+ * \brief Class to describe a color space
+ *
+ * The ColorSpace class defines the color primaries, the transfer function,
+ * the Y'CbCr encoding associated with the color space, and the range
+ * (sometimes also referred to as the quantisation) of the color space.
+ *
+ * Certain combinations of these fields form well-known standard color
+ * spaces such as "sRGB" or "Rec709".
+ *
+ * In the strictest sense a "color space" formally only refers to the
+ * color primaries and white point. Here, however, the ColorSpace class
+ * adopts the common broader usage that includes the transfer function,
+ * Y'CbCr encoding method and quantisation.
+ *
+ * More information on color spaces is available in the V4L2 documentation, see
+ * in particular
+ *
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-srgb">sRGB</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-jpeg">JPEG</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-smpte-170m">SMPTE 170M</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-rec709">Rec.709</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-bt2020">Rec.2020</a>
+ *
+ * Note that there is no guarantee of a 1:1 mapping between color space names
+ * and definitions in libcamera and V4L2. Two notable differences are
+ *
+ * - The sRGB libcamera color space is defined for RGB formats only with no
+ * Y'CbCr encoding and a full quantization range, while the V4L2 SRGB color
+ * space has a Y'CbCr encoding and a limited quantization range.
+ * - The sYCC libcamera color space is called JPEG in V4L2 due to historical
+ * reasons.
+ *
+ * \todo Define the color space fully in the libcamera API to avoid referencing
+ * V4L2
+ */
+
+/**
+ * \enum ColorSpace::Primaries
+ * \brief The color primaries for this color space
+ *
+ * \var ColorSpace::Primaries::Raw
+ * \brief These are raw colors directly from a sensor; the primaries are
+ * unspecified
+ *
+ * \var ColorSpace::Primaries::Smpte170m
+ * \brief SMPTE 170M color primaries
+ *
+ * \var ColorSpace::Primaries::Rec709
+ * \brief Rec.709 color primaries
+ *
+ * \var ColorSpace::Primaries::Rec2020
+ * \brief Rec.2020 color primaries
+ */
+
+/**
+ * \enum ColorSpace::TransferFunction
+ * \brief The transfer function used for this color space
+ *
+ * \var ColorSpace::TransferFunction::Linear
+ * \brief This color space uses a linear (identity) transfer function
+ *
+ * \var ColorSpace::TransferFunction::Srgb
+ * \brief sRGB transfer function
+ *
+ * \var ColorSpace::TransferFunction::Rec709
+ * \brief Rec.709 transfer function
+ */
+
+/**
+ * \enum ColorSpace::YcbcrEncoding
+ * \brief The Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::None
+ * \brief There is no defined Y'CbCr encoding (used for non-YUV formats)
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec601
+ * \brief Rec.601 Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec709
+ * \brief Rec.709 Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec2020
+ * \brief Rec.2020 Y'CbCr encoding
+ */
+
+/**
+ * \enum ColorSpace::Range
+ * \brief The range (sometimes "quantisation") for this color space
+ *
+ * \var ColorSpace::Range::Full
+ * \brief This color space uses full range pixel values
+ *
+ * \var ColorSpace::Range::Limited
+ * \brief This color space uses limited range pixel values, being
+ * 16 to 235 for Y' and 16 to 240 for Cb and Cr (8 bits per sample)
+ * or 64 to 940 for Y' and 64 to 960 for Cb and Cr (10 bits)
+ */
+
+/**
+ * \fn ColorSpace::ColorSpace(Primaries p, TransferFunction t, Encoding e, Range r)
+ * \brief Construct a ColorSpace from explicit values
+ * \param[in] p The color primaries
+ * \param[in] t The transfer function for the color space
+ * \param[in] e The Y'CbCr encoding
+ * \param[in] r The range of the pixel values in this color space
+ */
+
+/**
+ * \brief A constant representing a raw color space (from a sensor)
+ */
+const ColorSpace ColorSpace::Raw = {
+ Primaries::Raw,
+ TransferFunction::Linear,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sRGB color space (RGB formats only)
+ */
+const ColorSpace ColorSpace::Srgb = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sYCC color space, typically used for
+ * encoding JPEG images
+ */
+const ColorSpace ColorSpace::Sycc = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::Rec601,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the SMPTE170M color space
+ */
+const ColorSpace ColorSpace::Smpte170m = {
+ Primaries::Smpte170m,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec601,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.709 color space
+ */
+const ColorSpace ColorSpace::Rec709 = {
+ Primaries::Rec709,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec709,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.2020 color space
+ */
+const ColorSpace ColorSpace::Rec2020 = {
+ Primaries::Rec2020,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec2020,
+ Range::Limited
+};
+
+/**
+ * \var ColorSpace::primaries
+ * \brief The color primaries of this color space
+ */
+
+/**
+ * \var ColorSpace::transferFunction
+ * \brief The transfer function used by this color space
+ */
+
+/**
+ * \var ColorSpace::ycbcrEncoding
+ * \brief The Y'CbCr encoding used by this color space
+ */
+
+/**
+ * \var ColorSpace::range
+ * \brief The pixel range used by this color space
+ */
+
+namespace {
+
+const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
+ { ColorSpace::Raw, "RAW" },
+ { ColorSpace::Srgb, "sRGB" },
+ { ColorSpace::Sycc, "sYCC" },
+ { ColorSpace::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Rec709, "Rec709" },
+ { ColorSpace::Rec2020, "Rec2020" },
+} };
+
+const std::map<ColorSpace::Primaries, std::string> primariesNames = {
+ { ColorSpace::Primaries::Raw, "RAW" },
+ { ColorSpace::Primaries::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Primaries::Rec709, "Rec709" },
+ { ColorSpace::Primaries::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::TransferFunction, std::string> transferNames = {
+ { ColorSpace::TransferFunction::Linear, "Linear" },
+ { ColorSpace::TransferFunction::Srgb, "sRGB" },
+ { ColorSpace::TransferFunction::Rec709, "Rec709" },
+};
+
+const std::map<ColorSpace::YcbcrEncoding, std::string> encodingNames = {
+ { ColorSpace::YcbcrEncoding::None, "None" },
+ { ColorSpace::YcbcrEncoding::Rec601, "Rec601" },
+ { ColorSpace::YcbcrEncoding::Rec709, "Rec709" },
+ { ColorSpace::YcbcrEncoding::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::Range, std::string> rangeNames = {
+ { ColorSpace::Range::Full, "Full" },
+ { ColorSpace::Range::Limited, "Limited" },
+};
+
+} /* namespace */
+
+/**
+ * \brief Assemble and return a readable string representation of the
+ * ColorSpace
+ *
+ * If the color space matches a standard ColorSpace (such as ColorSpace::Sycc)
+ * then the short name of the color space ("sYCC") is returned. Otherwise
+ * the four constituent parts of the ColorSpace are assembled into a longer
+ * string.
+ *
+ * \return A string describing the ColorSpace
+ */
+std::string ColorSpace::toString() const
+{
+ /* Print out a brief name only for standard color spaces. */
+
+ auto it = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [this](const auto &item) {
+ return *this == item.first;
+ });
+ if (it != colorSpaceNames.end())
+ return std::string(it->second);
+
+ /* Assemble a name made of the constituent fields. */
+
+ auto itPrimaries = primariesNames.find(primaries);
+ std::string primariesName =
+ itPrimaries == primariesNames.end() ? "Invalid" : itPrimaries->second;
+
+ auto itTransfer = transferNames.find(transferFunction);
+ std::string transferName =
+ itTransfer == transferNames.end() ? "Invalid" : itTransfer->second;
+
+ auto itEncoding = encodingNames.find(ycbcrEncoding);
+ std::string encodingName =
+ itEncoding == encodingNames.end() ? "Invalid" : itEncoding->second;
+
+ auto itRange = rangeNames.find(range);
+ std::string rangeName =
+ itRange == rangeNames.end() ? "Invalid" : itRange->second;
+
+ std::stringstream ss;
+ ss << primariesName << "/" << transferName << "/" << encodingName << "/" << rangeName;
+
+ return ss.str();
+}
+
+/**
+ * \brief Assemble and return a readable string representation of an
+ * optional ColorSpace
+ *
+ * This is a convenience helper to easily obtain a string representation
+ * for a ColorSpace in parts of the libcamera API where it is stored in a
+ * std::optional<>. If the ColorSpace is set, this function returns
+ * \a colorSpace.toString(), otherwise it returns "Unset".
+ *
+ * \return A string describing the optional ColorSpace
+ */
+std::string ColorSpace::toString(const std::optional<ColorSpace> &colorSpace)
+{
+ if (!colorSpace)
+ return "Unset";
+
+ return colorSpace->toString();
+}
+
+/**
+ * \brief Construct a color space from a string
+ * \param[in] str The string
+ *
+ * The string \a str can contain the name of a well-known color space, or be
+ * made of the four color space components separated by a '/' character, ordered
+ * as
+ *
+ * \verbatim primaries '/' transferFunction '/' ycbcrEncoding '/' range \endverbatim
+ *
+ * Any failure to parse the string, either because it doesn't match the expected
+ * format, or because one of the names isn't recognized, will cause this
+ * function to return std::nullopt.
+ *
+ * \return The ColorSpace corresponding to the string, or std::nullopt if the
+ * string doesn't describe a known color space
+ */
+std::optional<ColorSpace> ColorSpace::fromString(const std::string &str)
+{
+ /* First search for a standard color space name match. */
+ auto itColorSpace = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [&str](const auto &item) {
+ return str == item.second;
+ });
+ if (itColorSpace != colorSpaceNames.end())
+ return itColorSpace->first;
+
+ /*
+ * If not found, the string must contain the four color space
+ * components separated by a '/' character.
+ */
+ const auto &split = utils::split(str, "/");
+ std::vector<std::string> components{ split.begin(), split.end() };
+
+ if (components.size() != 4)
+ return std::nullopt;
+
+ ColorSpace colorSpace = ColorSpace::Raw;
+
+ /* Color primaries */
+ auto itPrimaries = std::find_if(primariesNames.begin(), primariesNames.end(),
+ [&components](const auto &item) {
+ return components[0] == item.second;
+ });
+ if (itPrimaries == primariesNames.end())
+ return std::nullopt;
+
+ colorSpace.primaries = itPrimaries->first;
+
+ /* Transfer function */
+ auto itTransfer = std::find_if(transferNames.begin(), transferNames.end(),
+ [&components](const auto &item) {
+ return components[1] == item.second;
+ });
+ if (itTransfer == transferNames.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->first;
+
+ /* YCbCr encoding */
+ auto itEncoding = std::find_if(encodingNames.begin(), encodingNames.end(),
+ [&components](const auto &item) {
+ return components[2] == item.second;
+ });
+ if (itEncoding == encodingNames.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itEncoding->first;
+
+ /* Quantization range */
+ auto itRange = std::find_if(rangeNames.begin(), rangeNames.end(),
+ [&components](const auto &item) {
+ return components[3] == item.second;
+ });
+ if (itRange == rangeNames.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->first;
+
+ return colorSpace;
+}
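
An illustrative round-trip under these definitions (expected behaviour, not normative output):

	std::optional<ColorSpace> cs = ColorSpace::fromString("sYCC");
	/* cs is set; cs->toString() returns the short name "sYCC". */

	cs = ColorSpace::fromString("Rec709/Rec709/Rec601/Limited");
	/* No standard color space matches this combination, so toString()
	 * reassembles the four fields: "Rec709/Rec709/Rec601/Limited". */

	cs = ColorSpace::fromString("NotAColorSpace");
	/* cs == std::nullopt */
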
+
+/**
+ * \brief Adjust the color space to match a pixel format
+ * \param[in] format The pixel format
+ *
+ * Not all combinations of pixel formats and color spaces make sense. For
+ * instance, nobody uses a limited quantization range with raw Bayer formats,
+ * and the YcbcrEncoding::None encoding isn't valid for YUV formats. This
+ * function adjusts the ColorSpace to make it compatible with the given \a
+ * format, by applying the following rules:
+ *
+ * - The color space for RAW formats must be Raw.
+ * - The Y'CbCr encoding and quantization range for RGB formats must be
+ * YcbcrEncoding::None and Range::Full respectively.
+ * - The Y'CbCr encoding for YUV formats must not be YcbcrEncoding::None. The
+ * best encoding is in that case guessed based on the primaries and transfer
+ * function.
+ *
+ * \return True if the color space has been adjusted, or false if it was
+ * already compatible with the format and hasn't been changed
+ */
+bool ColorSpace::adjust(PixelFormat format)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+ bool adjusted = false;
+
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ /* Raw formats must use the raw color space. */
+ if (*this != ColorSpace::Raw) {
+ *this = ColorSpace::Raw;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * RGB formats can't have a Y'CbCr encoding, and must use full
+ * range quantization.
+ */
+ if (ycbcrEncoding != YcbcrEncoding::None) {
+ ycbcrEncoding = YcbcrEncoding::None;
+ adjusted = true;
+ }
+
+ if (range != Range::Full) {
+ range = Range::Full;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ if (ycbcrEncoding != YcbcrEncoding::None)
+ break;
+
+ /*
+ * YUV formats must have a Y'CbCr encoding. Infer the most
+ * probable option from the transfer function and primaries.
+ */
+ switch (transferFunction) {
+ case TransferFunction::Linear:
+ /*
+ * Linear YUV is not used in any standard color space,
+ * pick the widely supported and used Rec601 as default.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+
+ case TransferFunction::Rec709:
+ switch (primaries) {
+ /* Raw should never happen. */
+ case Primaries::Raw:
+ case Primaries::Smpte170m:
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ case Primaries::Rec709:
+ ycbcrEncoding = YcbcrEncoding::Rec709;
+ break;
+ case Primaries::Rec2020:
+ ycbcrEncoding = YcbcrEncoding::Rec2020;
+ break;
+ }
+ break;
+
+ case TransferFunction::Srgb:
+ /*
+ * Only the sYCC color space uses the sRGB transfer
+ * function, the corresponding encoding is Rec601.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ }
+
+ adjusted = true;
+ break;
+ }
+
+ return adjusted;
+}
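
A short example of the rules above (formats::NV12 and formats::RGB888 are standard libcamera pixel formats; the outcomes are as sketched from the code):

	ColorSpace cs = ColorSpace::Srgb;	/* no Y'CbCr encoding, full range */

	/* NV12 is YUV: an encoding is inferred (sRGB transfer -> Rec601). */
	bool adjusted = cs.adjust(formats::NV12);
	/* adjusted == true, cs.ycbcrEncoding == YcbcrEncoding::Rec601 */

	ColorSpace rgb = ColorSpace::Srgb;
	adjusted = rgb.adjust(formats::RGB888);
	/* adjusted == false: sRGB already satisfies the RGB rules. */
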
+
+/**
+ * \brief Compare color spaces for equality
+ * \return True if the two color spaces are identical, false otherwise
+ */
+bool operator==(const ColorSpace &lhs, const ColorSpace &rhs)
+{
+ return lhs.primaries == rhs.primaries &&
+ lhs.transferFunction == rhs.transferFunction &&
+ lhs.ycbcrEncoding == rhs.ycbcrEncoding &&
+ lhs.range == rhs.range;
+}
+
+/**
+ * \fn bool operator!=(const ColorSpace &lhs, const ColorSpace &rhs)
+ * \brief Compare color spaces for inequality
+ * \return True if the two color spaces are not identical, false otherwise
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index 99c511d0..65668d48 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -2,42 +2,122 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.cpp : Control ID list
+ * {{mode}} ID list
*
* This file is auto-generated. Do not edit.
*/
-#include <libcamera/control_ids.h>
+#include <libcamera/{{filename}}.h>
+#include <libcamera/controls.h>
/**
- * \file control_ids.h
- * \brief Camera control identifiers
+ * \file {{filename}}.h
+ * \brief Camera {{mode}} identifiers
*/
namespace libcamera {
/**
- * \brief Namespace for libcamera controls
+ * \brief Namespace for libcamera {{mode}}
*/
-namespace controls {
+namespace {{mode}} {
-${controls_doc}
+{%- for vendor, ctrls in controls -%}
+
+{%- if vendor != 'libcamera' %}
+/**
+ * \brief Namespace for {{vendor}} {{mode}}
+ */
+namespace {{vendor}} {
+{%- endif -%}
+
+{% for ctrl in ctrls %}
+
+{% if ctrl.is_enum -%}
+/**
+ * \enum {{ctrl.name}}Enum
+ * \brief Supported {{ctrl.name}} values
+{%- for enum in ctrl.enum_values %}
+ *
+ * \var {{enum.name}}
+ * \brief {{enum.description|format_description}}
+{%- endfor %}
+ */
+
+/**
+ * \var {{ctrl.name}}Values
+ * \brief List of all {{ctrl.name}} supported values
+ */
+
+/**
+ * \var {{ctrl.name}}NameValueMap
+ * \brief Map of all {{ctrl.name}} supported value names (in std::string format) to value
+ */
+
+{% endif -%}
+/**
+ * \var {{ctrl.name}}
+ * \brief {{ctrl.description|format_description}}
+ */
+{%- endfor %}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
#ifndef __DOXYGEN__
/*
- * Keep the controls definitions hidden from doxygen as it incorrectly parses
+ * Keep the {{mode}} definitions hidden from doxygen as it incorrectly parses
* them as functions.
*/
-${controls_def}
-#endif
+{% for vendor, ctrls in controls -%}
+
+{% if vendor != 'libcamera' %}
+namespace {{vendor}} {
+{% endif %}
+
+{%- for ctrl in ctrls %}
+{% if ctrl.is_enum -%}
+extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values = {
+{%- for enum in ctrl.enum_values %}
+ static_cast<{{ctrl.type}}>({{enum.name}}),
+{%- endfor %}
+};
+extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap = {
+{%- for enum in ctrl.enum_values %}
+ { "{{enum.name}}", {{enum.name}} },
+{%- endfor %}
+};
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}, {{ctrl.name}}NameValueMap);
+{% else -%}
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}});
+{% endif -%}
+{%- endfor %}
+
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
+#endif /* __DOXYGEN__ */
/**
- * \brief List of all supported libcamera controls
+ * \brief List of all supported libcamera {{mode}}
+{%- if mode == 'controls' %}
+ *
+ * Unless otherwise stated, all controls are bi-directional, i.e. they can be
+ * set through Request::controls() and returned through Request::metadata().
+{%- endif %}
*/
-extern const ControlIdMap controls {
-${controls_map}
+extern const ControlIdMap {{mode}} {
+{%- for vendor, ctrls in controls -%}
+{%- for ctrl in ctrls %}
+ { {{ctrl.namespace}}{{ctrl.name|snake_case|upper}}, &{{ctrl.namespace}}{{ctrl.name}} },
+{%- endfor -%}
+{%- endfor %}
};
-} /* namespace controls */
+} /* namespace {{mode}} */
} /* namespace libcamera */
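
To make the templating concrete: for an enum control such as AeMeteringMode in the libcamera vendor namespace, the hidden definition block would expand to code roughly of this shape (an approximation, with the generated direction argument deliberately elided):

	extern const std::array<const ControlValue, 4> AeMeteringModeValues = {
		static_cast<int32_t>(MeteringCentreWeighted),
		static_cast<int32_t>(MeteringSpot),
		static_cast<int32_t>(MeteringMatrix),
		static_cast<int32_t>(MeteringCustom),
	};
	extern const std::map<std::string, int32_t> AeMeteringModeNameValueMap = {
		{ "MeteringCentreWeighted", MeteringCentreWeighted },
		{ "MeteringSpot", MeteringSpot },
		{ "MeteringMatrix", MeteringMatrix },
		{ "MeteringCustom", MeteringCustom },
	};
	extern const Control<int32_t> AeMeteringMode(AE_METERING_MODE,
		"AeMeteringMode", "libcamera", /* direction */ ...,
		AeMeteringModeNameValueMap);
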
diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids.yaml
deleted file mode 100644
index 4befec74..00000000
--- a/src/libcamera/control_ids.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1-or-later
-#
-# Copyright (C) 2019, Google Inc.
-#
-%YAML 1.2
----
-controls:
- - AeEnable:
- type: bool
- description: |
- Enable or disable the AE.
-
- \sa ManualExposure
-
- - AeLocked:
- type: bool
- description: |
- Report the lock status of a running AE algorithm.
-
- If the AE algorithm is locked the value shall be set to true, if it's
- converging it shall be set to false. If the AE algorithm is not
- running the control shall not be present in the metadata control list.
-
- \sa AeEnable
-
- - AwbEnable:
- type: bool
- description: |
- Enable or disable the AWB.
-
- \sa ManualGain
-
- - Brightness:
- type: int32_t
- description: Specify a fixed brightness parameter
-
- - Contrast:
- type: int32_t
- description: Specify a fixed contrast parameter
-
- - Saturation:
- type: int32_t
- description: Specify a fixed saturation parameter
-
- - ManualExposure:
- type: int32_t
- description: Specify a fixed exposure time in milli-seconds
-
- - ManualGain:
- type: int32_t
- description: Specify a fixed gain parameter
-
-...
diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml
new file mode 100644
index 00000000..1dfaee0c
--- /dev/null
+++ b/src/libcamera/control_ids_core.yaml
@@ -0,0 +1,1052 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned out through Request::metadata().
+vendor: libcamera
+controls:
+ - AeEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AE.
+
+ \sa ExposureTime AnalogueGain
+
+ - AeLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AE algorithm.
+
+      If the AE algorithm is locked the value shall be set to true; if it's
+ converging it shall be set to false. If the AE algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AeEnable
+
+ # AeMeteringMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeMeteringMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a metering mode for the AE algorithm to use.
+
+ The metering modes determine which parts of the image are used to
+ determine the scene brightness. Metering modes may be platform specific
+ and not all metering modes may be supported.
+ enum:
+ - name: MeteringCentreWeighted
+ value: 0
+ description: Centre-weighted metering mode.
+ - name: MeteringSpot
+ value: 1
+ description: Spot metering mode.
+ - name: MeteringMatrix
+ value: 2
+ description: Matrix metering mode.
+ - name: MeteringCustom
+ value: 3
+ description: Custom metering mode.
+
+ # AeConstraintMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeConstraintMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a constraint mode for the AE algorithm to use.
+
+ The constraint modes determine how the measured scene brightness is
+ adjusted to reach the desired target exposure. Constraint modes may be
+ platform specific, and not all constraint modes may be supported.
+ enum:
+ - name: ConstraintNormal
+ value: 0
+ description: |
+ Default constraint mode.
+
+ This mode aims to balance the exposure of different parts of the
+ image so as to reach a reasonable average level. However, highlights
+ in the image may appear over-exposed and lowlights may appear
+ under-exposed.
+ - name: ConstraintHighlight
+ value: 1
+ description: |
+ Highlight constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ over-exposing the brightest parts (highlights) of an image.
+ Other non-highlight parts of the image may appear under-exposed.
+ - name: ConstraintShadows
+ value: 2
+ description: |
+ Shadows constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ under-exposing the dark parts (shadows) of an image. Other normally
+ exposed parts of the image may appear over-exposed.
+ - name: ConstraintCustom
+ value: 3
+ description: |
+ Custom constraint mode.
+
+ # AeExposureMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeExposureMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify an exposure mode for the AE algorithm to use.
+
+ The exposure modes specify how the desired total exposure is divided
+ between the exposure time and the sensor's analogue gain. They are
+ platform specific, and not all exposure modes may be supported.
+ enum:
+ - name: ExposureNormal
+ value: 0
+ description: Default exposure mode.
+ - name: ExposureShort
+ value: 1
+ description: Exposure mode allowing only short exposure times.
+ - name: ExposureLong
+ value: 2
+ description: Exposure mode allowing long exposure times.
+ - name: ExposureCustom
+ value: 3
+ description: Custom exposure mode.
+
+ - ExposureValue:
+ type: float
+ direction: inout
+ description: |
+ Specify an Exposure Value (EV) parameter.
+
+ The EV parameter will only be applied if the AE algorithm is currently
+ enabled.
+
+      By convention, EV adjusts the exposure on a log2 scale. For example,
+ EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment
+ of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
+
+ \sa AeEnable
+
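+
The log2 convention above maps directly to a multiplicative exposure factor. A minimal sketch (the helper name is ours, not part of the libcamera API):

#include <cmath>

/* Total exposure scaling implied by an EV offset: factor = 2^EV. */
static double evToExposureFactor(double ev)
{
	return std::pow(2.0, ev); /* EV = -1 -> 0.5x, EV = 0.5 -> ~1.41x, EV = 2 -> 4x */
}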
+ - ExposureTime:
+ type: int32_t
+ direction: inout
+ description: |
+ Exposure time for the frame applied in the sensor device.
+
+ This value is specified in micro-seconds.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa AnalogueGain AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide if
+ control of which features should be automatically adjusted shouldn't
+ better be handled through a separate AE mode control.
+
+ - AnalogueGain:
+ type: float
+ direction: inout
+ description: |
+ Analogue gain value applied in the sensor device.
+
+ The value of the control specifies the gain multiplier applied to all
+ colour channels. This value cannot be lower than 1.0.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa ExposureTime AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide if
+ control of which features should be automatically adjusted shouldn't
+ better be handled through a separate AE mode control.
+
+ - AeFlickerMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the flicker avoidance mode for AGC/AEC.
+
+ The flicker mode determines whether, and how, the AGC/AEC algorithm
+ attempts to hide flicker effects caused by the duty cycle of artificial
+ lighting.
+
+ Although implementation dependent, many algorithms for "flicker
+ avoidance" work by restricting this exposure time to integer multiples
+ of the cycle period, wherever possible.
+
+ Implementations may not support all of the flicker modes listed below.
+
+ By default the system will start in FlickerAuto mode if this is
+      supported; otherwise the flicker mode will be set to FlickerOff.
+
+ enum:
+ - name: FlickerOff
+ value: 0
+ description: |
+ No flicker avoidance is performed.
+ - name: FlickerManual
+ value: 1
+ description: |
+ Manual flicker avoidance.
+
+ Suppress flicker effects caused by lighting running with a period
+ specified by the AeFlickerPeriod control.
+ \sa AeFlickerPeriod
+ - name: FlickerAuto
+ value: 2
+ description: |
+ Automatic flicker period detection and avoidance.
+
+ The system will automatically determine the most likely value of
+ flicker period, and avoid flicker of this frequency. Once flicker
+ is being corrected, it is implementation dependent whether the
+ system is still able to detect a change in the flicker period.
+ \sa AeFlickerDetected
+
+ - AeFlickerPeriod:
+ type: int32_t
+ direction: inout
+ description: |
+ Manual flicker period in microseconds.
+
+ This value sets the current flicker period to avoid. It is used when
+ AeFlickerMode is set to FlickerManual.
+
+ To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
+ to 100Hz), or 8333 (120Hz) for 60Hz mains.
+
+ Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
+ set means that no flicker cancellation occurs (until the value of this
+ control is updated).
+
+ Switching to modes other than FlickerManual has no effect on the
+ value of the AeFlickerPeriod control.
+
+ \sa AeFlickerMode
+
+ - AeFlickerDetected:
+ type: int32_t
+ direction: out
+ description: |
+ Flicker period detected in microseconds.
+
+ The value reported here indicates the currently detected flicker
+ period, or zero if no flicker at all is detected.
+
+ When AeFlickerMode is set to FlickerAuto, there may be a period during
+ which the value reported here remains zero. Once a non-zero value is
+ reported, then this is the flicker period that has been detected and is
+ now being cancelled.
+
+ In the case of 50Hz mains flicker, the value would be 10000
+ (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
+
+ It is implementation dependent whether the system can continue to detect
+ flicker of different periods when another frequency is already being
+ cancelled.
+
+ \sa AeFlickerMode
+
+ - Brightness:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed brightness parameter.
+
+      Positive values (up to 1.0) produce brighter images; negative values
+      (down to -1.0) produce darker images, and 0.0 leaves pixels unchanged.
+
+ - Contrast:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed contrast parameter.
+
+ Normal contrast is given by the value 1.0; larger values produce images
+ with more contrast.
+
+ - Lux:
+ type: float
+ direction: out
+ description: |
+ Report an estimate of the current illuminance level in lux.
+
+ The Lux control can only be returned in metadata.
+
+ - AwbEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AWB.
+
+ When AWB is enabled, the algorithm estimates the colour temperature of
+ the scene and computes colour gains and the colour correction matrix
+ automatically. The computed colour temperature, gains and correction
+ matrix are reported in metadata. The corresponding controls are ignored
+ if set in a request.
+
+ When AWB is disabled, the colour temperature, gains and correction
+ matrix are not updated automatically and can be set manually in
+ requests.
+
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+ \sa ColourTemperature
+
+ # AwbMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AwbMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify the range of illuminants to use for the AWB algorithm.
+
+ The modes supported are platform specific, and not all modes may be
+ supported.
+ enum:
+ - name: AwbAuto
+ value: 0
+ description: Search over the whole colour temperature range.
+ - name: AwbIncandescent
+ value: 1
+ description: Incandescent AWB lamp mode.
+ - name: AwbTungsten
+ value: 2
+ description: Tungsten AWB lamp mode.
+ - name: AwbFluorescent
+ value: 3
+ description: Fluorescent AWB lamp mode.
+ - name: AwbIndoor
+ value: 4
+ description: Indoor AWB lighting mode.
+ - name: AwbDaylight
+ value: 5
+ description: Daylight AWB lighting mode.
+ - name: AwbCloudy
+ value: 6
+ description: Cloudy AWB lighting mode.
+ - name: AwbCustom
+ value: 7
+ description: Custom AWB mode.
+
+ - AwbLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AWB algorithm.
+
+      If the AWB algorithm is locked the value shall be set to true; if it's
+ converging it shall be set to false. If the AWB algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AwbEnable
+
+ - ColourGains:
+ type: float
+ direction: inout
+ description: |
+ Pair of gain values for the Red and Blue colour channels, in that
+ order.
+
+ ColourGains can only be applied in a Request when the AWB is disabled.
+ If ColourGains is set in a request but ColourTemperature is not, the
+ implementation shall calculate and set the ColourTemperature based on
+ the ColourGains.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [2]
+
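+
A minimal application-side sketch of manual white balance using the AwbEnable and ColourGains controls above, assuming a libcamera::Request pointer already created by the application:

#include <libcamera/libcamera.h>

void setManualWhiteBalance(libcamera::Request *request)
{
	using namespace libcamera;

	ControlList &ctrls = request->controls();
	ctrls.set(controls::AwbEnable, false);
	/* { red gain, blue gain }, in that order, as documented above. */
	ctrls.set(controls::ColourGains, Span<const float, 2>({ 1.8f, 1.6f }));
}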
+ - ColourTemperature:
+ type: int32_t
+ direction: out
+ description: |
+ ColourTemperature of the frame, in kelvin.
+
+ ColourTemperature can only be applied in a Request when the AWB is
+ disabled.
+
+ If ColourTemperature is set in a request but ColourGains is not, the
+ implementation shall calculate and set the ColourGains based on the
+ given ColourTemperature. If ColourTemperature is set (either directly,
+ or indirectly by setting ColourGains) but ColourCorrectionMatrix is not,
+ the ColourCorrectionMatrix is updated based on the ColourTemperature.
+
+ The ColourTemperature used to process the frame is reported in metadata.
+
+ \sa AwbEnable
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+
+ - Saturation:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed saturation parameter.
+
+ Normal saturation is given by the value 1.0; larger values produce more
+ saturated colours; 0.0 produces a greyscale image.
+
+ - SensorBlackLevels:
+ type: int32_t
+ direction: out
+ description: |
+ Reports the sensor black levels used for processing a frame.
+
+ The values are in the order R, Gr, Gb, B. They are returned as numbers
+ out of a 16-bit pixel range (as if pixels ranged from 0 to 65535). The
+ SensorBlackLevels control can only be returned in metadata.
+ size: [4]
+
+ - Sharpness:
+ type: float
+ direction: inout
+ description: |
+ Intensity of the sharpening applied to the image.
+
+ A value of 0.0 means no sharpening. The minimum value means
+ minimal sharpening, and shall be 0.0 unless the camera can't
+ disable sharpening completely. The default value shall give a
+ "reasonable" level of sharpening, suitable for most use cases.
+ The maximum value may apply extremely high levels of sharpening,
+ higher than anyone could reasonably want. Negative values are
+ not allowed. Note also that sharpening is not applied to raw
+ streams.
+
+ - FocusFoM:
+ type: int32_t
+ direction: out
+ description: |
+ Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
+
+ A larger FocusFoM value indicates a more in-focus frame. This singular
+ value may be based on a combination of statistics gathered from
+ multiple focus regions within an image. The number of focus regions and
+ method of combination is platform dependent. In this respect, it is not
+      necessarily aimed at providing a way for the application to implement
+      a focus algorithm, but rather an indication of how in-focus a frame is.
+
+ - ColourCorrectionMatrix:
+ type: float
+ direction: inout
+ description: |
+ The 3x3 matrix that converts camera RGB to sRGB within the imaging
+ pipeline.
+
+ This should describe the matrix that is used after pixels have been
+ white-balanced, but before any gamma transformation. The 3x3 matrix is
+ stored in conventional reading order in an array of 9 floating point
+ values.
+
+ ColourCorrectionMatrix can only be applied in a Request when the AWB is
+ disabled.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [3,3]
+
+ - ScalerCrop:
+ type: Rectangle
+ direction: inout
+ description: |
+ Sets the image portion that will be scaled to form the whole of
+ the final output image.
+
+ The (x,y) location of this rectangle is relative to the
+ PixelArrayActiveAreas that is being used. The units remain native
+ sensor pixels, even if the sensor is being used in a binning or
+ skipping mode.
+
+ This control is only present when the pipeline supports scaling. Its
+ maximum valid value is given by the properties::ScalerCropMaximum
+ property, and the two can be used to implement digital zoom.
+
+ - DigitalGain:
+ type: float
+ direction: inout
+ description: |
+ Digital gain value applied during the processing steps applied
+ to the image as captured from the sensor.
+
+ The global digital gain factor is applied to all the colour channels
+ of the RAW image. Different pipeline models are free to
+ specify how the global gain factor applies to each separate
+ channel.
+
+ If an imaging pipeline applies digital gain in distinct
+ processing steps, this value indicates their total sum.
+ Pipelines are free to decide how to adjust each processing
+ step to respect the received gain factor and shall report
+ their total value in the request metadata.
+
+ - FrameDuration:
+ type: int64_t
+ direction: out
+ description: |
+ The instantaneous frame duration from start of frame exposure to start
+ of next exposure, expressed in microseconds.
+
+ This control is meant to be returned in metadata.
+
+ - FrameDurationLimits:
+ type: int64_t
+ direction: inout
+ description: |
+ The minimum and maximum (in that order) frame duration, expressed in
+ microseconds.
+
+ When provided by applications, the control specifies the sensor frame
+ duration interval the pipeline has to use. This limits the largest
+ exposure time the sensor can use. For example, if a maximum frame
+ duration of 33ms is requested (corresponding to 30 frames per second),
+ the sensor will not be able to raise the exposure time above 33ms.
+ A fixed frame duration is achieved by setting the minimum and maximum
+ values to be the same. Setting both values to 0 reverts to using the
+ camera defaults.
+
+ The maximum frame duration provides the absolute limit to the exposure
+ time computed by the AE algorithm and it overrides any exposure mode
+ setting specified with controls::AeExposureMode. Similarly, when a
+ manual exposure time is set through controls::ExposureTime, it also
+ gets clipped to the limits set by this control. When reported in
+ metadata, the control expresses the minimum and maximum frame durations
+ used after being clipped to the sensor provided frame duration limits.
+
+ \sa AeExposureMode
+ \sa ExposureTime
+
+ \todo Define how to calculate the capture frame rate by
+ defining controls to report additional delays introduced by
+ the capture pipeline or post-processing stages (ie JPEG
+ conversion, frame scaling).
+
+ \todo Provide an explicit definition of default control values, for
+ this and all other controls.
+
+ size: [2]
+
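+
As described above, equal minimum and maximum durations fix the frame rate. A sketch pinning the sensor to roughly 30 frames per second, assuming a Request pointer obtained by the application:

#include <cstdint>
#include <libcamera/libcamera.h>

void lockFrameRateTo30Fps(libcamera::Request *request)
{
	using namespace libcamera;

	constexpr int64_t frameTime = 1000000 / 30; /* 33333 us per frame */
	request->controls().set(controls::FrameDurationLimits,
				Span<const int64_t, 2>({ frameTime, frameTime }));
}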
+ - SensorTemperature:
+ type: float
+ direction: out
+ description: |
+ Temperature measure from the camera sensor in Celsius.
+
+ This value is typically obtained by a thermal sensor present on-die or
+ in the camera module. The range of reported temperatures is device
+ dependent.
+
+ The SensorTemperature control will only be returned in metadata if a
+ thermal sensor is present.
+
+ - SensorTimestamp:
+ type: int64_t
+ direction: out
+ description: |
+ The time when the first row of the image sensor active array is exposed.
+
+ The timestamp, expressed in nanoseconds, represents a monotonically
+ increasing counter since the system boot time, as defined by the
+ Linux-specific CLOCK_BOOTTIME clock id.
+
+ The SensorTimestamp control can only be returned in metadata.
+
+ \todo Define how the sensor timestamp has to be used in the reprocessing
+ use case.
+
+ - AfMode:
+ type: int32_t
+ direction: inout
+ description: |
+ The mode of the AF (autofocus) algorithm.
+
+ An implementation may choose not to implement all the modes.
+
+ enum:
+ - name: AfModeManual
+ value: 0
+ description: |
+ The AF algorithm is in manual mode.
+
+ In this mode it will never perform any action nor move the lens of
+ its own accord, but an application can specify the desired lens
+ position using the LensPosition control. The AfState will always
+ report AfStateIdle.
+
+ If the camera is started in AfModeManual, it will move the focus
+ lens to the position specified by the LensPosition control.
+
+ This mode is the recommended default value for the AfMode control.
+ External cameras (as reported by the Location property set to
+ CameraLocationExternal) may use a different default value.
+ - name: AfModeAuto
+ value: 1
+ description: |
+ The AF algorithm is in auto mode.
+
+ In this mode the algorithm will never move the lens or change state
+ unless the AfTrigger control is used. The AfTrigger control can be
+ used to initiate a focus scan, the results of which will be
+ reported by AfState.
+
+ If the autofocus algorithm is moved from AfModeAuto to another mode
+ while a scan is in progress, the scan is cancelled immediately,
+ without waiting for the scan to finish.
+
+ When first entering this mode the AfState will report AfStateIdle.
+ When a trigger control is sent, AfState will report AfStateScanning
+ for a period before spontaneously changing to AfStateFocused or
+ AfStateFailed, depending on the outcome of the scan. It will remain
+ in this state until another scan is initiated by the AfTrigger
+ control. If a scan is cancelled (without changing to another mode),
+ AfState will return to AfStateIdle.
+ - name: AfModeContinuous
+ value: 2
+ description: |
+ The AF algorithm is in continuous mode.
+
+ In this mode the lens can re-start a scan spontaneously at any
+ moment, without any user intervention. The AfState still reports
+ whether the algorithm is currently scanning or not, though the
+ application has no ability to initiate or cancel scans, nor to move
+ the lens for itself.
+
+ However, applications can pause the AF algorithm from continuously
+ scanning by using the AfPause control. This allows video or still
+ images to be captured whilst guaranteeing that the focus is fixed.
+
+ When set to AfModeContinuous, the system will immediately initiate a
+ scan so AfState will report AfStateScanning, and will settle on one
+ of AfStateFocused or AfStateFailed, depending on the scan result.
+
+ - AfRange:
+ type: int32_t
+ direction: inout
+ description: |
+ The range of focus distances that is scanned.
+
+ An implementation may choose not to implement all the options here.
+ enum:
+ - name: AfRangeNormal
+ value: 0
+ description: |
+ A wide range of focus distances is scanned.
+
+ Scanned distances cover all the way from infinity down to close
+ distances, though depending on the implementation, possibly not
+ including the very closest macro positions.
+ - name: AfRangeMacro
+ value: 1
+ description: |
+ Only close distances are scanned.
+ - name: AfRangeFull
+ value: 2
+ description: |
+ The full range of focus distances is scanned.
+
+ This range is similar to AfRangeNormal but includes the very
+ closest macro positions.
+
+ - AfSpeed:
+ type: int32_t
+ direction: inout
+ description: |
+ Determine whether the AF is to move the lens as quickly as possible or
+ more steadily.
+
+ For example, during video recording it may be desirable not to move the
+ lens too abruptly, but when in a preview mode (waiting for a still
+ capture) it may be helpful to move the lens as quickly as is reasonably
+ possible.
+ enum:
+ - name: AfSpeedNormal
+ value: 0
+ description: Move the lens at its usual speed.
+ - name: AfSpeedFast
+ value: 1
+ description: Move the lens more quickly.
+
+ - AfMetering:
+ type: int32_t
+ direction: inout
+ description: |
+ The parts of the image used by the AF algorithm to measure focus.
+ enum:
+ - name: AfMeteringAuto
+ value: 0
+ description: |
+ Let the AF algorithm decide for itself where it will measure focus.
+ - name: AfMeteringWindows
+ value: 1
+ description: |
+ Use the rectangles defined by the AfWindows control to measure focus.
+
+ If no windows are specified the behaviour is platform dependent.
+
+ - AfWindows:
+ type: Rectangle
+ direction: inout
+ description: |
+ The focus windows used by the AF algorithm when AfMetering is set to
+ AfMeteringWindows.
+
+ The units used are pixels within the rectangle returned by the
+ ScalerCropMaximum property.
+
+ In order to be activated, a rectangle must be programmed with non-zero
+ width and height. Internally, these rectangles are intersected with the
+ ScalerCropMaximum rectangle. If the window becomes empty after this
+ operation, then the window is ignored. If all the windows end up being
+ ignored, then the behaviour is platform dependent.
+
+ On platforms that support the ScalerCrop control (for implementing
+ digital zoom, for example), no automatic recalculation or adjustment of
+ AF windows is performed internally if the ScalerCrop is changed. If any
+ window lies outside the output image after the scaler crop has been
+ applied, it is up to the application to recalculate them.
+
+ The details of how the windows are used are platform dependent. We note
+ that when there is more than one AF window, a typical implementation
+ might find the optimal focus position for each one and finally select
+ the window where the focal distance for the objects shown in that part
+      of the image is closest to the camera.
+
+ size: [n]
+
+ - AfTrigger:
+ type: int32_t
+ direction: in
+ description: |
+ Start an autofocus scan.
+
+ This control starts an autofocus scan when AfMode is set to AfModeAuto,
+ and is ignored if AfMode is set to AfModeManual or AfModeContinuous. It
+ can also be used to terminate a scan early.
+
+ enum:
+ - name: AfTriggerStart
+ value: 0
+ description: |
+ Start an AF scan.
+
+ Setting the control to AfTriggerStart is ignored if a scan is in
+ progress.
+ - name: AfTriggerCancel
+ value: 1
+ description: |
+ Cancel an AF scan.
+
+ This does not cause the lens to move anywhere else. Ignored if no
+ scan is in progress.
+
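+
A sketch of a one-shot autofocus cycle built from the AfMode, AfTrigger and AfState controls, assuming the application queues the request itself and inspects metadata in its completion handler:

#include <iostream>
#include <libcamera/libcamera.h>

using namespace libcamera;

/* Queue side: select auto mode and start a scan in the same request. */
void startAfScan(Request *request)
{
	ControlList &ctrls = request->controls();
	ctrls.set(controls::AfMode, controls::AfModeAuto);
	ctrls.set(controls::AfTrigger, controls::AfTriggerStart);
}

/* Completion side: inspect the scan outcome from the request metadata. */
void checkAfResult(Request *request)
{
	if (auto state = request->metadata().get(controls::AfState)) {
		if (*state == controls::AfStateFocused)
			std::cout << "AF scan converged" << std::endl;
		else if (*state == controls::AfStateFailed)
			std::cout << "AF scan failed" << std::endl;
	}
}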
+ - AfPause:
+ type: int32_t
+ direction: in
+ description: |
+ Pause lens movements when in continuous autofocus mode.
+
+ This control has no effect except when in continuous autofocus mode
+ (AfModeContinuous). It can be used to pause any lens movements while
+ (for example) images are captured. The algorithm remains inactive
+ until it is instructed to resume.
+
+ enum:
+ - name: AfPauseImmediate
+ value: 0
+ description: |
+ Pause the continuous autofocus algorithm immediately.
+
+ The autofocus algorithm is paused whether or not any kind of scan
+ is underway. AfPauseState will subsequently report
+ AfPauseStatePaused. AfState may report any of AfStateScanning,
+ AfStateFocused or AfStateFailed, depending on the algorithm's state
+ when it received this control.
+ - name: AfPauseDeferred
+ value: 1
+ description: |
+ Pause the continuous autofocus algorithm at the end of the scan.
+
+ This is similar to AfPauseImmediate, and if the AfState is
+ currently reporting AfStateFocused or AfStateFailed it will remain
+ in that state and AfPauseState will report AfPauseStatePaused.
+
+ However, if the algorithm is scanning (AfStateScanning),
+ AfPauseState will report AfPauseStatePausing until the scan is
+ finished, at which point AfState will report one of AfStateFocused
+ or AfStateFailed, and AfPauseState will change to
+ AfPauseStatePaused.
+
+ - name: AfPauseResume
+ value: 2
+ description: |
+ Resume continuous autofocus operation.
+
+ The algorithm starts again from exactly where it left off, and
+ AfPauseState will report AfPauseStateRunning.
+
+ - LensPosition:
+ type: float
+ direction: inout
+ description: |
+ Set and report the focus lens position.
+
+ This control instructs the lens to move to a particular position and
+ also reports back the position of the lens for each frame.
+
+ The LensPosition control is ignored unless the AfMode is set to
+ AfModeManual, though the value is reported back unconditionally in all
+ modes.
+
+ This value, which is generally a non-integer, is the reciprocal of the
+ focal distance in metres, also known as dioptres. That is, to set a
+ focal distance D, the lens position LP is given by
+
+ \f$LP = \frac{1\mathrm{m}}{D}\f$
+
+ For example:
+
+ - 0 moves the lens to infinity.
+ - 0.5 moves the lens to focus on objects 2m away.
+ - 2 moves the lens to focus on objects 50cm away.
+ - And larger values will focus the lens closer.
+
+ The default value of the control should indicate a good general
+ position for the lens, often corresponding to the hyperfocal distance
+ (the closest position for which objects at infinity are still
+ acceptably sharp). The minimum will often be zero (meaning infinity),
+ and the maximum value defines the closest focus position.
+
+ \todo Define a property to report the Hyperfocal distance of calibrated
+ lenses.
+
+ - AfState:
+ type: int32_t
+ direction: out
+ description: |
+ The current state of the AF algorithm.
+
+ This control reports the current state of the AF algorithm in
+ conjunction with the reported AfMode value and (in continuous AF mode)
+ the AfPauseState value. The possible state changes are described below,
+ though we note the following state transitions that occur when the
+ AfMode is changed.
+
+ If the AfMode is set to AfModeManual, then the AfState will always
+ report AfStateIdle (even if the lens is subsequently moved). Changing
+ to the AfModeManual state does not initiate any lens movement.
+
+ If the AfMode is set to AfModeAuto then the AfState will report
+ AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent
+ together then AfState will omit AfStateIdle and move straight to
+ AfStateScanning (and start a scan).
+
+ If the AfMode is set to AfModeContinuous then the AfState will
+ initially report AfStateScanning.
+
+ enum:
+ - name: AfStateIdle
+ value: 0
+ description: |
+ The AF algorithm is in manual mode (AfModeManual) or in auto mode
+ (AfModeAuto) and a scan has not yet been triggered, or an
+ in-progress scan was cancelled.
+ - name: AfStateScanning
+ value: 1
+ description: |
+ The AF algorithm is in auto mode (AfModeAuto), and a scan has been
+ started using the AfTrigger control.
+
+ The scan can be cancelled by sending AfTriggerCancel at which point
+ the algorithm will either move back to AfStateIdle or, if the scan
+ actually completes before the cancel request is processed, to one
+ of AfStateFocused or AfStateFailed.
+
+ Alternatively the AF algorithm could be in continuous mode
+ (AfModeContinuous) at which point it may enter this state
+ spontaneously whenever it determines that a rescan is needed.
+ - name: AfStateFocused
+ value: 2
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm believes the image is now in focus.
+ - name: AfStateFailed
+ value: 3
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm did not find a good focus position.
+
+ - AfPauseState:
+ type: int32_t
+ direction: out
+ description: |
+ Report whether the autofocus is currently running, paused or pausing.
+
+ This control is only applicable in continuous (AfModeContinuous) mode,
+ and reports whether the algorithm is currently running, paused or
+ pausing (that is, will pause as soon as any in-progress scan
+ completes).
+
+ Any change to AfMode will cause AfPauseStateRunning to be reported.
+
+ enum:
+ - name: AfPauseStateRunning
+ value: 0
+ description: |
+ Continuous AF is running and the algorithm may restart a scan
+ spontaneously.
+ - name: AfPauseStatePausing
+ value: 1
+ description: |
+ Continuous AF has been sent an AfPauseDeferred control, and will
+ pause as soon as any in-progress scan completes.
+
+ When the scan completes, the AfPauseState control will report
+          AfPauseStatePaused. No new scans will be started spontaneously until
+ the AfPauseResume control is sent.
+ - name: AfPauseStatePaused
+ value: 2
+ description: |
+ Continuous AF is paused.
+
+ No further state changes or lens movements will occur until the
+ AfPauseResume control is sent.
+
+ - HdrMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the mode to be used for High Dynamic Range (HDR) imaging.
+
+ HDR techniques typically include multiple exposure, image fusion and
+ tone mapping techniques to improve the dynamic range of the resulting
+ images.
+
+ When using an HDR mode, images are captured with different sets of AGC
+ settings called HDR channels. Channels indicate in particular the type
+ of exposure (short, medium or long) used to capture the raw image,
+ before fusion. Each HDR image is tagged with the corresponding channel
+ using the HdrChannel control.
+
+ \sa HdrChannel
+
+ enum:
+ - name: HdrModeOff
+ value: 0
+ description: |
+ HDR is disabled.
+
+ Metadata for this frame will not include the HdrChannel control.
+ - name: HdrModeMultiExposureUnmerged
+ value: 1
+ description: |
+ Multiple exposures will be generated in an alternating fashion.
+
+ The multiple exposures will not be merged together and will be
+ returned to the application as they are. Each image will be tagged
+ with the correct HDR channel, indicating what kind of exposure it
+ is. The tag should be the same as in the HdrModeMultiExposure case.
+
+ The expectation is that an application using this mode would merge
+ the frames to create HDR images for itself if it requires them.
+ - name: HdrModeMultiExposure
+ value: 2
+ description: |
+ Multiple exposures will be generated and merged to create HDR
+ images.
+
+ Each image will be tagged with the HDR channel (long, medium or
+ short) that arrived and which caused this image to be output.
+
+ Systems that use two channels for HDR will return images tagged
+ alternately as the short and long channel. Systems that use three
+ channels for HDR will cycle through the short, medium and long
+ channel before repeating.
+ - name: HdrModeSingleExposure
+ value: 3
+ description: |
+ Multiple frames all at a single exposure will be used to create HDR
+ images.
+
+ These images should be reported as all corresponding to the HDR
+ short channel.
+ - name: HdrModeNight
+ value: 4
+ description: |
+ Multiple frames will be combined to produce "night mode" images.
+
+ It is up to the implementation exactly which HDR channels it uses,
+ and the images will all be tagged accordingly with the correct HDR
+ channel information.
+
+ - HdrChannel:
+ type: int32_t
+ direction: out
+ description: |
+ The HDR channel used to capture the frame.
+
+ This value is reported back to the application so that it can discover
+ whether this capture corresponds to the short or long exposure image
+ (or any other image used by the HDR procedure). An application can
+ monitor the HDR channel to discover when the differently exposed images
+ have arrived.
+
+ This metadata is only available when an HDR mode has been enabled.
+
+ \sa HdrMode
+
+ enum:
+ - name: HdrChannelNone
+ value: 0
+ description: |
+ This image does not correspond to any of the captures used to create
+ an HDR image.
+ - name: HdrChannelShort
+ value: 1
+ description: |
+ This is a short exposure image.
+ - name: HdrChannelMedium
+ value: 2
+ description: |
+ This is a medium exposure image.
+ - name: HdrChannelLong
+ value: 3
+ description: |
+ This is a long exposure image.
+
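+
A sketch of how an application might drive the two HDR controls together, requesting unmerged brackets on the queue side and classifying completed frames by channel:

#include <libcamera/libcamera.h>

using namespace libcamera;

void enableHdrBracketing(Request *request)
{
	request->controls().set(controls::HdrMode,
				controls::HdrModeMultiExposureUnmerged);
}

/* True if the completed frame was captured on the short HDR channel. */
bool isShortExposureFrame(Request *request)
{
	auto channel = request->metadata().get(controls::HdrChannel);
	return channel && *channel == controls::HdrChannelShort;
}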
+ - Gamma:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed gamma value.
+
+      The default gamma value must be 2.2, which closely mimics sRGB gamma.
+ Note that this is camera gamma, so it is applied as 1.0/gamma.
+
+ - DebugMetadataEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the debug metadata.
+
+...
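To illustrate how several of the core controls defined above combine in practice, here is a hedged application-side sketch: exposure is fixed manually on the queue side, and computed metadata is read back on completion. The function names are ours; the control identifiers come from the definitions above:

#include <iostream>
#include <libcamera/libcamera.h>

using namespace libcamera;

/* Disable AE and fix exposure time and analogue gain for the request. */
void configureManualExposure(Request *request)
{
	ControlList &ctrls = request->controls();
	ctrls.set(controls::AeEnable, false);
	ctrls.set(controls::ExposureTime, 10000); /* microseconds */
	ctrls.set(controls::AnalogueGain, 2.0f);
}

/* Read back per-frame metadata once the request completes. */
void onRequestCompleted(Request *request)
{
	const ControlList &meta = request->metadata();
	if (auto lux = meta.get(controls::Lux))
		std::cout << "Estimated illuminance: " << *lux << " lux\n";
	if (auto temp = meta.get(controls::SensorTemperature))
		std::cout << "Sensor temperature: " << *temp << " C\n";
}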
diff --git a/src/libcamera/control_ids_debug.yaml b/src/libcamera/control_ids_debug.yaml
new file mode 100644
index 00000000..79753271
--- /dev/null
+++ b/src/libcamera/control_ids_debug.yaml
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+%YAML 1.1
+---
+vendor: debug
+controls: []
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
new file mode 100644
index 00000000..87e4e02d
--- /dev/null
+++ b/src/libcamera/control_ids_draft.yaml
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned through Request::metadata().
+vendor: draft
+controls:
+ - AePrecaptureTrigger:
+ type: int32_t
+ direction: inout
+ description: |
+ Control for AE metering trigger. Currently identical to
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+
+ Whether the camera device will trigger a precapture metering sequence
+ when it processes this request.
+ enum:
+ - name: AePrecaptureTriggerIdle
+ value: 0
+ description: The trigger is idle.
+ - name: AePrecaptureTriggerStart
+ value: 1
+ description: The pre-capture AE metering is started by the camera.
+ - name: AePrecaptureTriggerCancel
+ value: 2
+ description: |
+ The camera will cancel any active or completed metering sequence.
+ The AE algorithm is reset to its initial state.
+
+ - NoiseReductionMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the noise reduction algorithm mode. Currently
+ identical to ANDROID_NOISE_REDUCTION_MODE.
+
+ Mode of operation for the noise reduction algorithm.
+ enum:
+ - name: NoiseReductionModeOff
+ value: 0
+        description: No noise reduction is applied.
+ - name: NoiseReductionModeFast
+ value: 1
+ description: |
+ Noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeHighQuality
+ value: 2
+ description: |
+ High quality noise reduction at the expense of frame rate.
+ - name: NoiseReductionModeMinimal
+ value: 3
+ description: |
+ Minimal noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeZSL
+ value: 4
+ description: |
+ Noise reduction is applied at different levels to different streams.
+
+ - ColorCorrectionAberrationMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the color correction aberration mode. Currently
+ identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+ Mode of operation for the chromatic aberration correction algorithm.
+ enum:
+ - name: ColorCorrectionAberrationOff
+ value: 0
+ description: No aberration correction is applied.
+ - name: ColorCorrectionAberrationFast
+ value: 1
+ description: Aberration correction will not slow down the frame rate.
+ - name: ColorCorrectionAberrationHighQuality
+ value: 2
+ description: |
+          High quality aberration correction, which might reduce the frame
+ rate.
+
+ - AeState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AE algorithm state. Currently identical to
+ ANDROID_CONTROL_AE_STATE.
+
+ Current state of the AE algorithm.
+ enum:
+ - name: AeStateInactive
+ value: 0
+ description: The AE algorithm is inactive.
+ - name: AeStateSearching
+ value: 1
+ description: The AE algorithm has not converged yet.
+ - name: AeStateConverged
+ value: 2
+ description: The AE algorithm has converged.
+ - name: AeStateLocked
+ value: 3
+ description: The AE algorithm is locked.
+ - name: AeStateFlashRequired
+ value: 4
+        description: The AE algorithm would need a flash for good results.
+ - name: AeStatePrecapture
+ value: 5
+ description: |
+ The AE algorithm has started a pre-capture metering session.
+ \sa AePrecaptureTrigger
+
+ - AwbState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AWB algorithm state. Currently identical
+ to ANDROID_CONTROL_AWB_STATE.
+
+ Current state of the AWB algorithm.
+ enum:
+ - name: AwbStateInactive
+ value: 0
+ description: The AWB algorithm is inactive.
+ - name: AwbStateSearching
+ value: 1
+ description: The AWB algorithm has not converged yet.
+ - name: AwbConverged
+ value: 2
+ description: The AWB algorithm has converged.
+ - name: AwbLocked
+ value: 3
+ description: The AWB algorithm is locked.
+
+ - SensorRollingShutterSkew:
+ type: int64_t
+ direction: out
+ description: |
+ Control to report the time between the start of exposure of the first
+ row and the start of exposure of the last row. Currently identical to
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
+
+ - LensShadingMapMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to report if the lens shading map is available. Currently
+ identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+ enum:
+ - name: LensShadingMapModeOff
+ value: 0
+ description: No lens shading map mode is available.
+ - name: LensShadingMapModeOn
+ value: 1
+ description: The lens shading map mode is available.
+
+ - PipelineDepth:
+ type: int32_t
+ direction: out
+ description: |
+ Specifies the number of pipeline stages the frame went through from when
+ it was exposed to when the final completed result was available to the
+ framework. Always less than or equal to PipelineMaxDepth. Currently
+ identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+ The typical value for this control is 3 as a frame is first exposed,
+ captured and then processed in a single pass through the ISP. Any
+      additional processing step performed after the ISP pass (for example,
+      face detection or additional format conversions) counts as an additional
+ pipeline stage.
+
+ - MaxLatency:
+ type: int32_t
+ direction: out
+ description: |
+ The maximum number of frames that can occur after a request (different
+ than the previous) has been submitted, and before the result's state
+ becomes synchronized. A value of -1 indicates unknown latency, and 0
+ indicates per-frame control. Currently identical to
+ ANDROID_SYNC_MAX_LATENCY.
+
+ - TestPatternMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the test pattern mode. Currently identical to
+ ANDROID_SENSOR_TEST_PATTERN_MODE.
+ enum:
+ - name: TestPatternModeOff
+ value: 0
+ description: |
+ No test pattern mode is used. The camera device returns frames from
+ the image sensor.
+ - name: TestPatternModeSolidColor
+ value: 1
+ description: |
+ Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+ color channel provided in test pattern data.
+ \todo Add control for test pattern data.
+ - name: TestPatternModeColorBars
+ value: 2
+ description: |
+ All pixel data is replaced with an 8-bar color pattern. The vertical
+          bars (left-to-right) are as follows: white, yellow, cyan, green,
+ magenta, red, blue and black. Each bar should take up 1/8 of the
+ sensor pixel array width. When this is not possible, the bar size
+ should be rounded down to the nearest integer and the pattern can
+ repeat on the right side. Each bar's height must always take up the
+ full sensor pixel array height.
+ - name: TestPatternModeColorBarsFadeToGray
+ value: 3
+ description: |
+ The test pattern is similar to TestPatternModeColorBars,
+ except that each bar should start at its specified color at the top
+          and fade to gray at the bottom. Furthermore, each bar is further
+          subdivided into a left and a right half. The left half should have
+          a smooth gradient, and the right half should have a quantized
+          gradient. In particular, the right half should consist of blocks
+          of the same color for 1/16th of the active sensor pixel array
+          width. The
+ least significant bits in the quantized gradient should be copied
+ from the most significant bits of the smooth gradient. The height of
+ each bar should always be a multiple of 128. When this is not the
+ case, the pattern should repeat at the bottom of the image.
+ - name: TestPatternModePn9
+ value: 4
+ description: |
+ All pixel data is replaced by a pseudo-random sequence generated
+ from a PN9 512-bit sequence (typically implemented in hardware with
+ a linear feedback shift register). The generator should be reset at
+ the beginning of each frame, and thus each subsequent raw frame with
+ this test pattern should be exactly the same as the last.
+ - name: TestPatternModeCustom1
+ value: 256
+ description: |
+ The first custom test pattern. All custom patterns that are
+ available only on this camera device are at least this numeric
+ value. All of the custom test patterns will be static (that is the
+ raw image must not vary from frame to frame).
+
+ - FaceDetectMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the face detection mode used by the pipeline.
+
+ Currently identical to ANDROID_STATISTICS_FACE_DETECT_MODE.
+
+ \sa FaceDetectFaceRectangles
+ \sa FaceDetectFaceScores
+ \sa FaceDetectFaceLandmarks
+ \sa FaceDetectFaceIds
+
+ enum:
+ - name: FaceDetectModeOff
+ value: 0
+ description: |
+ Pipeline doesn't perform face detection and doesn't report any
+ control related to face detection.
+ - name: FaceDetectModeSimple
+ value: 1
+ description: |
+ Pipeline performs face detection and reports the
+ FaceDetectFaceRectangles and FaceDetectFaceScores controls for each
+ detected face. FaceDetectFaceLandmarks and FaceDetectFaceIds are
+ optional.
+ - name: FaceDetectModeFull
+ value: 2
+ description: |
+ Pipeline performs face detection and reports all the controls
+ related to face detection including FaceDetectFaceRectangles,
+ FaceDetectFaceScores, FaceDetectFaceLandmarks, and
+          FaceDetectFaceIds for each detected face.
+
+ - FaceDetectFaceRectangles:
+ type: Rectangle
+ direction: out
+ description: |
+ Boundary rectangles of the detected faces. The number of values is
+ the number of detected faces.
+
+ The FaceDetectFaceRectangles control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_RECTANGLES.
+ size: [n]
+
+ - FaceDetectFaceScores:
+ type: uint8_t
+ direction: out
+ description: |
+ Confidence score of each of the detected faces. The range of score is
+ [0, 100]. The number of values should be the number of faces reported
+ in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceScores control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_SCORES.
+ size: [n]
+
+ - FaceDetectFaceLandmarks:
+ type: Point
+ direction: out
+ description: |
+ Array of human face landmark coordinates in format [..., left_eye_i,
+ right_eye_i, mouth_i, left_eye_i+1, ...], with i = index of face. The
+ number of values should be 3 * the number of faces reported in
+ FaceDetectFaceRectangles.
+
+ The FaceDetectFaceLandmarks control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_LANDMARKS.
+ size: [n]
+
+ - FaceDetectFaceIds:
+ type: int32_t
+ direction: out
+ description: |
+ Each detected face is given a unique ID that is valid for as long as the
+ face is visible to the camera device. A face that leaves the field of
+ view and later returns may be assigned a new ID. The number of values
+ should be the number of faces reported in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceIds control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_IDS.
+ size: [n]
+
+...
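A sketch of consuming the draft face detection controls defined above, enabling simple mode on the queue side and walking the parallel per-face metadata arrays on completion:

#include <cstddef>
#include <libcamera/libcamera.h>

using namespace libcamera;

void enableFaceDetection(Request *request)
{
	request->controls().set(controls::draft::FaceDetectMode,
				controls::draft::FaceDetectModeSimple);
}

void printFaces(Request *request)
{
	const ControlList &meta = request->metadata();
	auto rects = meta.get(controls::draft::FaceDetectFaceRectangles);
	auto scores = meta.get(controls::draft::FaceDetectFaceScores);
	if (!rects || !scores || rects->size() != scores->size())
		return;

	for (std::size_t i = 0; i < rects->size(); i++) {
		const Rectangle &face = (*rects)[i];
		unsigned int confidence = (*scores)[i]; /* 0-100 */
		/* ... overlay or log the face rectangle and its confidence ... */
		(void)face;
		(void)confidence;
	}
}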
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
new file mode 100644
index 00000000..7524c5d2
--- /dev/null
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Raspberry Pi (VC4 and PiSP) specific vendor controls
+vendor: rpi
+controls:
+ - StatsOutputEnable:
+ type: bool
+ direction: inout
+ description: |
+      Toggles whether the Raspberry Pi IPA outputs hardware generated statistics.
+
+ When this control is set to true, the IPA outputs a binary dump of the
+ hardware generated statistics through the Request metadata in the
+ Bcm2835StatsOutput control.
+
+ \sa Bcm2835StatsOutput
+
+ - Bcm2835StatsOutput:
+ type: uint8_t
+ size: [n]
+ direction: out
+ description: |
+ Span of the BCM2835 ISP generated statistics for the current frame.
+
+ This is sent in the Request metadata if the StatsOutputEnable is set to
+ true. The statistics struct definition can be found in
+ include/linux/bcm2835-isp.h.
+
+ \sa StatsOutputEnable
+
+ - ScalerCrops:
+ type: Rectangle
+ size: [n]
+ direction: out
+ description: |
+      An array of rectangles, where each element has identical
+ functionality to the ScalerCrop control. This control allows the
+ Raspberry Pi pipeline handler to control individual scaler crops per
+ output stream.
+
+ The order of rectangles passed into the control must match the order of
+ streams configured by the application. The pipeline handler will only
+      configure crop rectangles up to the number of output streams configured.
+ All subsequent rectangles passed into this control are ignored by the
+ pipeline handler.
+
+ If both rpi::ScalerCrops and ScalerCrop controls are present in a
+ ControlList, the latter is discarded, and crops are obtained from this
+ control.
+
+ Note that using different crop rectangles for each output stream with
+ this control is only applicable on the Pi5/PiSP platform. This control
+ should also be considered temporary/draft and will be replaced with
+ official libcamera API support for per-stream controls in the future.
+
+ \sa ScalerCrop
+...
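A sketch of the Raspberry Pi statistics round-trip described above; parsing of struct bcm2835_isp_stats is left out, only the plumbing is shown:

#include <cstddef>
#include <cstdint>
#include <libcamera/libcamera.h>

using namespace libcamera;

void enableRpiStats(Request *request)
{
	request->controls().set(controls::rpi::StatsOutputEnable, true);
}

void onCompleted(Request *request)
{
	auto stats = request->metadata().get(controls::rpi::Bcm2835StatsOutput);
	if (!stats)
		return;

	/*
	 * The span carries the struct bcm2835_isp_stats layout from
	 * include/linux/bcm2835-isp.h; interpreting it is up to the caller.
	 */
	const uint8_t *data = stats->data();
	std::size_t size = stats->size();
	(void)data;
	(void)size;
}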
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
new file mode 100644
index 00000000..6752eb98
--- /dev/null
+++ b/src/libcamera/control_ranges.yaml
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Specifies the control id ranges/offsets for core/draft libcamera and vendor
+# controls and properties.
+ranges:
+ # Core libcamera controls
+ libcamera: 0
+ # Draft designated libcamera controls
+ draft: 10000
+ # Raspberry Pi vendor controls
+ rpi: 20000
+ # Controls for debug metadata
+ debug: 30000
+ # Next range starts at 40000
+
+...
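Assuming the generator simply adds each vendor's base offset to a sequential per-vendor index (an assumption about the generator, not stated in the file above), the ranges partition the id space cleanly; a small compile-time check illustrates this:

/* Hypothetical id computation: vendor base offset plus per-vendor index. */
constexpr unsigned int kDraftBase = 10000;
constexpr unsigned int kRpiBase = 20000;

constexpr unsigned int draftControlId(unsigned int index)
{
	return kDraftBase + index;
}

constexpr unsigned int rpiControlId(unsigned int index)
{
	return kRpiBase + index;
}

/* Each range holds 10000 ids, so the last draft id stays below the first rpi id. */
static_assert(draftControlId(9999) < rpiControlId(0), "vendor id ranges must not overlap");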
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index fcff5e56..17834648 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,22 +2,25 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
-#include "control_serializer.h"
+#include "libcamera/internal/control_serializer.h"
#include <algorithm>
#include <memory>
#include <vector>
-#include <ipa/ipa_controls.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
-#include <libcamera/span.h>
+#include <libcamera/property_ids.h>
+
+#include <libcamera/ipa/ipa_controls.h>
-#include "byte_stream_buffer.h"
-#include "log.h"
+#include "libcamera/internal/byte_stream_buffer.h"
/**
* \file control_serializer.h
@@ -59,6 +62,14 @@ LOG_DEFINE_CATEGORY(Serializer)
* corresponding ControlInfoMap handle in the binary data, and when
* deserializing to retrieve the corresponding ControlInfoMap.
*
+ * As independent ControlSerializer instances are used on both sides of the IPC
+ * boundary, and the two instances operate without a shared point of control,
+ * there is a potential risk of collision of the numerical handles assigned to
+ * each serialized ControlInfoMap. For this reason the control serializer is
+ * initialized with a seed and the handle is incremented by 2, so that instances
+ * initialized with a different seed operate on a separate numerical space,
+ * avoiding any collision risk.
+ *
* In order to perform those tasks, the serializer keeps an internal state that
* needs to be properly populated. This mechanism requires the ControlInfoMap
* corresponding to a ControlList to have been serialized or deserialized
@@ -74,9 +85,45 @@ LOG_DEFINE_CATEGORY(Serializer)
* proceed with care to avoid stale references.
*/
-ControlSerializer::ControlSerializer()
- : serial_(0)
+/**
+ * \enum ControlSerializer::Role
+ * \brief Define the role of the IPC component using the control serializer
+ *
+ * The role of the component that creates the serializer is used to initialize
+ * the handles numerical space.
+ *
+ * \var ControlSerializer::Role::Proxy
+ * \brief The control serializer is used by the IPC Proxy classes
+ *
+ * \var ControlSerializer::Role::Worker
+ * \brief The control serializer is used by the IPC ProxyWorker classes
+ */
+
+/**
+ * \brief Construct a new ControlSerializer
+ * \param[in] role The role of the IPC component using the serializer
+ */
+ControlSerializer::ControlSerializer(Role role)
{
+ /*
+ * Initialize the handle numerical space using the role of the
+ * component that created the instance.
+ *
+ * Instances initialized for a different role will use a different
+	 * numerical handle space, avoiding any collision risk when, for example,
+ * two instances of the ControlSerializer class are used at the IPC
+ * boundaries.
+ *
+ * Start counting handles from '1' as '0' is a special value used as
+	 * placeholder when serializing lists that do not have an associated
+	 * ControlInfoMap (for example the list of libcamera controls::controls).
+ *
+ * \todo This is a temporary hack and should probably be better
+ * engineered, but for the time being it avoids collisions on the handle
+ * value when using IPC.
+ */
+ serialSeed_ = role == Role::Proxy ? 1 : 2;
+ serial_ = serialSeed_;
}
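
The seeding scheme above partitions the handle space by parity. A small illustration, assuming the internal header libcamera/internal/control_serializer.h is available to the caller:

/* Handles advance by 2 from the role-specific seed, so the sequences never meet. */
ControlSerializer proxy(ControlSerializer::Role::Proxy);   /* emits handles 1, 3, 5, ... */
ControlSerializer worker(ControlSerializer::Role::Worker); /* emits handles 2, 4, 6, ... */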
/**
@@ -87,21 +134,22 @@ ControlSerializer::ControlSerializer()
*/
void ControlSerializer::reset()
{
- serial_ = 0;
+ serial_ = serialSeed_;
infoMapHandles_.clear();
infoMaps_.clear();
controlIds_.clear();
+ controlIdMaps_.clear();
}
size_t ControlSerializer::binarySize(const ControlValue &value)
{
- return value.data().size_bytes();
+ return sizeof(ControlType) + value.data().size_bytes();
}
size_t ControlSerializer::binarySize(const ControlInfo &info)
{
- return binarySize(info.min()) + binarySize(info.max());
+ return binarySize(info.min()) + binarySize(info.max()) + binarySize(info.def());
}
/**
@@ -147,6 +195,8 @@ size_t ControlSerializer::binarySize(const ControlList &list)
void ControlSerializer::store(const ControlValue &value,
ByteStreamBuffer &buffer)
{
+ const ControlType type = value.type();
+ buffer.write(&type);
buffer.write(value.data());
}
@@ -154,6 +204,7 @@ void ControlSerializer::store(const ControlInfo &info, ByteStreamBuffer &buffer)
{
store(info.min(), buffer);
store(info.max(), buffer);
+ store(info.def(), buffer);
}
/**
@@ -173,6 +224,12 @@ void ControlSerializer::store(const ControlInfo &info, ByteStreamBuffer &buffer)
int ControlSerializer::serialize(const ControlInfoMap &infoMap,
ByteStreamBuffer &buffer)
{
+ if (isCached(infoMap)) {
+ LOG(Serializer, Debug)
+ << "Skipping already serialized ControlInfoMap";
+ return 0;
+ }
+
/* Compute entries and data required sizes. */
size_t entriesSize = infoMap.size()
* sizeof(struct ipa_control_info_entry);
@@ -180,17 +237,36 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
for (const auto &ctrl : infoMap)
valuesSize += binarySize(ctrl.second);
- /* Prepare the packet header, assign a handle to the ControlInfoMap. */
+ const ControlIdMap *idmap = &infoMap.idmap();
+ enum ipa_controls_id_map_type idMapType;
+ if (idmap == &controls::controls)
+ idMapType = IPA_CONTROL_ID_MAP_CONTROLS;
+ else if (idmap == &properties::properties)
+ idMapType = IPA_CONTROL_ID_MAP_PROPERTIES;
+ else
+ idMapType = IPA_CONTROL_ID_MAP_V4L2;
+
+ /* Prepare the packet header. */
struct ipa_controls_header hdr;
hdr.version = IPA_CONTROLS_FORMAT_VERSION;
- hdr.handle = ++serial_;
+ hdr.handle = serial_;
hdr.entries = infoMap.size();
hdr.size = sizeof(hdr) + entriesSize + valuesSize;
hdr.data_offset = sizeof(hdr) + entriesSize;
+ hdr.id_map_type = idMapType;
buffer.write(&hdr);
/*
+ * Increment the handle for the ControlInfoMap by 2 to keep the handles
+ * numerical space partitioned between instances initialized for a
+ * different role.
+ *
+ * \sa ControlSerializer::Role
+ */
+ serial_ += 2;
+
+ /*
* Serialize all entries.
* \todo Serialize the control name too
*/
@@ -205,6 +281,7 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
entry.id = id->id();
entry.type = id->type();
entry.offset = values.offset();
+ entry.direction = static_cast<ControlId::DirectionFlags::Type>(id->direction());
entries.write(&entry);
store(info, values);
@@ -255,6 +332,15 @@ int ControlSerializer::serialize(const ControlList &list,
infoMapHandle = 0;
}
+ const ControlIdMap *idmap = list.idMap();
+ enum ipa_controls_id_map_type idMapType;
+ if (idmap == &controls::controls)
+ idMapType = IPA_CONTROL_ID_MAP_CONTROLS;
+ else if (idmap == &properties::properties)
+ idMapType = IPA_CONTROL_ID_MAP_PROPERTIES;
+ else
+ idMapType = IPA_CONTROL_ID_MAP_V4L2;
+
size_t entriesSize = list.size() * sizeof(struct ipa_control_value_entry);
size_t valuesSize = 0;
for (const auto &ctrl : list)
@@ -267,6 +353,7 @@ int ControlSerializer::serialize(const ControlList &list,
hdr.entries = list.size();
hdr.size = sizeof(hdr) + entriesSize + valuesSize;
hdr.data_offset = sizeof(hdr) + entriesSize;
+ hdr.id_map_type = idMapType;
buffer.write(&hdr);
@@ -295,11 +382,13 @@ int ControlSerializer::serialize(const ControlList &list,
return 0;
}
-ControlValue ControlSerializer::loadControlValue(ControlType type,
- ByteStreamBuffer &buffer,
+ControlValue ControlSerializer::loadControlValue(ByteStreamBuffer &buffer,
bool isArray,
unsigned int count)
{
+ ControlType type;
+ buffer.read(&type);
+
ControlValue value;
value.reserve(type, isArray, count);
@@ -308,16 +397,13 @@ ControlValue ControlSerializer::loadControlValue(ControlType type,
return value;
}
-ControlInfo ControlSerializer::loadControlInfo(ControlType type,
- ByteStreamBuffer &b)
+ControlInfo ControlSerializer::loadControlInfo(ByteStreamBuffer &b)
{
- if (type == ControlTypeString)
- type = ControlTypeInteger32;
-
- ControlValue min = loadControlValue(type, b);
- ControlValue max = loadControlValue(type, b);
+ ControlValue min = loadControlValue(b);
+ ControlValue max = loadControlValue(b);
+ ControlValue def = loadControlValue(b);
- return ControlInfo(min, max);
+ return ControlInfo(min, max, def);
}
/**
@@ -325,7 +411,7 @@ ControlInfo ControlSerializer::loadControlInfo(ControlType type,
* \brief Deserialize an object from a binary buffer
* \param[in] buffer The memory buffer that contains the object
*
- * This method is only valid when specialized for ControlInfoMap or
+ * This function is only valid when specialized for ControlInfoMap or
* ControlList. Any other typename \a T is not supported.
*/
@@ -334,7 +420,7 @@ ControlInfo ControlSerializer::loadControlInfo(ControlType type,
* \param[in] buffer The memory buffer that contains the serialized map
*
* Re-construct a ControlInfoMap from a binary \a buffer containing data
- * serialized using the serialize() method.
+ * serialized using the serialize() function.
*
* \return The deserialized ControlInfoMap
*/
@@ -347,6 +433,12 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
+ auto iter = infoMaps_.find(hdr->handle);
+ if (iter != infoMaps_.end()) {
+ LOG(Serializer, Debug) << "Use cached ControlInfoMap";
+ return iter->second;
+ }
+
if (hdr->version != IPA_CONTROLS_FORMAT_VERSION) {
LOG(Serializer, Error)
<< "Unsupported controls format version "
@@ -354,6 +446,33 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
+ /*
+ * Use the ControlIdMap corresponding to the id map type. If the type
+ * references a globally defined id map (such as controls::controls
+ * or properties::properties), use it. Otherwise, create a local id map
+ * that will be populated with dynamically created ControlId instances
+ * when deserializing individual ControlInfoMap entries.
+ */
+ const ControlIdMap *idMap = nullptr;
+ ControlIdMap *localIdMap = nullptr;
+ switch (hdr->id_map_type) {
+ case IPA_CONTROL_ID_MAP_CONTROLS:
+ idMap = &controls::controls;
+ break;
+ case IPA_CONTROL_ID_MAP_PROPERTIES:
+ idMap = &properties::properties;
+ break;
+ case IPA_CONTROL_ID_MAP_V4L2:
+ controlIdMaps_.emplace_back(std::make_unique<ControlIdMap>());
+ localIdMap = controlIdMaps_.back().get();
+ idMap = localIdMap;
+ break;
+ default:
+ LOG(Serializer, Error)
+ << "Unknown id map type: " << hdr->id_map_type;
+ return {};
+ }
+
ByteStreamBuffer entries = buffer.carveOut(hdr->data_offset - sizeof(*hdr));
ByteStreamBuffer values = buffer.carveOut(hdr->size - hdr->data_offset);
@@ -363,7 +482,6 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
ControlInfoMap::Map ctrls;
-
for (unsigned int i = 0; i < hdr->entries; ++i) {
const struct ipa_control_info_entry *entry =
entries.read<decltype(*entry)>();
@@ -372,13 +490,26 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
- /* Create and cache the individual ControlId. */
ControlType type = static_cast<ControlType>(entry->type);
- /**
- * \todo Find a way to preserve the control name for debugging
- * purpose.
- */
- controlIds_.emplace_back(std::make_unique<ControlId>(entry->id, "", type));
+
+ /* If we're using a local id map, populate it. */
+ if (localIdMap) {
+ ControlId::DirectionFlags flags{
+ static_cast<ControlId::Direction>(entry->direction)
+ };
+
+ /**
+ * \todo Find a way to preserve the control name for
+			 * debugging purposes.
+ */
+ controlIds_.emplace_back(std::make_unique<ControlId>(entry->id,
+ "", "local", type,
+ flags));
+ (*localIdMap)[entry->id] = controlIds_.back().get();
+ }
+
+ const ControlId *controlId = idMap->at(entry->id);
+ ASSERT(controlId);
if (entry->offset != values.offset()) {
LOG(Serializer, Error)
@@ -388,15 +519,15 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
/* Create and store the ControlInfo. */
- ctrls.emplace(controlIds_.back().get(),
- loadControlInfo(type, values));
+ ctrls.emplace(controlId, loadControlInfo(values));
}
/*
* Create the ControlInfoMap in the cache, and store the map to handle
* association.
*/
- ControlInfoMap &map = infoMaps_[hdr->handle] = std::move(ctrls);
+ infoMaps_[hdr->handle] = ControlInfoMap(std::move(ctrls), *idMap);
+ ControlInfoMap &map = infoMaps_[hdr->handle];
infoMapHandles_[&map] = hdr->handle;
return map;
@@ -407,7 +538,7 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
* \param[in] buffer The memory buffer that contains the serialized list
*
* Re-construct a ControlList from a binary \a buffer containing data
- * serialized using the serialize() method.
+ * serialized using the serialize() function.
*
* \return The deserialized ControlList
*/
@@ -436,13 +567,15 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
}
/*
- * Retrieve the ControlInfoMap associated with the ControlList based on
- * its ID. The mapping between infoMap and ID is set up when serializing
- * or deserializing ControlInfoMap. If no mapping is found (which is
- * currently the case for ControlList related to libcamera controls),
- * use the global control::control idmap.
+ * Retrieve the ControlIdMap associated with the ControlList.
+ *
+ * The idmap is either retrieved from the list's ControlInfoMap when
+ * a valid handle has been initialized at serialization time, or by
+ * using the header's id_map_type field for lists that refer to the
+ * globally defined libcamera controls and properties, for which no
+ * ControlInfoMap is available.
*/
- const ControlInfoMap *infoMap;
+ const ControlIdMap *idMap;
if (hdr->handle) {
auto iter = std::find_if(infoMapHandles_.begin(), infoMapHandles_.end(),
[&](decltype(infoMapHandles_)::value_type &entry) {
@@ -454,12 +587,33 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- infoMap = iter->first;
+ const ControlInfoMap *infoMap = iter->first;
+ idMap = &infoMap->idmap();
} else {
- infoMap = nullptr;
+ switch (hdr->id_map_type) {
+ case IPA_CONTROL_ID_MAP_CONTROLS:
+ idMap = &controls::controls;
+ break;
+
+ case IPA_CONTROL_ID_MAP_PROPERTIES:
+ idMap = &properties::properties;
+ break;
+
+ case IPA_CONTROL_ID_MAP_V4L2:
+ default:
+ LOG(Serializer, Fatal)
+ << "A list of V4L2 controls requires an ControlInfoMap";
+ return {};
+ }
}
- ControlList ctrls(infoMap ? infoMap->idmap() : controls::controls);
+ /*
+ * \todo When available, initialize the list with the ControlInfoMap
+ * so that controls can be validated against their limits.
+ * Currently no validation is performed, so it's fine relying on the
+ * idmap only.
+ */
+ ControlList ctrls(*idMap);
for (unsigned int i = 0; i < hdr->entries; ++i) {
const struct ipa_control_value_entry *entry =
@@ -476,13 +630,25 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- ControlType type = static_cast<ControlType>(entry->type);
ctrls.set(entry->id,
- loadControlValue(type, values, entry->is_array,
- entry->count));
+ loadControlValue(values, entry->is_array, entry->count));
}
return ctrls;
}
+/**
+ * \brief Check if a ControlInfoMap is cached
+ * \param[in] infoMap The ControlInfoMap to check
+ *
+ * The ControlSerializer caches all ControlInfoMaps that it has (de)serialized.
+ * This function checks if \a infoMap is in the cache.
+ *
+ * \return True if \a infoMap is in the cache or false otherwise
+ */
+bool ControlSerializer::isCached(const ControlInfoMap &infoMap)
+{
+ return infoMapHandles_.count(&infoMap);
+}
+
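+/**
+ * The cache makes serializing the same ControlInfoMap twice a no-op: the
+ * first call writes the full map, the second logs a debug message and
+ * returns 0. A minimal sketch (the buffer variables are placeholders, and
+ * the Role constructor argument follows the handle partitioning described
+ * above):
+ *
+ * \code
+ * ControlSerializer serializer(ControlSerializer::Role::Proxy);
+ * ByteStreamBuffer buffer(data, size);
+ *
+ * serializer.serialize(infoMap, buffer);
+ * serializer.serialize(infoMap, buffer);
+ * \endcode
+ */
+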
} /* namespace libcamera */
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index 8e5cf3c3..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
-#include "control_validator.h"
+#include "libcamera/internal/control_validator.h"
/**
* \file control_validator.h
@@ -35,7 +35,7 @@ namespace libcamera {
* \brief Validate a control
* \param[in] id The control ID
*
- * This method validates the control \a id against the object corresponding to
+ * This function validates the control \a id against the object corresponding to
* the validator. It shall at least validate that the control is applicable to
* the object instance, and may perform additional checks.
*
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 540cc026..70f6f609 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,19 +2,19 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
-#include <iomanip>
#include <sstream>
-#include <string>
#include <string.h>
+#include <string>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include "control_validator.h"
-#include "log.h"
-#include "utils.h"
+#include "libcamera/internal/control_validator.h"
/**
* \file controls.h
@@ -40,7 +40,7 @@
* int32_t exposure = controls->get(controls::ManualExposure);
* \endcode
*
- * The ControlList::get() and ControlList::set() methods automatically deduce
+ * The ControlList::get() and ControlList::set() functions automatically deduce
* the data type based on the control.
*/
@@ -54,10 +54,15 @@ static constexpr size_t ControlValueSize[] = {
[ControlTypeNone] = 0,
[ControlTypeBool] = sizeof(bool),
[ControlTypeByte] = sizeof(uint8_t),
+ [ControlTypeUnsigned16] = sizeof(uint16_t),
+ [ControlTypeUnsigned32] = sizeof(uint32_t),
[ControlTypeInteger32] = sizeof(int32_t),
[ControlTypeInteger64] = sizeof(int64_t),
[ControlTypeFloat] = sizeof(float),
[ControlTypeString] = sizeof(char),
+ [ControlTypeRectangle] = sizeof(Rectangle),
+ [ControlTypeSize] = sizeof(Size),
+ [ControlTypePoint] = sizeof(Point),
};
} /* namespace */
@@ -71,10 +76,14 @@ static constexpr size_t ControlValueSize[] = {
* The control stores a boolean value
* \var ControlTypeByte
* The control stores a byte value as an unsigned 8-bit integer
+ * \var ControlTypeUnsigned16
+ * The control stores an unsigned 16-bit integer value
+ * \var ControlTypeUnsigned32
+ * The control stores an unsigned 32-bit integer value
* \var ControlTypeInteger32
- * The control stores a 32-bit integer value
+ * The control stores a signed 32-bit integer value
* \var ControlTypeInteger64
- * The control stores a 64-bit integer value
+ * The control stores a signed 64-bit integer value
* \var ControlTypeFloat
* The control stores a 32-bit floating point value
* \var ControlTypeString
@@ -227,6 +236,16 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeUnsigned16: {
+ const uint16_t *value = reinterpret_cast<const uint16_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
+ case ControlTypeUnsigned32: {
+ const uint32_t *value = reinterpret_cast<const uint32_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
case ControlTypeInteger32: {
const int32_t *value = reinterpret_cast<const int32_t *>(data);
str += std::to_string(*value);
@@ -242,6 +261,21 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeRectangle: {
+ const Rectangle *value = reinterpret_cast<const Rectangle *>(data);
+ str += value->toString();
+ break;
+ }
+ case ControlTypeSize: {
+ const Size *value = reinterpret_cast<const Size *>(data);
+ str += value->toString();
+ break;
+ }
+ case ControlTypePoint: {
+ const Point *value = reinterpret_cast<const Point *>(data);
+ str += value->toString();
+ break;
+ }
case ControlTypeNone:
case ControlTypeString:
break;
@@ -376,8 +410,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
* \brief Construct a ControlId instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
* \param[in] type The control data type
- */
+ * \param[in] direction The direction of the control, i.e. whether it can be
+ * used in Controls or Metadata
+ * \param[in] size The size of the array control, or 0 for a scalar control
+ * \param[in] enumStrMap The map from enum names to values (optional)
+ */
+ControlId::ControlId(unsigned int id, const std::string &name,
+ const std::string &vendor, ControlType type,
+ DirectionFlags direction, std::size_t size,
+ const std::map<std::string, int32_t> &enumStrMap)
+ : id_(id), name_(name), vendor_(vendor), type_(type),
+ direction_(direction), size_(size), enumStrMap_(enumStrMap)
+{
+ for (const auto &pair : enumStrMap_)
+ reverseMap_[pair.second] = pair.first;
+}
/**
* \fn unsigned int ControlId::id() const
@@ -392,12 +440,68 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \fn const std::string &ControlId::vendor() const
+ * \brief Retrieve the vendor name
+ * \return The vendor name, as a string
+ */
+
+/**
* \fn ControlType ControlId::type() const
* \brief Retrieve the control data type
* \return The control data type
*/
/**
+ * \fn DirectionFlags ControlId::direction() const
+ * \brief Return the direction that the control can be used in
+ *
+ * This is similar to \sa isInput() and \sa isOutput(), but returns the
+ * direction flags instead of a boolean for each direction.
+ *
+ * \return The direction flags corresponding to if the control can be used as
+ * an input control or as output metadata
+ */
+
+/**
+ * \fn bool ControlId::isInput() const
+ * \brief Determine if the control is available to be used as an input control
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the former.
+ *
+ * \return True if the control can be used as an input control, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isOutput() const
+ * \brief Determine if the control is available to be used in output metadata
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the latter.
+ *
+ * \return True if the control can be returned in output metadata, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isArray() const
+ * \brief Determine if the control is an array control
+ * \return True if the control is an array control, false otherwise
+ */
+
+/**
+ * \fn std::size_t ControlId::size() const
+ * \brief Retrieve the size of the control if it is an array control
+ * \return The size of the array control, size_t::max for dynamic extent, or 0
+ * for non-array
+ */
+
+/**
+ * \fn const std::map<int32_t, std::string> &ControlId::enumerators() const
+ * \brief Retrieve the map of enum values to enum names
+ * \return The map of enum values to enum names
+ */
+
+/**
* \fn bool operator==(unsigned int lhs, const ControlId &rhs)
* \brief Compare a ControlId with a control numerical ID
* \param[in] lhs Left-hand side numerical ID
@@ -416,17 +520,33 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \enum ControlId::Direction
+ * \brief The direction the control is capable of being passed from/to
+ *
+ * \var ControlId::Direction::In
+ * \brief The control can be passed as input in controls
+ *
+ * \var ControlId::Direction::Out
+ * \brief The control can be returned as output in metadata
+ */
+
+/**
+ * \typedef ControlId::DirectionFlags
+ * \brief A wrapper for ControlId::Direction so that it can be used as flags
+ */
+
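+/**
+ * An illustrative sketch, assuming the usual libcamera Flags<> operators are
+ * enabled for ControlId::Direction (as they are for Converter::Feature later
+ * in this series):
+ *
+ * \code
+ * ControlId::DirectionFlags dirs = ControlId::Direction::In
+ *                                | ControlId::Direction::Out;
+ *
+ * if (dirs & ControlId::Direction::Out)
+ *     recordMetadataCapable();
+ * \endcode
+ *
+ * recordMetadataCapable() is a placeholder for caller code; isOutput() is
+ * the equivalent convenience accessor.
+ */
+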
+/**
* \class Control
* \brief Describe a control and its intrinsic properties
*
* The Control class models a control exposed by an object. Its template type
- * name T refers to the control data type, and allows methods that operate on
- * control values to be defined as template methods using the same type T for
- * the control value. See for instance how the ControlList::get() method
+ * name T refers to the control data type, and allows functions that operate on
+ * control values to be defined as template functions using the same type T for
+ * the control value. See for instance how the ControlList::get() function
* returns a value corresponding to the type of the requested control.
*
- * While this class is the main mean to refer to a control, the control
- * identifying information are stored in the non-template base ControlId class.
+ * While this class is the main means to refer to a control, the control
+ * identifying information is stored in the non-template base ControlId class.
* This allows code that operates on a set of controls of different types to
* reference those controls through a ControlId instead of a Control. For
* instance, the list of controls supported by a camera is exposed as ControlId
@@ -443,10 +563,14 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
- * \fn Control::Control(unsigned int id, const char *name)
+ * \fn Control::Control(unsigned int id, const char *name, const char *vendor)
* \brief Construct a Control instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
+ * \param[in] direction The direction of the control, i.e. whether it can be
+ * used in Controls or Metadata
+ * \param[in] enumStrMap The map from enum names to values (optional)
*
* The control data type is automatically deduced from the template type T.
*/
@@ -480,6 +604,57 @@ ControlInfo::ControlInfo(const ControlValue &min,
}
/**
+ * \brief Construct a ControlInfo from the list of valid values
+ * \param[in] values The control valid values
+ * \param[in] def The control default value
+ *
+ * Construct a ControlInfo from a list of valid values. The ControlInfo
+ * minimum and maximum values are set to the first and last members of the
+ * values list respectively. The default value is set to \a def if provided, or
+ * to the minimum value otherwise.
+ */
+ControlInfo::ControlInfo(Span<const ControlValue> values,
+ const ControlValue &def)
+{
+ min_ = values.front();
+ max_ = values.back();
+ def_ = !def.isNone() ? def : values.front();
+
+ values_.reserve(values.size());
+ for (const ControlValue &value : values)
+ values_.push_back(value);
+}
+
+/**
+ * \brief Construct a boolean ControlInfo with both boolean values
+ * \param[in] values The control valid boolean values (both true and false)
+ * \param[in] def The control default boolean value
+ *
+ * Construct a ControlInfo for a boolean control, where both true and false are
+ * valid values. \a values must be { false, true } (the order is irrelevant).
+ * The minimum value will always be false, and the maximum always true. The
+ * default value is \a def.
+ */
+ControlInfo::ControlInfo(std::set<bool> values, bool def)
+ : min_(false), max_(true), def_(def), values_({ false, true })
+{
+ ASSERT(values.count(def) && values.size() == 2);
+}
+
+/**
+ * \brief Construct a boolean ControlInfo with only one valid value
+ * \param[in] value The control valid boolean value
+ *
+ * Construct a ControlInfo for a boolean control that has a single valid
+ * value. The minimum, maximum, and default values will all be \a value.
+ */
+ControlInfo::ControlInfo(bool value)
+ : min_(value), max_(value), def_(value)
+{
+ values_ = { value };
+}
+
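+/**
+ * Taken together, these constructors allow concise declarations. An
+ * illustrative sketch with arbitrary example values: a float control with
+ * minimum 0.0, maximum 16.0 and default 1.0, a boolean control accepting
+ * both values with a default of false, and a boolean control locked to a
+ * single value:
+ *
+ * \code
+ * ControlInfo gain(0.0f, 16.0f, 1.0f);
+ * ControlInfo enable({ false, true }, false);
+ * ControlInfo alwaysOn(true);
+ * \endcode
+ */
+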
+/**
* \fn ControlInfo::min()
* \brief Retrieve the minimum value of the control
*
@@ -508,6 +683,17 @@ ControlInfo::ControlInfo(const ControlValue &min,
*/
/**
+ * \fn ControlInfo::values()
+ * \brief Retrieve the list of valid values
+ *
+ * For controls that support a pre-defined number of values, the enumeration of
+ * those is reported through a vector of ControlValue instances accessible with
+ * this function.
+ *
+ * \return A vector of ControlValue representing the control valid values
+ */
+
+/**
* \brief Provide a string representation of the ControlInfo
*/
std::string ControlInfo::toString() const
@@ -546,14 +732,14 @@ std::string ControlInfo::toString() const
*
* The ControlInfoMap class describes controls supported by an object as an
* unsorted map of ControlId pointers to ControlInfo instances. Unlike the
- * standard std::unsorted_map<> class, it is designed the be immutable once
+ * standard std::unordered_map<> class, it is designed to be immutable once
* constructed, and thus only exposes the read accessors of the
* std::unordered_map<> base class.
*
- * In addition to the features of the standard unsorted map, this class also
- * provides access to the mapped elements using numerical ID keys. It maintains
- * an internal map of numerical ID to ControlId for this purpose, and exposes it
- * through the idmap() method to help construction of ControlList instances.
+ * The class is constructed with a reference to a ControlIdMap. This allows
+ * providing access to the mapped elements using numerical ID keys, in addition
+ * to the features of the standard unsorted map. All ControlId keys in the map
+ * must appear in the ControlIdMap.
*/
/**
@@ -570,24 +756,27 @@ std::string ControlInfo::toString() const
/**
* \brief Construct a ControlInfoMap from an initializer list
* \param[in] init The initializer list
+ * \param[in] idmap The idmap used by the ControlInfoMap
*/
-ControlInfoMap::ControlInfoMap(std::initializer_list<Map::value_type> init)
- : Map(init)
+ControlInfoMap::ControlInfoMap(std::initializer_list<Map::value_type> init,
+ const ControlIdMap &idmap)
+ : Map(init), idmap_(&idmap)
{
- generateIdmap();
+ ASSERT(validate());
}
/**
* \brief Construct a ControlInfoMap from a plain map
* \param[in] info The control info plain map
+ * \param[in] idmap The idmap used by the ControlInfoMap
*
* Construct a new ControlInfoMap and populate its contents with those of
* \a info using move semantics. Upon return the \a info map will be empty.
*/
-ControlInfoMap::ControlInfoMap(Map &&info)
- : Map(std::move(info))
+ControlInfoMap::ControlInfoMap(Map &&info, const ControlIdMap &idmap)
+ : Map(std::move(info)), idmap_(&idmap)
{
- generateIdmap();
+ ASSERT(validate());
}
/**
@@ -597,32 +786,44 @@ ControlInfoMap::ControlInfoMap(Map &&info)
* \return A reference to the ControlInfoMap
*/
-/**
- * \brief Replace the contents with those from the initializer list
- * \param[in] init The initializer list
- * \return A reference to the ControlInfoMap
- */
-ControlInfoMap &ControlInfoMap::operator=(std::initializer_list<Map::value_type> init)
+bool ControlInfoMap::validate()
{
- Map::operator=(init);
- generateIdmap();
- return *this;
-}
+ if (!idmap_)
+ return false;
-/**
- * \brief Move assignment operator from a plain map
- * \param[in] info The control info plain map
- *
- * Populate the map by replacing its contents with those of \a info using move
- * semantics. Upon return the \a info map will be empty.
- *
- * \return A reference to the populated ControlInfoMap
- */
-ControlInfoMap &ControlInfoMap::operator=(Map &&info)
-{
- Map::operator=(std::move(info));
- generateIdmap();
- return *this;
+ for (const auto &ctrl : *this) {
+ const ControlId *id = ctrl.first;
+ auto it = idmap_->find(id->id());
+
+ /*
+ * Make sure all control ids are part of the idmap and verify
+ * the control info matches the expected type.
+ */
+ if (it == idmap_->end() || it->second != id) {
+ LOG(Controls, Error)
+ << "Control " << utils::hex(id->id())
+ << " not in the idmap";
+ return false;
+ }
+
+ /*
+ * For string controls, min and max define the valid
+ * range for the string size, not for the individual
+ * values.
+ */
+ ControlType rangeType = id->type() == ControlTypeString
+ ? ControlTypeInteger32 : id->type();
+ const ControlInfo &info = ctrl.second;
+
+ if (info.min().type() != rangeType) {
+ LOG(Controls, Error)
+ << "Control " << utils::hex(id->id())
+ << " type and info type mismatch";
+ return false;
+ }
+ }
+
+ return true;
}
/**
@@ -632,7 +833,9 @@ ControlInfoMap &ControlInfoMap::operator=(Map &&info)
*/
ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
{
- return at(idmap_.at(id));
+ ASSERT(idmap_);
+
+ return at(idmap_->at(id));
}
/**
@@ -642,7 +845,9 @@ ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
*/
const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
{
- return at(idmap_.at(id));
+ ASSERT(idmap_);
+
+ return at(idmap_->at(id));
}
/**
@@ -652,12 +857,15 @@ const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
*/
ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
{
+ if (!idmap_)
+ return 0;
+
/*
* The ControlInfoMap and its idmap have a 1:1 mapping between their
* entries, we can thus just count the matching entries in idmap to
* avoid an additional lookup.
*/
- return idmap_.count(id);
+ return idmap_->count(id);
}
/**
@@ -668,8 +876,11 @@ ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
*/
ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
{
- auto iter = idmap_.find(id);
- if (iter == idmap_.end())
+ if (!idmap_)
+ return end();
+
+ auto iter = idmap_->find(id);
+ if (iter == idmap_->end())
return end();
return find(iter->second);
@@ -683,8 +894,11 @@ ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
*/
ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
{
- auto iter = idmap_.find(id);
- if (iter == idmap_.end())
+ if (!idmap_)
+ return end();
+
+ auto iter = idmap_->find(id);
+ if (iter == idmap_->end())
return end();
return find(iter->second);
@@ -695,45 +909,18 @@ ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
* \brief Retrieve the ControlId map
*
* Constructing ControlList instances for V4L2 controls requires a ControlIdMap
- * for the V4L2 device that the control list targets. This helper method
+ * for the V4L2 device that the control list targets. This helper function
* returns a suitable idmap for that purpose.
*
* \return The ControlId map
*/
-void ControlInfoMap::generateIdmap()
-{
- idmap_.clear();
-
- for (const auto &ctrl : *this) {
- /*
- * For string controls, min and max define the valid
- * range for the string size, not for the individual
- * values.
- */
- ControlType rangeType = ctrl.first->type() == ControlTypeString
- ? ControlTypeInteger32 : ctrl.first->type();
- const ControlInfo &info = ctrl.second;
-
- if (info.min().type() != rangeType) {
- LOG(Controls, Error)
- << "Control " << utils::hex(ctrl.first->id())
- << " type and info type mismatch";
- idmap_.clear();
- clear();
- return;
- }
-
- idmap_[ctrl.first->id()] = ctrl.first;
- }
-}
-
/**
* \class ControlList
* \brief Associate a list of ControlId with their values for an object
*
* The ControlList class stores values of controls exposed by an object. The
- * lists returned by the Request::controls() and Request::metadata() methods
+ * lists returned by the Request::controls() and Request::metadata() functions
* refer to the camera that the request belongs to.
*
* Control lists are constructed with a map of all the controls supported by
@@ -761,7 +948,8 @@ ControlList::ControlList()
* controls is provided by controls::controls and can be used as the \a idmap
* argument.
*/
-ControlList::ControlList(const ControlIdMap &idmap, ControlValidator *validator)
+ControlList::ControlList(const ControlIdMap &idmap,
+ const ControlValidator *validator)
: validator_(validator), idmap_(&idmap), infoMap_(nullptr)
{
}
@@ -771,7 +959,8 @@ ControlList::ControlList(const ControlIdMap &idmap, ControlValidator *validator)
* \param[in] infoMap The ControlInfoMap for the control list target object
* \param[in] validator The validator (may be null)
*/
-ControlList::ControlList(const ControlInfoMap &infoMap, ControlValidator *validator)
+ControlList::ControlList(const ControlInfoMap &infoMap,
+ const ControlValidator *validator)
: validator_(validator), idmap_(&infoMap.idmap()), infoMap_(&infoMap)
{
}
@@ -830,14 +1019,57 @@ ControlList::ControlList(const ControlInfoMap &infoMap, ControlValidator *valida
*/
/**
- * \brief Check if the list contains a control with the specified \a id
- * \param[in] id The control ID
+ * \enum ControlList::MergePolicy
+ * \brief The policy used by the merge function
*
- * \return True if the list contains a matching control, false otherwise
+ * \var ControlList::MergePolicy::KeepExisting
+ * \brief Existing controls in the target list are kept
+ *
+ * \var ControlList::MergePolicy::OverwriteExisting
+ * \brief Existing controls in the target list are updated
+ */
+
+/**
+ * \brief Merge the \a source into the ControlList
+ * \param[in] source The ControlList to merge into this object
+ * \param[in] policy Controls whether existing elements in *this shall be
+ * overwritten
+ *
+ * Merging two control lists copies elements from the \a source and inserts
+ * them in *this. If the \a source contains elements whose key is already
+ * present in *this, then those elements are only overwritten if
+ * \a policy is MergePolicy::OverwriteExisting.
+ *
+ * Only control lists created from the same ControlIdMap or ControlInfoMap may
+ * be merged. Attempting to do otherwise results in undefined behaviour.
+ *
+ * \todo Reimplement or implement an overloaded version which internally uses
+ * std::unordered_map::merge() and accepts a non-const argument.
*/
-bool ControlList::contains(const ControlId &id) const
+void ControlList::merge(const ControlList &source, MergePolicy policy)
{
- return controls_.find(id.id()) != controls_.end();
+ /**
+ * \todo ASSERT that the current and source ControlList are derived
+ * from a compatible ControlIdMap, to prevent undefined behaviour due to
+ * id collisions.
+ *
+	 * This cannot currently be a direct pointer comparison due to the
+	 * duplication of the ControlIdMaps in the isolated IPA use cases.
+	 * Furthermore, manually checking that each entry of the id map is
+	 * identical is expensive.
+ * See https://bugs.libcamera.org/show_bug.cgi?id=31 for further details
+ */
+
+ for (const auto &ctrl : source) {
+ if (policy == MergePolicy::KeepExisting && contains(ctrl.first)) {
+ const ControlId *id = idmap_->at(ctrl.first);
+ LOG(Controls, Warning)
+ << "Control " << id->name() << " not overwritten";
+ continue;
+ }
+
+ set(ctrl.first, ctrl.second);
+ }
}
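+/**
+ * An illustrative sketch of merging metadata assembled in two places, both
+ * lists being built from the same idmap:
+ *
+ * \code
+ * ControlList metadata(controls::controls);
+ * ControlList ipaMetadata(controls::controls);
+ *
+ * metadata.merge(ipaMetadata, ControlList::MergePolicy::KeepExisting);
+ * \endcode
+ *
+ * With MergePolicy::KeepExisting, entries already present in metadata are
+ * preserved and a warning is logged for each control that is not
+ * overwritten.
+ */
+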
/**
@@ -852,27 +1084,25 @@ bool ControlList::contains(unsigned int id) const
}
/**
- * \fn template<typename T> T ControlList::get(const Control<T> &ctrl) const
+ * \fn ControlList::get(const Control<T> &ctrl) const
* \brief Get the value of control \a ctrl
* \param[in] ctrl The control
*
- * The behaviour is undefined if the control \a ctrl is not present in the
- * list. Use ControlList::contains() to test for the presence of a control in
- * the list before retrieving its value.
- *
- * The control value type shall match the type T, otherwise the behaviour is
- * undefined.
+ * Besides getting the value of a control, this function can also be used to
+ * check if a control is present in the ControlList by converting the returned
+ * std::optional<T> to bool (or calling its has_value() function).
*
- * \return The control value
+ * \return A std::optional<T> containing the control value, or std::nullopt if
+ * the control \a ctrl is not present in the list
*/
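+/**
+ * With the std::optional<T> return value, the presence check and the value
+ * retrieval collapse into a single call. A sketch, with AnalogueGain chosen
+ * purely as an example and sensor->setGain() a placeholder for caller code:
+ *
+ * \code
+ * const auto gain = ctrls.get(controls::AnalogueGain);
+ * if (gain)
+ *     sensor->setGain(*gain);
+ * \endcode
+ */
+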
/**
- * \fn template<typename T, typename V> void ControlList::set(const Control<T> &ctrl, const V &value)
+ * \fn ControlList::set(const Control<T> &ctrl, const V &value)
* \brief Set the control \a ctrl value to \a value
* \param[in] ctrl The control
* \param[in] value The control value
*
- * This method sets the value of a control in the control list. If the control
+ * This function sets the value of a control in the control list. If the control
* is already present in the list, its value is updated, otherwise it is added
* to the list.
*
@@ -881,8 +1111,7 @@ bool ControlList::contains(unsigned int id) const
*/
/**
- * \fn template<typename T, typename V> \
- * void ControlList::set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ * \fn ControlList::set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
* \copydoc ControlList::set(const Control<T> &ctrl, const V &value)
*/
@@ -912,7 +1141,7 @@ const ControlValue &ControlList::get(unsigned int id) const
* \param[in] id The control ID
* \param[in] value The control value
*
- * This method sets the value of a control in the control list. If the control
+ * This function sets the value of a control in the control list. If the control
* is already present in the list, its value is updated, otherwise it is added
* to the list.
*
@@ -938,6 +1167,14 @@ void ControlList::set(unsigned int id, const ControlValue &value)
* associated ControlInfoMap, nullptr is returned in that case.
*/
+/**
+ * \fn ControlList::idMap()
+ * \brief Retrieve the ControlId map used to construct the ControlList
+ * \return The ControlId map used to construct the ControlList. ControlList
+ * instances constructed with the default constructor have no associated
+ * idmap; nullptr is returned in that case.
+ */
+
const ControlValue *ControlList::find(unsigned int id) const
{
const auto iter = controls_.find(id);
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
new file mode 100644
index 00000000..d551b908
--- /dev/null
+++ b/src/libcamera/converter.cpp
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#include "libcamera/internal/converter.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+
+/**
+ * \file converter.h
+ * \brief Abstract converter
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Converter)
+
+/**
+ * \class Converter
+ * \brief Abstract base class for converters
+ *
+ * The Converter class is an Abstract Base Class defining the interfaces of
+ * converter implementations.
+ *
+ * Converters offer scaling and pixel format conversion services on an input
+ * stream. The converter can output multiple streams with individual conversion
+ * parameters from the same input stream.
+ */
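+
+/**
+ * A schematic usage sketch, with error handling elided and the configuration
+ * and buffer variables assumed to exist; the container types follow the
+ * function documentation below:
+ *
+ * \code
+ * converter->configure(inputCfg, { outputCfg });
+ * converter->start();
+ *
+ * converter->queueBuffers(input, { { &stream, output } });
+ * \endcode
+ */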
+
+/**
+ * \enum Converter::Feature
+ * \brief Specify the features supported by the converter
+ * \var Converter::Feature::None
+ * \brief No extra features supported by the converter
+ * \var Converter::Feature::InputCrop
+ * \brief Cropping capability at input is supported by the converter
+ */
+
+/**
+ * \typedef Converter::Features
+ * \brief A bitwise combination of features supported by the converter
+ */
+
+/**
+ * \enum Converter::Alignment
+ * \brief The alignment mode specified when adjusting the converter input or
+ * output sizes
+ * \var Converter::Alignment::Down
+ * \brief Adjust the Converter sizes to a smaller valid size
+ * \var Converter::Alignment::Up
+ * \brief Adjust the Converter sizes to a larger valid size
+ */
+
+/**
+ * \brief Construct a Converter instance
+ * \param[in] media The media device implementing the converter
+ * \param[in] features Features flags representing supported features
+ *
+ * This searches for the entity implementing the data streaming function in the
+ * media graph entities and uses its device node as the converter device node.
+ */
+Converter::Converter(MediaDevice *media, Features features)
+{
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto it = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *entity) {
+ return entity->function() == MEDIA_ENT_F_IO_V4L;
+ });
+ if (it == entities.end()) {
+ LOG(Converter, Error)
+ << "No entity suitable for implementing a converter in "
+ << media->driver() << " entities list.";
+ return;
+ }
+
+ deviceNode_ = (*it)->deviceNode();
+ features_ = features;
+}
+
+Converter::~Converter()
+{
+}
+
+/**
+ * \fn Converter::loadConfiguration()
+ * \brief Load converter configuration from file
+ * \param[in] filename The file name path
+ *
+ * Load converter dependent configuration parameters to apply on the hardware.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isValid()
+ * \brief Check if the converter configuration is valid
+ * \return True if the converter is valid, false otherwise
+ */
+
+/**
+ * \fn Converter::formats()
+ * \brief Retrieve the list of supported pixel formats for an input pixel format
+ * \param[in] input Input pixel format to retrieve output pixel format list for
+ * \return The list of supported output pixel formats
+ */
+
+/**
+ * \fn Converter::sizes()
+ * \brief Retrieve the range of minimum and maximum output sizes for an input size
+ * \param[in] input Input stream size to retrieve range for
+ * \return A range of output image sizes
+ */
+
+/**
+ * \fn Converter::adjustInputSize()
+ * \brief Adjust the converter input \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter input stream
+ * \param[in] size The converter input size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter input size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::adjustOutputSize()
+ * \brief Adjust the converter output \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter output stream
+ * \param[in] size The converter output size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter output size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::strideAndFrameSize()
+ * \brief Retrieve the output stride and frame size for an input configuration
+ * \param[in] pixelFormat Input stream pixel format
+ * \param[in] size Input stream size
+ * \return A tuple indicating the stride and frame size or an empty tuple on error
+ */
+
+/**
+ * \fn Converter::validateOutput()
+ * \brief Validate and possibly adjust \a cfg to a valid converter output
+ * \param[inout] cfg The StreamConfiguration to validate and adjust
+ * \param[out] adjusted Set to true if \a cfg has been adjusted
+ * \param[in] align The desired alignment
+ * \return 0 if \a cfg is valid or has been adjusted, a negative error code
+ * otherwise if \a cfg cannot be adjusted
+ */
+
+/**
+ * \fn Converter::configure()
+ * \brief Configure a set of output stream conversion from an input stream
+ * \param[in] inputCfg Input stream configuration
+ * \param[out] outputCfgs A list of output stream configurations
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isConfigured()
+ * \brief Check if a given stream is configured
+ * \param[in] stream The output stream
+ * \return True if the \a stream is configured or false otherwise
+ */
+
+/**
+ * \fn Converter::exportBuffers()
+ * \brief Export buffers from the converter device
+ * \param[in] stream Output stream pointer exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store allocated buffers
+ *
+ * This function operates similarly to V4L2VideoDevice::exportBuffers() on the
+ * output stream indicated by \a stream.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+
+/**
+ * \fn Converter::start()
+ * \brief Start the converter streaming operation
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::stop()
+ * \brief Stop the converter streaming operation
+ */
+
+/**
+ * \fn Converter::queueBuffers()
+ * \brief Queue buffers to converter device
+ * \param[in] input The frame buffer to apply the conversion
+ * \param[out] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs.
+ *
+ * This function queues the \a input frame buffer on the output streams given
+ * by the \a outputs map keys, and retrieves the output frame buffers
+ * indicated by the map values.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::setInputCrop()
+ * \brief Set the crop rectangle \a rect for \a stream
+ * \param[in] stream The output stream
+ * \param[inout] rect The crop rectangle to apply and return the rectangle
+ * that is actually applied
+ *
+ * Set the crop rectangle \a rect for \a stream provided the converter supports
+ * cropping. The converter has the Feature::InputCrop flag in this case.
+ *
+ * The underlying hardware can adjust the rectangle supplied by the user
+ * due to hardware constraints. The caller can inspect \a rect to determine the
+ * actual rectangle that has been applied by the converter, after this function
+ * returns.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::inputCropBounds()
+ * \brief Retrieve the crop bounds of the converter
+ *
+ * Retrieve the minimum and maximum crop bounds of the converter. This can be
+ * used to query the crop bounds before configuring a stream.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \fn Converter::inputCropBounds(const Stream *stream)
+ * \brief Retrieve the crop bounds for \a stream
+ * \param[in] stream The output stream
+ *
+ * Retrieve the minimum and maximum crop bounds for \a stream. The converter
+ * should support cropping (Feature::InputCrop).
+ *
+ * The crop bounds depend on the configuration of the output stream and hence
+ * this function should be called after the \a stream has been configured using
+ * configure().
+ *
+ * When called with an unconfigured \a stream, this function returns a pair of
+ * null rectangles.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \var Converter::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var Converter::outputBufferReady
+ * \brief A signal emitted on each frame buffer completion of the output queue
+ */
+
+/**
+ * \var Converter::features_
+ * \brief Stores the features supported by the converter
+ */
+
+/**
+ * \fn Converter::deviceNode()
+ * \brief The converter device node attribute accessor
+ * \return The converter device node string
+ */
+
+/**
+ * \fn Converter::features()
+ * \brief Retrieve the features supported by the converter
+ * \return The converter Features flags
+ */
+
+/**
+ * \class ConverterFactoryBase
+ * \brief Base class for converter factories
+ *
+ * The ConverterFactoryBase class is the base of all specializations of the
+ * ConverterFactory class template. It implements the factory registration,
+ * maintains a registry of factories, and provides access to the registered
+ * factories.
+ */
+
+/**
+ * \brief Construct a converter factory base
+ * \param[in] name Name of the converter class
+ * \param[in] compatibles Name aliases of the converter class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used as unique identifier. If the converter
+ * implementation fully relies on a generic framework, the name should be the
+ * same as the framework. Otherwise, if the implementation is specialized, the
+ * factory name should match the driver name implementing the function.
+ *
+ * The factory \a compatibles holds a list of driver names implementing a generic
+ * subsystem without any device-specific customizations.
+ */
+ConverterFactoryBase::ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles)
+ : name_(name), compatibles_(compatibles)
+{
+ registerType(this);
+}
+
+/**
+ * \fn ConverterFactoryBase::compatibles()
+ * \return The list of compatible name aliases of the converter
+ */
+
+/**
+ * \brief Create an instance of the converter corresponding to the media device
+ * \param[in] media The media device to create the converter for
+ *
+ * The converter is created by matching the factory name or any of its
+ * compatible aliases with the media device driver name.
+ *
+ * \return A new instance of the converter subclass corresponding to the media
+ * device, or null if the media device driver name doesn't match anything
+ */
+std::unique_ptr<Converter> ConverterFactoryBase::create(MediaDevice *media)
+{
+ const std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (const ConverterFactoryBase *factory : factories) {
+ const std::vector<std::string> &compatibles = factory->compatibles();
+ auto it = std::find(compatibles.begin(), compatibles.end(), media->driver());
+
+ if (it == compatibles.end() && media->driver() != factory->name_)
+ continue;
+
+ LOG(Converter, Debug)
+ << "Creating converter from "
+ << factory->name_ << " factory with "
+ << (it == compatibles.end() ? "no" : media->driver()) << " alias.";
+
+ std::unique_ptr<Converter> converter = factory->createInstance(media);
+ if (converter->isValid())
+ return converter;
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a converter factory to the registry
+ * \param[in] factory Factory to use to construct the converter class
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the converter
+ * factory name.
+ */
+void ConverterFactoryBase::registerType(ConverterFactoryBase *factory)
+{
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all converter factory names
+ * \return The list of all converter factory names
+ */
+std::vector<std::string> ConverterFactoryBase::names()
+{
+ std::vector<std::string> list;
+
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (ConverterFactoryBase *factory : factories) {
+ list.push_back(factory->name_);
+
+ const auto &compatibles = factory->compatibles();
+ list.insert(list.end(), compatibles.begin(), compatibles.end());
+ }
+
+ return list;
+}
+
+/**
+ * \brief Retrieve the list of all converter factories
+ * \return The list of converter factories
+ */
+std::vector<ConverterFactoryBase *> &ConverterFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<ConverterFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \var ConverterFactoryBase::name_
+ * \brief The name of the factory
+ */
+
+/**
+ * \var ConverterFactoryBase::compatibles_
+ * \brief The list holding the factory compatibles
+ */
+
+/**
+ * \class ConverterFactory
+ * \brief Registration of ConverterFactory classes and creation of instances
+ * \param _Converter The converter class type for this factory
+ *
+ * To facilitate discovery and instantiation of Converter classes, the
+ * ConverterFactory class implements auto-registration of converter helpers.
+ * Each Converter subclass shall register itself using the REGISTER_CONVERTER()
+ * macro, which will create a corresponding instance of a ConverterFactory
+ * subclass and register it with the static list of factories.
+ */
+
+/**
+ * \fn ConverterFactory::ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ * \brief Construct a converter factory
+ * \details \copydetails ConverterFactoryBase::ConverterFactoryBase
+ */
+
+/**
+ * \fn ConverterFactory::createInstance() const
+ * \brief Create an instance of the Converter corresponding to the factory
+ * \param[in] media Media device pointer
+ * \return A unique pointer to a newly constructed instance of the Converter
+ * subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CONVERTER
+ * \brief Register a converter with the Converter factory
+ * \param[in] name Converter name used to register the class
+ * \param[in] converter Class name of Converter derived class to register
+ * \param[in] compatibles List of compatible names
+ *
+ * Register a Converter subclass with the factory and make it available for
+ * converter matching.
+ */
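+
+/**
+ * A minimal registration sketch for a hypothetical converter; all names are
+ * placeholders:
+ *
+ * \code
+ * class MyConverter : public Converter
+ * {
+ *         ...
+ * };
+ *
+ * REGISTER_CONVERTER("my-driver", MyConverter, { "my-driver-compat" })
+ * \endcode
+ */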
+
+} /* namespace libcamera */
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
new file mode 100644
index 00000000..566f18ce
--- /dev/null
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
+ *
+ * V4L2 M2M Format converter
+ */
+
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
+
+#include <algorithm>
+#include <limits.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/**
+ * \file converter/converter_v4l2_m2m.h
+ * \brief V4L2 M2M based converter
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Converter)
+
+namespace {
+
+int getCropBounds(V4L2VideoDevice *device, Rectangle &minCrop,
+ Rectangle &maxCrop)
+{
+ Rectangle minC;
+ Rectangle maxC;
+
+ /* Find crop bounds */
+ minC.width = 1;
+ minC.height = 1;
+ maxC.width = UINT_MAX;
+ maxC.height = UINT_MAX;
+
+ int ret = device->setSelection(V4L2_SEL_TGT_CROP, &minC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query minimum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ ret = device->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query maximum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ /* Reset the crop to its maximum */
+ ret = device->setSelection(V4L2_SEL_TGT_CROP, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not reset selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ minCrop = minC;
+ maxCrop = maxC;
+ return 0;
+}
+
+} /* namespace */
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter::V4L2M2MStream
+ */
+
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
+{
+ m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
+
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
+
+ int ret = m2m_->open();
+ if (ret < 0)
+ m2m_.reset();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
+{
+ V4L2PixelFormat videoFormat =
+ m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
+
+ V4L2DeviceFormat format;
+ format.fourcc = videoFormat;
+ format.size = inputCfg.size;
+ format.planesCount = 1;
+ format.planes[0].bpl = inputCfg.stride;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set input format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != inputCfg.size ||
+ format.planes[0].bpl != inputCfg.stride) {
+ LOG(Converter, Error)
+ << "Input format not supported (requested "
+ << inputCfg.size << "-" << videoFormat
+ << ", got " << format << ")";
+ return -EINVAL;
+ }
+
+ /* Set the pixel format and size on the output. */
+ videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
+ format = {};
+ format.fourcc = videoFormat;
+ format.size = outputCfg.size;
+
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set output format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != outputCfg.size) {
+ LOG(Converter, Error)
+ << "Output format not supported";
+ return -EINVAL;
+ }
+
+ inputBufferCount_ = inputCfg.bufferCount;
+ outputBufferCount_ = outputCfg.bufferCount;
+
+ if (converter_->features() & Feature::InputCrop) {
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ return m2m_->capture()->exportBuffers(count, buffers);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::start()
+{
+ int ret = m2m_->output()->importBuffers(inputBufferCount_);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->importBuffers(outputBufferCount_);
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->output()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->capture()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ return 0;
+}
+
+void V4L2M2MConverter::V4L2M2MStream::stop()
+{
+ m2m_->capture()->streamOff();
+ m2m_->output()->streamOff();
+ m2m_->capture()->releaseBuffers();
+ m2m_->output()->releaseBuffers();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+{
+ int ret = m2m_->output()->queueBuffer(input);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->queueBuffer(output);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::getInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->getSelection(target, rect);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::setInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->setSelection(target, rect);
+}
+
+std::pair<Rectangle, Rectangle> V4L2M2MConverter::V4L2M2MStream::inputCropBounds()
+{
+ return inputCropBounds_;
+}
+
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
+{
+ return stream_->configuration().toString();
+}
+
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
+{
+ auto it = converter_->queue_.find(buffer);
+ if (it == converter_->queue_.end())
+ return;
+
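+	/*
+	 * queue_ counts how many output streams still reference this input
+	 * buffer; emit inputBufferReady only once the last output buffer has
+	 * completed.
+	 */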
+ if (!--it->second) {
+ converter_->inputBufferReady.emit(buffer);
+ converter_->queue_.erase(it);
+ }
+}
+
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
+{
+ converter_->outputBufferReady.emit(buffer);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter
+ */
+
+/**
+ * \class libcamera::V4L2M2MConverter
+ * \brief The V4L2 M2M converter implements the converter interface based on
+ * a V4L2 M2M device.
+ */
+
+/**
+ * \fn V4L2M2MConverter::V4L2M2MConverter
+ * \brief Construct a V4L2M2MConverter instance
+ * \param[in] media The media device implementing the converter
+ */
+
+V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
+ : Converter(media)
+{
+ if (deviceNode().empty())
+ return;
+
+ m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
+ int ret = m2m_->open();
+ if (ret < 0) {
+ m2m_.reset();
+ return;
+ }
+
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (!ret && inputCropBounds_.first != inputCropBounds_.second) {
+ features_ |= Feature::InputCrop;
+
+ LOG(Converter, Info)
+ << "Converter supports cropping on its input";
+ }
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::loadConfiguration
+ * \details \copydetails libcamera::Converter::loadConfiguration
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::isValid
+ * \details \copydetails libcamera::Converter::isValid
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::formats
+ * \details \copydetails libcamera::Converter::formats
+ */
+std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the format on the input side (V4L2 output) of the converter to
+ * enumerate the conversion capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat v4l2Format;
+ v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
+ v4l2Format.size = { 1, 1 };
+
+ int ret = m2m_->output()->setFormat(&v4l2Format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
+ LOG(Converter, Debug)
+ << "Input format " << input << " not supported.";
+ return {};
+ }
+
+ std::vector<PixelFormat> pixelFormats;
+
+ for (const auto &format : m2m_->capture()->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (pixelFormat)
+ pixelFormats.push_back(pixelFormat);
+ }
+
+ return pixelFormats;
+}
+
+/**
+ * \copydoc libcamera::Converter::sizes
+ */
+SizeRange V4L2M2MConverter::sizes(const Size &input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the size on the input side (V4L2 output) of the converter to
+ * enumerate the scaling capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat format;
+ format.fourcc = V4L2PixelFormat();
+ format.size = input;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ SizeRange sizes;
+
+ format.size = { 1, 1 };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.min = format.size;
+
+ format.size = { UINT_MAX, UINT_MAX };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.max = format.size;
+
+ return sizes;
+}
+
+/**
+ * \copydoc libcamera::Converter::strideAndFrameSize
+ */
+std::tuple<unsigned int, unsigned int>
+V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
+ const Size &size)
+{
+ V4L2DeviceFormat format;
+ format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
+ format.size = size;
+
+ int ret = m2m_->capture()->tryFormat(&format);
+ if (ret < 0)
+ return std::make_tuple(0, 0);
+
+ return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustInputSize
+ */
+Size V4L2M2MConverter::adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->output()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->output()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustOutputSize
+ */
+Size V4L2M2MConverter::adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->capture()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->capture()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+Size V4L2M2MConverter::adjustSizes(const Size &cfgSize,
+ const std::vector<SizeRange> &ranges,
+ Alignment align)
+{
+ Size size = cfgSize;
+
+ if (ranges.size() == 1) {
+ /*
+ * The device supports either V4L2_FRMSIZE_TYPE_CONTINUOUS or
+ * V4L2_FRMSIZE_TYPE_STEPWISE.
+ */
+ const SizeRange &range = *ranges.begin();
+
+ size.width = std::clamp(size.width, range.min.width,
+ range.max.width);
+ size.height = std::clamp(size.height, range.min.height,
+ range.max.height);
+
+ /*
+ * Check if any alignment is needed. If the sizes are already
+ * aligned, or the device supports V4L2_FRMSIZE_TYPE_CONTINUOUS
+ * with hStep and vStep equal to 1, we're done here.
+ */
+ int widthR = size.width % range.hStep;
+ int heightR = size.height % range.vStep;
+
+ /* Align up or down according to the caller request. */
+
+ if (widthR != 0)
+ size.width = size.width - widthR
+ + ((align == Alignment::Up) ? range.hStep : 0);
+
+ if (heightR != 0)
+ size.height = size.height - heightR
+ + ((align == Alignment::Up) ? range.vStep : 0);
+ } else {
+ /*
+ * The device supports V4L2_FRMSIZE_TYPE_DISCRETE, find the
+ * size closest to the requested output configuration.
+ *
+ * The size ranges vector is not ordered, so we sort it first.
+ * If we align up, start from the larger element.
+ */
+ std::vector<Size> sizes;
+ sizes.reserve(ranges.size());
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+ std::sort(sizes.begin(), sizes.end());
+
+ if (align == Alignment::Up)
+ std::reverse(sizes.begin(), sizes.end());
+
+ /*
+ * Return true if s2 is valid according to the desired
+ * alignment: smaller than s1 if we align down, larger than s1
+ * if we align up.
+ */
+ auto nextSizeValid = [](const Size &s1, const Size &s2, Alignment a) {
+ return a == Alignment::Down
+ ? (s1.width > s2.width && s1.height > s2.height)
+ : (s1.width < s2.width && s1.height < s2.height);
+ };
+
+ Size newSize;
+ for (const Size &sz : sizes) {
+ if (!nextSizeValid(size, sz, align))
+ break;
+
+ newSize = sz;
+ }
+
+ if (newSize.isNull()) {
+ LOG(Converter, Error)
+ << "Cannot adjust " << cfgSize
+ << " to a supported converter size";
+ return {};
+ }
+
+ size = newSize;
+ }
+
+ return size;
+}
+
+/**
+ * \copydoc libcamera::Converter::configure
+ */
+int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ int ret = 0;
+
+ streams_.clear();
+
+ for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
+
+ if (!stream->isValid()) {
+ LOG(Converter, Error)
+ << "Failed to create stream " << i;
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = stream->configure(inputCfg, cfg);
+ if (ret < 0)
+ break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
+ }
+
+ if (ret < 0) {
+ streams_.clear();
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::isConfigured
+ */
+bool V4L2M2MConverter::isConfigured(const Stream *stream) const
+{
+ return streams_.find(stream) != streams_.end();
+}
+
+/**
+ * \copydoc libcamera::Converter::exportBuffers
+ */
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
+ return -EINVAL;
+
+ return iter->second->exportBuffers(count, buffers);
+}
+
+/**
+ * \copydoc libcamera::Converter::setInputCrop
+ */
+int V4L2M2MConverter::setInputCrop(const Stream *stream, Rectangle *rect)
+{
+ if (!(features_ & Feature::InputCrop))
+ return -ENOTSUP;
+
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return -EINVAL;
+ }
+
+ return iter->second->setInputSelection(V4L2_SEL_TGT_CROP, rect);
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::inputCropBounds()
+ * \copydoc libcamera::Converter::inputCropBounds()
+ */
+
+/**
+ * \copydoc libcamera::Converter::inputCropBounds(const Stream *stream)
+ */
+std::pair<Rectangle, Rectangle>
+V4L2M2MConverter::inputCropBounds(const Stream *stream)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return {};
+ }
+
+ return iter->second->inputCropBounds();
+}
+
+/**
+ * \copydoc libcamera::Converter::start
+ */
+int V4L2M2MConverter::start()
+{
+ int ret;
+
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::stop
+ */
+void V4L2M2MConverter::stop()
+{
+ for (auto &iter : streams_)
+ iter.second->stop();
+}
+
+/**
+ * \copydoc libcamera::Converter::validateOutput
+ */
+int V4L2M2MConverter::validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align)
+{
+ V4L2VideoDevice *capture = m2m_->capture();
+ V4L2VideoDevice::Formats fmts = capture->formats();
+
+ if (adjusted)
+ *adjusted = false;
+
+ PixelFormat fmt = cfg->pixelFormat;
+ V4L2PixelFormat v4l2PixFmt = capture->toV4L2PixelFormat(fmt);
+
+ auto it = fmts.find(v4l2PixFmt);
+ if (it == fmts.end()) {
+ it = fmts.begin();
+ v4l2PixFmt = it->first;
+ cfg->pixelFormat = v4l2PixFmt.toPixelFormat();
+
+ if (adjusted)
+ *adjusted = true;
+
+ LOG(Converter, Info)
+ << "Converter output pixel format adjusted to "
+ << cfg->pixelFormat;
+ }
+
+ const Size cfgSize = cfg->size;
+ cfg->size = adjustSizes(cfgSize, it->second, align);
+
+ if (cfg->size.isNull())
+ return -EINVAL;
+
+ if (cfg->size.width != cfgSize.width ||
+ cfg->size.height != cfgSize.height) {
+ LOG(Converter, Info)
+ << "Converter size adjusted to "
+ << cfg->size;
+ if (adjusted)
+ *adjusted = true;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::queueBuffers
+ */
+int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ std::set<FrameBuffer *> outputBufs;
+ int ret;
+
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream, and no two
+ * streams can reference the same output framebuffer.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+
+ outputBufs.insert(buffer);
+ }
+
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
+ /* Queue the input and output buffers to all the streams. */
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Add the input buffer to the queue, with the number of streams as a
+ * reference count. Completion of the input buffer will be signalled by
+ * the stream that releases the last reference.
+ */
+ queue_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(input),
+ std::forward_as_tuple(outputs.size()));
+
+ return 0;
+}
+
+/*
+ * \todo This should be extended to include Feature::Flag to denote
+ * what each converter supports feature-wise.
+ */
+static std::initializer_list<std::string> compatibles = {
+ "mtk-mdp",
+ "pxp",
+};
+
+REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
new file mode 100644
index 00000000..af1a80fe
--- /dev/null
+++ b/src/libcamera/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'converter_v4l2_m2m.cpp'
+])
diff --git a/src/libcamera/debug_controls.cpp b/src/libcamera/debug_controls.cpp
new file mode 100644
index 00000000..33960231
--- /dev/null
+++ b/src/libcamera/debug_controls.cpp
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Helper to easily record debug metadata inside libcamera.
+ */
+
+#include "libcamera/internal/debug_controls.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DebugControls)
+
+/**
+ * \file debug_controls.h
+ * \brief Helper to easily record debug metadata inside libcamera
+ */
+
+/**
+ * \class DebugMetadata
+ * \brief Helper to record metadata for later use
+ *
+ * Metadata is a useful tool for debugging the internal state of libcamera. It
+ * has the benefit that it is easy to use and related tooling is readily
+ * available. The difficulty is that the metadata control list is often not
+ * directly available (either because the variable to debug lives inside
+ * process() of an IPA or inside a closed algorithm class with no direct access
+ * to the IPA and therefore the metadata list).
+ *
+ * This class helps in both cases. It allows forwarding the data to a parent,
+ * or alternatively recording the data and copying it to the metadata list at a
+ * later point in time, when it becomes available. Both mechanisms allow easy
+ * reuse and loose coupling.
+ *
+ * Typical usage is to instantiate a DebugMetadata object in every
+ * class/algorithm where debug metadata shall be recorded (the inner object). If
+ * the IPA doesn't support debug metadata, the object is still usable, but the
+ * debug data gets dropped. If the IPA supports debug metadata it will either
+ * register a parent DebugMetadata object on the inner object or manually
+ * retrieve the data using enable()/moveEntries().
+ *
+ * The concepts of forwarding to a parent and recording for later retrieval are
+ * mutually exclusive, and the parent takes precedence. For example, it is not
+ * allowed to enable a DebugMetadata object, log entries to it and later set the
+ * parent.
+ *
+ * This is done to keep the path open for using other means of data transport
+ * (like tracing). For every tracing event a corresponding context needs to be
+ * available at set() time. The parent can be treated as such, and the top level
+ * object (the one where enable() gets called) likewise lives in a place where
+ * that information is available.
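+ *
+ * A minimal sketch of both transports; the top level object, metadata list
+ * and debug control are illustrative assumptions:
+ *
+ * \code
+ * DebugMetadata inner;
+ *
+ * // Either forward entries to the top level object...
+ * inner.setParent(&topLevel);
+ *
+ * // ...or record them and move them out once the metadata list is available.
+ * inner.enable(true);
+ * inner.set(controls::debug::SomeValue, 42); // hypothetical debug control
+ * inner.moveEntries(metadata);
+ * \endcode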
+ */
+
+/**
+ * \fn DebugMetadata::enableByControl()
+ * \brief Enable based on controls::DebugMetadataEnable in the supplied
+ * ControlList
+ * \param[in] controls The supplied ControlList
+ *
+ * This function looks for controls::DebugMetadataEnable and enables or disables
+ * debug metadata handling accordingly.
+ */
+void DebugMetadata::enableByControl(const ControlList &controls)
+{
+ const auto &ctrl = controls.get(controls::DebugMetadataEnable);
+ if (ctrl)
+ enable(*ctrl);
+}
+
+/**
+ * \fn DebugMetadata::enable()
+ * \brief Enable or disable metadata handling
+ * \param[in] enable The enable state
+ *
+ * When \a enable is true, all calls to set() get cached and can later be
+ * retrieved using moveEntries(). When \a enable is false, the cache gets
+ * cleared and no further metadata is recorded.
+ *
+ * Forwarding to a parent is independent of the enabled state.
+ */
+void DebugMetadata::enable(bool enable)
+{
+ enabled_ = enable;
+ if (!enabled_)
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::setParent()
+ * \brief Set the parent metadata handler to \a parent
+ * \param[in] parent The parent handler
+ *
+ * When a \a parent is set, all further calls to set() are unconditionally
+ * forwarded to that instance.
+ *
+ * The parent can be reset by passing a nullptr.
+ */
+void DebugMetadata::setParent(DebugMetadata *parent)
+{
+ parent_ = parent;
+
+ if (!parent_)
+ return;
+
+ if (!cache_.empty())
+ LOG(DebugControls, Error)
+ << "Controls were recorded before setting a parent."
+ << " These are dropped.";
+
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::moveEntries()
+ * \brief Move all cached entries into control list \a list
+ * \param[in] list The control list
+ *
+ * This function moves all entries into the list specified by \a list. Duplicate
+ * entries in \a list get overwritten.
+ */
+void DebugMetadata::moveEntries(ControlList &list)
+{
+ list.merge(std::move(cache_), ControlList::MergePolicy::OverwriteExisting);
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::set(const Control<T> &ctrl, const V &value)
+ * \brief Set the value of \a ctrl to \a value
+ * \param[in] ctrl The control to set
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+
+/**
+ * \fn DebugMetadata::set(unsigned int id, const ControlValue &value)
+ * \brief Set the value of control \a id to \a value
+ * \param[in] id The id of the control
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+void DebugMetadata::set(unsigned int id, const ControlValue &value)
+{
+ if (parent_) {
+ parent_->set(id, value);
+ return;
+ }
+
+ if (!enabled_)
+ return;
+
+ cache_.set(id, value);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
new file mode 100644
index 00000000..94d0a575
--- /dev/null
+++ b/src/libcamera/delayed_controls.cpp
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ */
+
+#include "libcamera/internal/delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DelayedControls)
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth of a pipeline the
+ * application needs to maintain to also cover controls. Just as with buffer
+ * depth, if the application keeps the number of requests queued above the
+ * control depth, the controls are guaranteed to take effect for the correct
+ * request. The control depth is determined by the control with the greatest
+ * delay.
+ */
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
+ * the existing vertical blanking specified bounds, but are within the new
+ * blanking bounds.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. If it's desired to
+ * mix delayed controls and controls that take effect immediately, the
+ * immediate controls must be listed in the \a controlParams map with a delay
+ * value of 0.
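+ *
+ * A construction sketch with illustrative control ids and delay values:
+ *
+ * \code
+ * std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ * 	{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ * 	{ V4L2_CID_EXPOSURE, { 2, false } },
+ * 	{ V4L2_CID_VBLANK, { 1, true } },
+ * };
+ * auto delayedCtrls = std::make_unique<DelayedControls>(device, params);
+ * \endcode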
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(DelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(DelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset();
+}
+
+/**
+ * \brief Reset state machine
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset()
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+
+ /* Retrieve control as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+ * Do not mark this control value as updated, it does not need
+ * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
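+ *
+ * A minimal sketch, pushing the controls of each queued request (\a request
+ * is assumed to be the corresponding libcamera Request):
+ *
+ * \code
+ * delayedCtrls->push(request->controls());
+ * \endcode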
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(DelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(DelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist. It's
+ * the caller's responsibility not to read sequence numbers so old that they
+ * have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history from the current sequence number that yields
+ * valid values is thus 16 minus the number of controls pushed.
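+ *
+ * A retrieval sketch, assuming the helper was fed through applyControls() for
+ * every started frame:
+ *
+ * \code
+ * ControlList ctrls = delayedCtrls->get(buffer->metadata().sequence);
+ * \endcode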
+ *
+ * \return The controls at \a sequence number
+ */
+ControlList DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(DelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return out;
+}
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start of
+ * exposure (SOE) V4L2 event.
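+ *
+ * A connection sketch, assuming a V4L2 video device that exposes a frameStart
+ * signal:
+ *
+ * \code
+ * video->frameStart.connect(delayedCtrls.get(), &DelayedControls::applyControls);
+ * \endcode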
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(DelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(DelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(DelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({});
+ }
+
+ device_->setControls(&out);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index dd17e3e3..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,17 +2,18 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
-#include "device_enumerator.h"
-#include "device_enumerator_sysfs.h"
-#include "device_enumerator_udev.h"
+#include "libcamera/internal/device_enumerator.h"
#include <string.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/device_enumerator_sysfs.h"
+#include "libcamera/internal/device_enumerator_udev.h"
+#include "libcamera/internal/media_device.h"
/**
* \file device_enumerator.h
@@ -55,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
@@ -100,8 +101,14 @@ bool DeviceMatch::match(const MediaDevice *device) const
for (const MediaEntity *entity : device->entities()) {
if (name == entity->name()) {
- found = true;
- break;
+ if (!entity->deviceNode().empty()) {
+ found = true;
+ break;
+ } else {
+ LOG(DeviceEnumerator, Debug)
+ << "Skip " << entity->name()
+ << ": no device node";
+ }
}
}
@@ -160,7 +167,7 @@ std::unique_ptr<DeviceEnumerator> DeviceEnumerator::create()
DeviceEnumerator::~DeviceEnumerator()
{
- for (std::shared_ptr<MediaDevice> media : devices_) {
+ for (const std::shared_ptr<MediaDevice> &media : devices_) {
if (media->busy())
LOG(DeviceEnumerator, Error)
<< "Removing media device " << media->deviceNode()
@@ -228,20 +235,33 @@ std::unique_ptr<MediaDevice> DeviceEnumerator::createDevice(const std::string &d
}
/**
+ * \var DeviceEnumerator::devicesAdded
+ * \brief Notify of new media devices being found
+ *
+ * This signal is emitted when the device enumerator finds new media devices in
+ * the system. It may be emitted for every newly detected device, or once for
+ * multiple devices, at the discretion of the device enumerator. Not all device
+ * enumerator types may support dynamic detection of new devices.
+ */
+
+/**
* \brief Add a media device to the enumerator
* \param[in] media media device instance to add
*
* Store the media device in the internal list for later matching with
* pipeline handlers. \a media shall be created with createDevice() first.
- * This method shall be called after all members of the entities of the
+ * This function shall be called after all members of the entities of the
* media graph have been confirmed to be initialized.
*/
-void DeviceEnumerator::addDevice(std::unique_ptr<MediaDevice> &&media)
+void DeviceEnumerator::addDevice(std::unique_ptr<MediaDevice> media)
{
LOG(DeviceEnumerator, Debug)
<< "Added device " << media->deviceNode() << ": " << media->driver();
devices_.push_back(std::move(media));
+
+ /* \todo To batch multiple additions, emit with a small delay here. */
+ devicesAdded.emit();
}
/**
@@ -274,7 +294,7 @@ void DeviceEnumerator::removeDevice(const std::string &deviceNode)
LOG(DeviceEnumerator, Debug)
<< "Media device for node " << deviceNode << " removed.";
- media->disconnected.emit(media.get());
+ media->disconnected.emit();
}
/**
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 3446db59..7866885c 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
-#include "device_enumerator_sysfs.h"
+#include "libcamera/internal/device_enumerator_sysfs.h"
#include <dirent.h>
#include <fcntl.h>
@@ -17,8 +17,9 @@
#include <sys/types.h>
#include <unistd.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
namespace libcamera {
@@ -32,7 +33,7 @@ int DeviceEnumeratorSysfs::init()
int DeviceEnumeratorSysfs::enumerate()
{
struct dirent *ent;
- DIR *dir;
+ DIR *dir = nullptr;
static const char * const sysfs_dirs[] = {
"/sys/subsystem/media/devices",
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 9cbc7e47..4e20a3cc 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
-#include "device_enumerator_udev.h"
+#include "libcamera/internal/device_enumerator_udev.h"
#include <algorithm>
#include <fcntl.h>
@@ -13,21 +13,22 @@
#include <list>
#include <map>
#include <string.h>
+#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
-#include <libcamera/event_notifier.h>
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
-#include "log.h"
-#include "media_device.h"
+#include "libcamera/internal/media_device.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(DeviceEnumerator)
DeviceEnumeratorUdev::DeviceEnumeratorUdev()
- : udev_(nullptr)
+ : udev_(nullptr), monitor_(nullptr), notifier_(nullptr)
{
}
@@ -94,7 +95,8 @@ int DeviceEnumeratorUdev::addUdevDevice(struct udev_device *dev)
if (!deps.empty()) {
LOG(DeviceEnumerator, Debug)
<< "Defer media device " << media->deviceNode()
- << " due to " << ret << " missing dependencies";
+ << " due to " << deps.size()
+ << " missing dependencies";
pending_.emplace_back(std::move(media), std::move(deps));
MediaDeviceDeps *mediaDeps = &pending_.back();
@@ -314,6 +316,7 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
* enumerator.
*/
deps->deps_.erase(devnum);
+ devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
@@ -326,21 +329,29 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
return 0;
}
-void DeviceEnumeratorUdev::udevNotify(EventNotifier *notifier)
+void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
- std::string action(udev_device_get_action(dev));
- std::string deviceNode(udev_device_get_devnode(dev));
+ if (!dev) {
+ int err = errno;
+ LOG(DeviceEnumerator, Warning)
+ << "Ignoring notfication received without a device: "
+ << strerror(err);
+ return;
+ }
+
+ std::string_view action(udev_device_get_action(dev));
+ std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
- << action << " device " << udev_device_get_devnode(dev);
+ << action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
- removeDevice(deviceNode);
+ removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp
new file mode 100644
index 00000000..a014c3b4
--- /dev/null
+++ b/src/libcamera/dma_buf_allocator.cpp
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-buf allocations.
+ */
+
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/udmabuf.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/memfd.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/framebuffer.h>
+
+/**
+ * \file dma_buf_allocator.cpp
+ * \brief dma-buf allocator
+ */
+
+namespace libcamera {
+
+#ifndef __DOXYGEN__
+struct DmaBufAllocatorInfo {
+ DmaBufAllocator::DmaBufAllocatorFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaBufAllocatorInfo, 4> providerInfos = { {
+ /*
+ * /dev/dma_heap/linux,cma is the CMA dma-heap. When the cma heap size is
+ * specified on the kernel command line, this gets renamed to "reserved".
+ */
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/linux,cma" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/reserved" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap, "/dev/dma_heap/system" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf, "/dev/udmabuf" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaBufAllocator)
+
+/**
+ * \class DmaBufAllocator
+ * \brief Helper class for dma-buf allocations
+ *
+ * This class wraps a userspace dma-buf provider selected at construction time,
+ * and exposes functions to allocate dma-buffers from this provider.
+ *
+ * Different providers may provide dma-buffers with different properties for
+ * the underlying memory. Which providers are acceptable is specified through
+ * the type argument passed to the DmaBufAllocator() constructor.
+ */
+
+/**
+ * \enum DmaBufAllocator::DmaBufAllocatorFlag
+ * \brief Type of the dma-buf provider
+ * \var DmaBufAllocator::CmaHeap
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaBufAllocator::SystemHeap
+ * \brief Allocate from the system dma-heap, using the page allocator
+ * \var DmaBufAllocator::UDmaBuf
+ * \brief Allocate using a memfd + /dev/udmabuf
+ */
+
+/**
+ * \typedef DmaBufAllocator::DmaBufAllocatorFlags
+ * \brief A bitwise combination of DmaBufAllocator::DmaBufAllocatorFlag values
+ */
+
+/**
+ * \brief Construct a DmaBufAllocator of a given type
+ * \param[in] type The type(s) of the dma-buf providers to allocate from
+ *
+ * The dma-buf provider type is selected with the \a type parameter, which
+ * defaults to the CMA heap. If no provider of the given type can be accessed,
+ * the constructed DmaBufAllocator instance is invalid as indicated by
+ * the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case
+ * the constructed DmaBufAllocator will match one of the types. If multiple
+ * requested types can work on the system, which provider is used is undefined.
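+ *
+ * A selection sketch accepting either a CMA heap or udmabuf provider, then
+ * allocating a page-sized scratch buffer:
+ *
+ * \code
+ * DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ * 			     DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf);
+ * if (!allocator.isValid())
+ * 	return -ENODEV;
+ *
+ * UniqueFD fd = allocator.alloc("scratch", 4096);
+ * \endcode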
+ */
+DmaBufAllocator::DmaBufAllocator(DmaBufAllocatorFlags type)
+{
+ for (const auto &info : providerInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaBufAllocator, Debug) << "Using " << info.deviceNodeName;
+ providerHandle_ = UniqueFD(ret);
+ type_ = info.type;
+ break;
+ }
+
+ if (!providerHandle_.isValid())
+ LOG(DmaBufAllocator, Error) << "Could not open any dma-buf provider";
+}
+
+/**
+ * \brief Destroy the DmaBufAllocator instance
+ */
+DmaBufAllocator::~DmaBufAllocator() = default;
+
+/**
+ * \fn DmaBufAllocator::isValid()
+ * \brief Check if the DmaBufAllocator instance is valid
+ * \return True if the DmaBufAllocator is valid, false otherwise
+ */
+UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size)
+{
+ /* Size must be a multiple of the page size. Round it up. */
+ std::size_t pageMask = sysconf(_SC_PAGESIZE) - 1;
+ size = (size + pageMask) & ~pageMask;
+
+ /* udmabuf dma-buffers *must* have the F_SEAL_SHRINK seal. */
+ UniqueFD memfd = MemFd::create(name, size, MemFd::Seal::Shrink);
+ if (!memfd.isValid())
+ return {};
+
+ struct udmabuf_create create;
+
+ create.memfd = memfd.get();
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = size;
+
+ int ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Failed to create dma buf for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ /* The underlying memfd is kept as a reference in the kernel. */
+ return UniqueFD(ret);
+}
+
+UniqueFD DmaBufAllocator::allocFromHeap(const char *name, std::size_t size)
+{
+ struct dma_heap_allocation_data alloc = {};
+ int ret;
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(providerHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+/**
+ * \brief Allocate a dma-buf from the DmaBufAllocator
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, return an invalid UniqueFD.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaBufAllocator::alloc(const char *name, std::size_t size)
+{
+ if (!name)
+ return {};
+
+ if (type_ == DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+ return allocFromUDmaBuf(name, size);
+ else
+ return allocFromHeap(name, size);
+}
+
+/**
+ * \brief Allocate and export buffers from the DmaBufAllocator
+ * \param[in] count The number of requested FrameBuffers
+ * \param[in] planeSizes The sizes of planes in each FrameBuffer
+ * \param[out] buffers Array of buffers successfully allocated
+ *
+ * All planes of a FrameBuffer are allocated from a single dma buf.
+ * \todo Add the option to allocate each plane from a separate dma buf.
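+ *
+ * An allocation sketch for four single-plane buffers of an assumed
+ * \a frameSize:
+ *
+ * \code
+ * std::vector<std::unique_ptr<FrameBuffer>> buffers;
+ * int ret = allocator.exportBuffers(4, { frameSize }, &buffers);
+ * \endcode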
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int DmaBufAllocator::exportBuffers(unsigned int count,
+ const std::vector<unsigned int> &planeSizes,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (unsigned int i = 0; i < count; ++i) {
+ std::unique_ptr<FrameBuffer> buffer =
+ createBuffer("frame-" + std::to_string(i), planeSizes);
+ if (!buffer) {
+ LOG(DmaBufAllocator, Error) << "Unable to create buffer";
+
+ buffers->clear();
+ return -EINVAL;
+ }
+
+ buffers->push_back(std::move(buffer));
+ }
+
+ return count;
+}
+
+std::unique_ptr<FrameBuffer>
+DmaBufAllocator::createBuffer(std::string name,
+ const std::vector<unsigned int> &planeSizes)
+{
+ std::vector<FrameBuffer::Plane> planes;
+
+ unsigned int frameSize = 0, offset = 0;
+ for (auto planeSize : planeSizes)
+ frameSize += planeSize;
+
+ SharedFD fd(alloc(name.c_str(), frameSize));
+ if (!fd.isValid())
+ return nullptr;
+
+ for (auto planeSize : planeSizes) {
+ planes.emplace_back(FrameBuffer::Plane{ fd, offset, planeSize });
+ offset += planeSize;
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
+}
+
+/**
+ * \class DmaSyncer
+ * \brief Helper class for dma-buf's synchronization
+ *
+ * This class wraps a userspace dma-buf's synchronization process with an
+ * object's lifetime.
+ *
+ * It's used when the user needs to access a dma-buf with the CPU, typically
+ * mapped with MappedFrameBuffer, so that the buffer is synchronized between the
+ * CPU and the ISP.
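+ *
+ * A scoped-access sketch, assuming \a plane is a FrameBuffer::Plane whose
+ * contents are read through a CPU mapping:
+ *
+ * \code
+ * {
+ * 	DmaSyncer sync(plane.fd, DmaSyncer::SyncType::Read);
+ * 	// CPU reads of the mapping are coherent within this scope.
+ * }
+ * \endcode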
+ */
+
+/**
+ * \enum DmaSyncer::SyncType
+ * \brief Read and/or write access via the CPU map
+ * \var DmaSyncer::Read
+ * \brief Indicates that the mapped dma-buf will be read by the client via the
+ * CPU map
+ * \var DmaSyncer::Write
+ * \brief Indicates that the mapped dma-buf will be written by the client via the
+ * CPU map
+ * \var DmaSyncer::ReadWrite
+ * \brief Indicates that the mapped dma-buf will be read and written by the
+ * client via the CPU map
+ */
+
+/**
+ * \brief Construct a DmaSyncer with a dma-buf's fd and the access type
+ * \param[in] fd The dma-buf's file descriptor to synchronize
+ * \param[in] type Read and/or write access via the CPU map
+ */
+DmaSyncer::DmaSyncer(SharedFD fd, SyncType type)
+ : fd_(fd)
+{
+ switch (type) {
+ case SyncType::Read:
+ flags_ = DMA_BUF_SYNC_READ;
+ break;
+ case SyncType::Write:
+ flags_ = DMA_BUF_SYNC_WRITE;
+ break;
+ case SyncType::ReadWrite:
+ flags_ = DMA_BUF_SYNC_RW;
+ break;
+ }
+
+ sync(DMA_BUF_SYNC_START);
+}
+
+/**
+ * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+/**
+ * \fn DmaSyncer::operator=(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+DmaSyncer::~DmaSyncer()
+{
+ sync(DMA_BUF_SYNC_END);
+}
+
+void DmaSyncer::sync(uint64_t step)
+{
+ struct dma_buf_sync sync = {
+ .flags = flags_ | step
+ };
+
+ int ret;
+ do {
+ ret = ioctl(fd_.get(), DMA_BUF_IOCTL_SYNC, &sync);
+ } while (ret && (errno == EINTR || errno == EAGAIN));
+
+ if (ret) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Unable to sync dma fd: " << fd_.get()
+ << ", err: " << strerror(ret)
+ << ", flags: " << sync.flags;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
new file mode 100644
index 00000000..73299b40
--- /dev/null
+++ b/src/libcamera/fence.cpp
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Synchronization fence
+ */
+
+#include "libcamera/fence.h"
+
+namespace libcamera {
+
+/**
+ *
+ * \file fence.h
+ * \brief Definition of the Fence class
+ */
+
+/**
+ * \class Fence
+ * \brief Synchronization primitive to manage resources
+ *
+ * The Fence class models a synchronization primitive that can be used by
+ * applications to explicitly synchronize resource usage, and can be shared by
+ * multiple processes.
+ *
+ * Fences are most commonly used in association with frame buffers. A
+ * FrameBuffer can be associated with a Fence so that the library can wait for
+ * the Fence to be signalled before allowing the camera device to actually
+ * access the memory area described by the FrameBuffer.
+ *
+ * \sa Request::addBuffer()
+ *
+ * By using a fence, applications can then synchronize between frame buffer
+ * consumers and producers, as for example a display device and a camera, to
+ * guarantee that new data transfers only happen once the existing frames have
+ * been displayed.
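+ *
+ * A sketch of such an association, assuming \a syncFd is a sync file
+ * descriptor handed over by the producer:
+ *
+ * \code
+ * auto fence = std::make_unique<Fence>(UniqueFD(syncFd));
+ * request->addBuffer(stream, buffer, std::move(fence));
+ * \endcode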
+ *
+ * A Fence can be realized by different event notification primitives, the most
+ * common of which is represented by waiting for read events to happen on a
+ * <a href="https://www.kernel.org/doc/html/latest/driver-api/sync_file.html">kernel sync file.</a>
+ * This is currently the only mechanism supported by libcamera, but others can
+ * be implemented by extending or subclassing this class and implementing
+ * opportune handling in the core library.
+ *
+ * \internal
+ *
+ * The Fence class is a thin abstraction around a UniqueFD which simply allows
+ * accessing it as a const reference or moving its ownership to the caller.
+ *
+ * The Fence class allows abstracting the underlying synchronization mechanism
+ * in use and implementing an interface towards other library components that
+ * will not change when new synchronization primitives are added as fences.
+ *
+ * A Fence is constructed with a UniqueFD whose ownership is moved in the Fence.
+ * A FrameBuffer can be associated with a Fence by passing it to the
+ * Request::addBuffer() function, which will move the Fence into the FrameBuffer
+ * itself. Once a Request is queued to the Camera, a preparation phase
+ * guarantees that before actually applying the Request to the hardware, all the
+ * valid fences of the frame buffers in a Request are correctly signalled. Once
+ * a Fence has completed, the library will release the FrameBuffer fence so that
+ * applications won't be allowed to access it.
+ *
+ * An optional timeout can be started while waiting for a fence to complete. If
+ * waiting on a Fence fails for whatever reason, the FrameBuffer's fence is not
+ * reset and is made available to applications for them to handle, by
+ * releasing the Fence to correctly close the underlying UniqueFD.
+ *
+ * A failure in waiting for a Fence to complete will result in the Request
+ * completing in a failed state.
+ *
+ * \sa Request::prepare()
+ * \sa PipelineHandler::doQueueRequests()
+ */
+
+/**
+ * \brief Create a Fence
+ * \param[in] fd The fence file descriptor
+ *
+ * The file descriptor ownership is moved to the Fence.
+ */
+Fence::Fence(UniqueFD fd)
+ : fd_(std::move(fd))
+{
+}
+
+/**
+ * \fn Fence::isValid()
+ * \brief Check if a Fence is valid
+ *
+ * A Fence is valid if the file descriptor it wraps is valid.
+ *
+ * \return True if the Fence is valid, false otherwise
+ */
+
+/**
+ * \fn Fence::fd()
+ * \brief Retrieve a constant reference to the file descriptor
+ * \return A const reference to the fence file descriptor
+ */
+
+/**
+ * \fn Fence::release()
+ * \brief Release the ownership of the file descriptor
+ *
+ * Release the ownership of the wrapped file descriptor by returning it to the
+ * caller.
+ *
+ * \return The wrapped UniqueFD
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/file_descriptor.cpp b/src/libcamera/file_descriptor.cpp
deleted file mode 100644
index 88385476..00000000
--- a/src/libcamera/file_descriptor.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * file_descriptor.cpp - File descriptor wrapper
- */
-
-#include <libcamera/file_descriptor.h>
-
-#include <string.h>
-#include <unistd.h>
-#include <utility>
-
-#include "log.h"
-
-/**
- * \file file_descriptor.h
- * \brief File descriptor wrapper
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(FileDescriptor)
-
-/**
- * \class FileDescriptor
- * \brief RAII-style wrapper for file descriptors
- *
- * The FileDescriptor class provides RAII-style lifetime management of file
- * descriptors with an efficient mechanism for ownership sharing. At its core,
- * an internal Descriptor object wraps a file descriptor (expressed as a signed
- * integer) with an RAII-style interface. The Descriptor is then implicitly
- * shared with all FileDescriptor instances constructed as copies.
- *
- * When constructed from a numerical file descriptor, the FileDescriptor
- * instance duplicates the file descriptor and wraps the duplicate as a
- * Descriptor. The copy constructor and assignment operator create copies that
- * share the Descriptor, while the move versions of those methods additionally
- * make the other FileDescriptor invalid. When the last FileDescriptor that
- * references a Descriptor is destroyed, the file descriptor is closed.
- *
- * The numerical file descriptor is available through the fd() method. As
- * constructing a FileDescriptor from a numerical file descriptor duplicates
- * the file descriptor, the value returned by fd() will be different than the
- * value passed to the constructor. All FileDescriptor instances created as
- * copies of a FileDescriptor will report the same fd() value. Callers can
- * perform operations on the fd(), but shall never close it manually.
- */
-
-/**
- * \brief Create a FileDescriptor wrapping a copy of a given \a fd
- * \param[in] fd File descriptor
- *
- * Constructing a FileDescriptor from a numerical file descriptor duplicates the
- * \a fd and takes ownership of the copy. The original \a fd is left untouched,
- * and the caller is responsible for closing it when appropriate. The duplicated
- * file descriptor will be closed automatically when all FileDescriptor
- * instances that reference it are destroyed.
- *
- * If the \a fd is negative, the FileDescriptor is constructed as invalid and
- * the fd() method will return -1.
- */
-FileDescriptor::FileDescriptor(int fd)
-{
- if (fd < 0)
- return;
-
- fd_ = std::make_shared<Descriptor>(fd);
- if (fd_->fd() < 0)
- fd_.reset();
-}
-
-/**
- * \brief Copy constructor, create a FileDescriptor from a copy of \a other
- * \param[in] other The other FileDescriptor
- *
- * Copying a FileDescriptor implicitly shares ownership of the wrapped file
- * descriptor. The original FileDescriptor is left untouched, and the caller is
- * responsible for destroying it when appropriate. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- */
-FileDescriptor::FileDescriptor(const FileDescriptor &other)
- : fd_(other.fd_)
-{
-}
-
-/**
- * \brief Move constructor, create a FileDescriptor by taking over \a other
- * \param[in] other The other FileDescriptor
- *
- * Moving a FileDescriptor moves the reference to the wrapped descriptor owned
- * by \a other to the new FileDescriptor. The \a other FileDescriptor is
- * invalidated and its fd() method will return -1. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- */
-FileDescriptor::FileDescriptor(FileDescriptor &&other)
- : fd_(std::move(other.fd_))
-{
-}
-
-/**
- * \brief Destroy the FileDescriptor instance
- *
- * Destroying a FileDescriptor instance releases its reference to the wrapped
- * descriptor, if any. When the last instance that references a wrapped
- * descriptor is destroyed, the file descriptor is automatically closed.
- */
-FileDescriptor::~FileDescriptor()
-{
-}
-
-/**
- * \brief Copy assignment operator, replace the wrapped file descriptor with a
- * copy of \a other
- * \param[in] other The other FileDescriptor
- *
- * Copying a FileDescriptor creates a new reference to the wrapped file
- * descriptor owner by \a other. If \a other is invalid, *this will also be
- * invalid. The original FileDescriptor is left untouched, and the caller is
- * responsible for destroying it when appropriate. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- *
- * \return A reference to this FileDescriptor
- */
-FileDescriptor &FileDescriptor::operator=(const FileDescriptor &other)
-{
- fd_ = other.fd_;
-
- return *this;
-}
-
-/**
- * \brief Move assignment operator, replace the wrapped file descriptor by
- * taking over \a other
- * \param[in] other The other FileDescriptor
- *
- * Moving a FileDescriptor moves the reference to the wrapped descriptor owned
- * by \a other to the new FileDescriptor. If \a other is invalid, *this will
- * also be invalid. The \a other FileDescriptor is invalidated and its fd()
- * method will return -1. The wrapped file descriptor will be closed
- * automatically when all FileDescriptor instances that reference it are
- * destroyed.
- *
- * \return A reference to this FileDescriptor
- */
-FileDescriptor &FileDescriptor::operator=(FileDescriptor &&other)
-{
- fd_ = std::move(other.fd_);
-
- return *this;
-}
-
-/**
- * \fn FileDescriptor::isValid()
- * \brief Check if the FileDescriptor instance is valid
- * \return True if the FileDescriptor is valid, false otherwise
- */
-
-/**
- * \fn FileDescriptor::fd()
- * \brief Retrieve the numerical file descriptor
- * \return The numerical file descriptor, which may be -1 if the FileDescriptor
- * instance is invalid
- */
-
-/**
- * \brief Duplicate a FileDescriptor
- *
- * Duplicating a FileDescriptor creates a duplicate of the wrapped file
- * descriptor and returns a new FileDescriptor instance that wraps the
- * duplicate. The fd() method of the original and duplicate instances will
- * return different values. The duplicate instance will not be affected by
- * destruction of the original instance or its copies.
- *
- * \return A new FileDescriptor instance wrapping a duplicate of the original
- * file descriptor
- */
-FileDescriptor FileDescriptor::dup() const
-{
- return FileDescriptor(fd());
-}
-
-FileDescriptor::Descriptor::Descriptor(int fd)
-{
- /* Failing to dup() a fd should not happen and is fatal. */
- fd_ = ::dup(fd);
- if (fd_ == -1) {
- int ret = -errno;
- LOG(FileDescriptor, Fatal)
- << "Failed to dup() fd: " << strerror(-ret);
- }
-}
-
-FileDescriptor::Descriptor::~Descriptor()
-{
- if (fd_ != -1)
- close(fd_);
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index 5f6552a4..bfcdfc08 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,106 +2,1217 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
-#include "formats.h"
+#include "libcamera/internal/formats.h"
-#include <errno.h>
+#include <map>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
/**
- * \file formats.h
- * \brief Types and helper methods to handle libcamera image formats
+ * \file libcamera/internal/formats.h
+ * \brief Types and helper functions to handle libcamera image formats
*/
namespace libcamera {
+LOG_DEFINE_CATEGORY(Formats)
+
+/**
+ * \class PixelFormatInfo
+ * \brief Information about pixel formats
+ *
+ * The PixelFormatInfo class groups together information describing a pixel
+ * format. It facilitates handling of pixel formats by providing data commonly
+ * used in pipeline handlers.
+ *
+ * \var PixelFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the PixelFormat
+ *
+ * \var PixelFormatInfo::format
+ * \brief The PixelFormat described by this instance
+ *
+ * \var PixelFormatInfo::v4l2Formats
+ * \brief The V4L2 pixel formats corresponding to the PixelFormat
+ *
+ * Multiple V4L2 formats may exist for one PixelFormat, as V4L2 defines
+ * separate 4CCs for contiguous and non-contiguous versions of the same image
+ * format.
+ *
+ * \var PixelFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that store pixels with bit padding within words, only the
+ * effective bits are taken into account. For instance, 12-bit Bayer data
+ * stored in two bytes per pixel reports 12, not 16, in this field.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, report 0 in this field.
+ *
+ * \var PixelFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * \var PixelFormatInfo::packed
+ * \brief Tell if multiple pixels are packed in the same bytes
+ *
+ * Packed formats are defined as storing data from multiple pixels in the same
+ * bytes. For instance, 12-bit Bayer data with two pixels stored in three bytes
+ * is packed, while the same data stored with 4 bits of padding in two bytes
+ * per pixel is not packed.
+ *
+ * \var PixelFormatInfo::pixelsPerGroup
+ * \brief The number of pixels in a pixel group
+ *
+ * A pixel group is defined as the minimum number of pixels (including padding)
+ * necessary in a row when the image has only one column of effective pixels.
+ * pixelsPerGroup refers to this value. PixelFormatInfo::Plane::bytesPerGroup,
+ * then, refers to the number of bytes that a pixel group consumes. This
+ * definition of a pixel group allows simple calculation of stride, as
+ * ceil(width / pixelsPerGroup) * bytesPerGroup. These values are determined
+ * only in terms of a row. The ceiling accounts for padding.
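+ *
+ * A sketch of the stride computation this definition implies, for the first
+ * plane of a format described by a PixelFormatInfo \a info:
+ *
+ * \code
+ * unsigned int stride = (width + info.pixelsPerGroup - 1) / info.pixelsPerGroup
+ * 		       * info.planes[0].bytesPerGroup;
+ * \endcode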
+ *
+ * A pixel group has a second constraint, such that the pixel group
+ * (bytesPerGroup and pixelsPerGroup) is the smallest repeatable unit.
+ * What this means is that, for example, in the IPU3 formats, if there is only
+ * one column of effective pixels, it looks like it could fit in 5 bytes
+ * with 3 padding pixels (for a total of 4 pixels over 5 bytes). However, this
+ * unit is not repeatable, as at the 7th group in the same row, the pattern
+ * is broken. Therefore, the pixel group for IPU3 formats must be 25 pixels
+ * over 32 bytes.
+ *
+ * For example, for something simple like BGR888, it is self-explanatory:
+ * the pixel group size is 1, the number of bytes necessary is 3, and there is
+ * only one plane with no (= 1) vertical subsampling. For YUYV, the
+ * CbCr pair is shared between two pixels, so even if you have only one
+ * pixel, you would still need a padded second Y sample, therefore the pixel
+ * group size is 2, and bytes necessary is 4. YUYV also has no vertical
+ * subsampling. NV12 has a pixel group size of 2 pixels, due to the CbCr plane.
+ * The bytes per group then, for both planes, is 2. The first plane has no
+ * vertical subsampling, but the second plane is subsampled by a factor of 2.
+ *
+ * The IPU3 raw Bayer formats are single-planar, and have a pixel group size of
+ * 25, consuming 32 bytes, due to the packing pattern being repeated in memory
+ * every 32 bytes. The IPU3 hardware, however, has an additional constraint on
+ * the DMA burst size, requiring lines to be multiple of 64 bytes. This isn't an
+ * intrinsic property of the formats and is thus not reflected here. It is
+ * instead enforced by the corresponding pipeline handler.
+ *
+ * \var PixelFormatInfo::planes
+ * \brief Information about pixels for each plane
+ *
+ * \sa PixelFormatInfo::Plane
+ */
+
/**
- * \class ImageFormats
- * \brief Describe V4L2Device and V4L2SubDevice image formats
+ * \enum PixelFormatInfo::ColourEncoding
+ * \brief The colour encoding type
*
- * This class stores a list of image formats, each associated with a
- * corresponding set of image sizes. It is used to describe the formats and
- * sizes supported by a V4L2Device or V4L2Subdevice.
+ * \var PixelFormatInfo::ColourEncodingRGB
+ * \brief RGB colour encoding
*
- * Formats are stored as an integer. When used for a V4L2Device, the image
- * formats are fourcc pixel formats. When used for a V4L2Subdevice they are
- * media bus codes. Both are defined by the V4L2 specification.
+ * \var PixelFormatInfo::ColourEncodingYUV
+ * \brief YUV colour encoding
*
- * Sizes are stored as a list of SizeRange.
+ * \var PixelFormatInfo::ColourEncodingRAW
+ * \brief RAW colour encoding
*/
/**
- * \brief Add a format and corresponding sizes to the description
- * \param[in] format Pixel format or media bus code to describe
- * \param[in] sizes List of supported size ranges for the format
+ * \struct PixelFormatInfo::Plane
+ * \brief Information about a single plane of a pixel format
+ *
+ * \var PixelFormatInfo::Plane::bytesPerGroup
+ * \brief The number of bytes that a pixel group consumes
+ *
+ * \sa PixelFormatInfo::pixelsPerGroup
*
- * \return 0 on success or a negative error code otherwise
- * \retval -EEXIST The format is already described
+ * \var PixelFormatInfo::Plane::verticalSubSampling
+ * \brief Vertical subsampling multiplier
+ *
+ * This value is the ratio of the number of rows of pixels in the frame to
+ * the number of rows of pixels in the plane.
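+ *
+ * For instance, the NV12 chroma plane has a vertical subsampling of 2, so a
+ * frame with 1080 rows of pixels stores 540 rows in that plane.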
+ */
+
+namespace {
+
+const PixelFormatInfo pixelFormatInfoInvalid{};
+
+const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
+ /* RGB formats. */
+ { formats::RGB565, {
+ .name = "RGB565",
+ .format = formats::RGB565,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB565_BE, {
+ .name = "RGB565_BE",
+ .format = formats::RGB565_BE,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGR888, {
+ .name = "BGR888",
+ .format = formats::BGR888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB888, {
+ .name = "RGB888",
+ .format = formats::RGB888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XRGB8888, {
+ .name = "XRGB8888",
+ .format = formats::XRGB8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XBGR8888, {
+ .name = "XBGR8888",
+ .format = formats::XBGR8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGBX8888, {
+ .name = "RGBX8888",
+ .format = formats::RGBX8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGRX8888, {
+ .name = "BGRX8888",
+ .format = formats::BGRX8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::ABGR8888, {
+ .name = "ABGR8888",
+ .format = formats::ABGR8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::ARGB8888, {
+ .name = "ARGB8888",
+ .format = formats::ARGB8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGRA8888, {
+ .name = "BGRA8888",
+ .format = formats::BGRA8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGBA8888, {
+ .name = "RGBA8888",
+ .format = formats::RGBA8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* YUV packed formats. */
+ { formats::YUYV, {
+ .name = "YUYV",
+ .format = formats::YUYV,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUYV), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::YVYU, {
+ .name = "YVYU",
+ .format = formats::YVYU,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVYU), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::UYVY, {
+ .name = "UYVY",
+ .format = formats::UYVY,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_UYVY), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::VYUY, {
+ .name = "VYUY",
+ .format = formats::VYUY,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_VYUY), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::AVUY8888, {
+ .name = "AVUY8888",
+ .format = formats::AVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XVUY8888, {
+ .name = "XVUY8888",
+ .format = formats::XVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* YUV planar formats. */
+ { formats::NV12, {
+ .name = "NV12",
+ .format = formats::NV12,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 2 }, { 0, 0 } }},
+ } },
+ { formats::NV21, {
+ .name = "NV21",
+ .format = formats::NV21,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 2 }, { 0, 0 } }},
+ } },
+ { formats::NV16, {
+ .name = "NV16",
+ .format = formats::NV16,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV61, {
+ .name = "NV61",
+ .format = formats::NV61,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV24, {
+ .name = "NV24",
+ .format = formats::NV24,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV42, {
+ .name = "NV42",
+ .format = formats::NV42,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV42), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::YUV420, {
+ .name = "YUV420",
+ .format = formats::YUV420,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 2 }, { 1, 2 } }},
+ } },
+ { formats::YVU420, {
+ .name = "YVU420",
+ .format = formats::YVU420,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 2 }, { 1, 2 } }},
+ } },
+ { formats::YUV422, {
+ .name = "YUV422",
+ .format = formats::YUV422,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YVU422, {
+ .name = "YVU422",
+ .format = formats::YVU422,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YUV444, {
+ .name = "YUV444",
+ .format = formats::YUV444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YVU444, {
+ .name = "YVU444",
+ .format = formats::YVU444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+
+ /* Greyscale formats. */
+ { formats::R8, {
+ .name = "R8",
+ .format = formats::R8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_GREY), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R10, {
+ .name = "R10",
+ .format = formats::R10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R10_CSI2P, {
+ .name = "R10_CSI2P",
+ .format = formats::R10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R12_CSI2P, {
+ .name = "R12_CSI2P",
+ .format = formats::R12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R12, {
+ .name = "R12",
+ .format = formats::R12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R16, {
+ .name = "R16",
+ .format = formats::R16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* Bayer formats. */
+ { formats::SBGGR8, {
+ .name = "SBGGR8",
+ .format = formats::SBGGR8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG8, {
+ .name = "SGBRG8",
+ .format = formats::SGBRG8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG8, {
+ .name = "SGRBG8",
+ .format = formats::SGRBG8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB8, {
+ .name = "SRGGB8",
+ .format = formats::SRGGB8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10, {
+ .name = "SBGGR10",
+ .format = formats::SBGGR10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10, {
+ .name = "SGBRG10",
+ .format = formats::SGBRG10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10, {
+ .name = "SGRBG10",
+ .format = formats::SGRBG10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10, {
+ .name = "SRGGB10",
+ .format = formats::SRGGB10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10_CSI2P, {
+ .name = "SBGGR10_CSI2P",
+ .format = formats::SBGGR10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10_CSI2P, {
+ .name = "SGBRG10_CSI2P",
+ .format = formats::SGBRG10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10_CSI2P, {
+ .name = "SGRBG10_CSI2P",
+ .format = formats::SGRBG10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10_CSI2P, {
+ .name = "SRGGB10_CSI2P",
+ .format = formats::SRGGB10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR12, {
+ .name = "SBGGR12",
+ .format = formats::SBGGR12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG12, {
+ .name = "SGBRG12",
+ .format = formats::SGBRG12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG12, {
+ .name = "SGRBG12",
+ .format = formats::SGRBG12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB12, {
+ .name = "SRGGB12",
+ .format = formats::SRGGB12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR12_CSI2P, {
+ .name = "SBGGR12_CSI2P",
+ .format = formats::SBGGR12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG12_CSI2P, {
+ .name = "SGBRG12_CSI2P",
+ .format = formats::SGBRG12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG12_CSI2P, {
+ .name = "SGRBG12_CSI2P",
+ .format = formats::SGRBG12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB12_CSI2P, {
+ .name = "SRGGB12_CSI2P",
+ .format = formats::SRGGB12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14, {
+ .name = "SBGGR14",
+ .format = formats::SBGGR14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14, {
+ .name = "SGBRG14",
+ .format = formats::SGBRG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14, {
+ .name = "SGRBG14",
+ .format = formats::SGRBG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14, {
+ .name = "SRGGB14",
+ .format = formats::SRGGB14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14_CSI2P, {
+ .name = "SBGGR14_CSI2P",
+ .format = formats::SBGGR14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14_CSI2P, {
+ .name = "SGBRG14_CSI2P",
+ .format = formats::SGBRG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14_CSI2P, {
+ .name = "SGRBG14_CSI2P",
+ .format = formats::SGRBG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14_CSI2P, {
+ .name = "SRGGB14_CSI2P",
+ .format = formats::SRGGB14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR16, {
+ .name = "SBGGR16",
+ .format = formats::SBGGR16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG16, {
+ .name = "SGBRG16",
+ .format = formats::SGBRG16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG16, {
+ .name = "SGRBG16",
+ .format = formats::SGRBG16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB16, {
+ .name = "SRGGB16",
+ .format = formats::SRGGB16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10_IPU3, {
+ .name = "SBGGR10_IPU3",
+ .format = formats::SBGGR10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ /* \todo remember to double this in the ipu3 pipeline handler */
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10_IPU3, {
+ .name = "SGBRG10_IPU3",
+ .format = formats::SGBRG10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10_IPU3, {
+ .name = "SGRBG10_IPU3",
+ .format = formats::SGRBG10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10_IPU3, {
+ .name = "SRGGB10_IPU3",
+ .format = formats::SRGGB10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* Compressed formats. */
+ { formats::MJPEG, {
+ .name = "MJPEG",
+ .format = formats::MJPEG,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ },
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+};
+
+} /* namespace */
+
+/**
+ * \fn bool PixelFormatInfo::isValid() const
+ * \brief Check if the pixel format info is valid
+ * \return True if the pixel format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a pixel format
+ * \param[in] format The pixel format
+ * \return The PixelFormatInfo describing the \a format if known, or an invalid
+ * PixelFormatInfo otherwise
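+ *
+ * Typical usage, as a short sketch:
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info(formats::YUYV);
+ * unsigned int bpp = info.isValid() ? info.bitsPerPixel : 0;
+ * \endcode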
*/
-int ImageFormats::addFormat(unsigned int format, const std::vector<SizeRange> &sizes)
+const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
{
- if (data_.find(format) != data_.end())
- return -EEXIST;
+ const auto iter = pixelFormatInfo.find(format);
+ if (iter == pixelFormatInfo.end()) {
+ LOG(Formats, Warning)
+ << "Unsupported pixel format "
+ << utils::hex(format.fourcc());
+ return pixelFormatInfoInvalid;
+ }
- data_[format] = sizes;
+ return iter->second;
+}
+
+/**
+ * \brief Retrieve information about a V4L2 pixel format
+ * \param[in] format The V4L2 pixel format
+ * \return The PixelFormatInfo describing the V4L2 \a format if known, or an
+ * invalid PixelFormatInfo otherwise
+ */
+const PixelFormatInfo &PixelFormatInfo::info(const V4L2PixelFormat &format)
+{
+ PixelFormat pixelFormat = format.toPixelFormat(false);
+ if (!pixelFormat.isValid())
+ return pixelFormatInfoInvalid;
+
+ const auto iter = pixelFormatInfo.find(pixelFormat);
+ if (iter == pixelFormatInfo.end())
+ return pixelFormatInfoInvalid;
- return 0;
+ return iter->second;
}
/**
- * \brief Check if the list of devices supported formats is empty
- * \return True if the list of supported formats is empty
+ * \brief Retrieve information about a pixel format
+ * \param[in] name The name of the pixel format
+ * \return The PixelFormatInfo describing the PixelFormat matching the
+ * \a name if known, or an invalid PixelFormatInfo otherwise
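+ *
+ * For instance, as a sketch (the name matches the table above):
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info("NV12");
+ * \endcode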
*/
-bool ImageFormats::isEmpty() const
+const PixelFormatInfo &PixelFormatInfo::info(const std::string &name)
{
- return data_.empty();
+ for (const auto &info : pixelFormatInfo) {
+ if (info.second.name == name)
+ return info.second;
+ }
+
+ return pixelFormatInfoInvalid;
}
/**
- * \brief Retrieve a list of all supported image formats
- * \return List of pixel formats or media bus codes
+ * \brief Compute the stride
+ * \param[in] width The width of the line, in pixels
+ * \param[in] plane The index of the plane whose stride is to be computed
+ * \param[in] align The stride alignment, in bytes
+ *
+ * The stride is the number of bytes necessary to store a full line of a frame,
+ * including padding at the end of the line. This function takes into account
+ * the alignment constraints intrinsic to the format (for instance, the
+ * SGRBG12_CSI2P format stores two 12-bit pixels in 3 bytes, and thus has a
+ * required stride alignment of 3 bytes). Additional alignment constraints may
+ * be specified through the \a align parameter, which will cause the stride to
+ * be rounded up to the next multiple of \a align.
+ *
+ * For multi-planar formats, different planes may have different stride values.
+ * The \a plane parameter selects which plane to compute the stride for.
+ *
+ * \return The number of bytes necessary to store a line, or 0 if the
+ * PixelFormatInfo instance or the \a plane is not valid
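+ *
+ * A minimal usage sketch (the width and alignment values are illustrative):
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info(formats::NV12);
+ * unsigned int lumaStride = info.stride(1920, 0, 64);
+ * \endcode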
*/
-std::vector<unsigned int> ImageFormats::formats() const
+unsigned int PixelFormatInfo::stride(unsigned int width, unsigned int plane,
+ unsigned int align) const
{
- std::vector<unsigned int> formats;
- formats.reserve(data_.size());
+ if (!isValid()) {
+ LOG(Formats, Warning) << "Invalid pixel format, stride is zero";
+ return 0;
+ }
+
+ if (plane >= planes.size() || !planes[plane].bytesPerGroup) {
+ LOG(Formats, Warning) << "Invalid plane index, stride is zero";
+ return 0;
+ }
- /* \todo: Should this be cached instead of computed each time? */
- for (auto const &it : data_)
- formats.push_back(it.first);
+ /* ceil(width / pixelsPerGroup) * bytesPerGroup */
+ unsigned int stride = (width + pixelsPerGroup - 1) / pixelsPerGroup
+ * planes[plane].bytesPerGroup;
- return formats;
+ /* ceil(stride / align) * align */
+ return (stride + align - 1) / align * align;
}
/**
- * \brief Retrieve all sizes for a specific format
- * \param[in] format The pixel format or mbus code
+ * \brief Compute the number of bytes necessary to store a plane of a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] plane The plane index
+ * \param[in] align The stride alignment, in bytes (1 for default alignment)
*
- * Retrieve all size ranges for a specific format. For V4L2Device \a format is a
- * pixel format while for a V4L2Subdevice \a format is a media bus code.
+ * The plane size is computed by multiplying the line stride and the frame
+ * height, taking subsampling and other format characteristics into account.
+ * Stride alignment constraints may be specified through the \a align parameter.
*
- * \return The list of image sizes supported for \a format, or an empty list if
- * the format is not supported
+ * \sa stride()
+ *
+ * \return The number of bytes necessary to store the plane, or 0 if the
+ * PixelFormatInfo instance is not valid or the plane number isn't valid for the
+ * format
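+ *
+ * For illustration (the sizes are arbitrary):
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info(formats::NV12);
+ * unsigned int lumaSize = info.planeSize(Size(1920, 1080), 0, 1);
+ * unsigned int chromaSize = info.planeSize(Size(1920, 1080), 1, 1);
+ * \endcode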
*/
-const std::vector<SizeRange> &ImageFormats::sizes(unsigned int format) const
+unsigned int PixelFormatInfo::planeSize(const Size &size, unsigned int plane,
+ unsigned int align) const
{
- static const std::vector<SizeRange> empty;
+ unsigned int stride = PixelFormatInfo::stride(size.width, plane, align);
+ if (!stride)
+ return 0;
+
+ return planeSize(size.height, plane, stride);
+}
- auto const &it = data_.find(format);
- if (it == data_.end())
- return empty;
+/**
+ * \brief Compute the number of bytes necessary to store a plane of a frame
+ * \param[in] height The height of the frame, in pixels
+ * \param[in] plane The plane index
+ * \param[in] stride The plane stride, in bytes
+ *
+ * The plane size is computed by multiplying the line \a stride and the frame
+ * height, taking vertical subsampling and other format characteristics into
+ * account. As the \a stride is provided by the caller, any alignment
+ * constraints must already be applied to it.
+ *
+ * \return The number of bytes necessary to store the plane, or 0 if the
+ * PixelFormatInfo instance is not valid or the plane number isn't valid for the
+ * format
+ */
+unsigned int PixelFormatInfo::planeSize(unsigned int height, unsigned int plane,
+ unsigned int stride) const
+{
+ unsigned int vertSubSample = planes[plane].verticalSubSampling;
+ if (!vertSubSample)
+ return 0;
- return it->second;
+ /* stride * ceil(height / verticalSubSampling) */
+ return stride * ((height + vertSubSample - 1) / vertSubSample);
}
/**
- * \brief Retrieve the map that associates formats to image sizes
- * \return The map that associates formats to image sizes
+ * \brief Compute the number of bytes necessary to store a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] align The stride alignment, in bytes (1 for default alignment)
+ *
+ * The frame size is computed by adding the size of all planes, as computed by
+ * planeSize(), using the specified alignment constraints for all planes. For
+ * more complex stride constraints, use the frameSize() overloaded version that
+ * takes an array of stride values.
+ *
+ * \sa planeSize()
+ *
+ * \return The number of bytes necessary to store the frame, or 0 if the
+ * PixelFormatInfo instance is not valid
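+ *
+ * Typical usage, as a sketch (the size is arbitrary):
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info(formats::YUV420);
+ * unsigned int bytes = info.frameSize(Size(1920, 1080), 1);
+ * \endcode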
+ */
+unsigned int PixelFormatInfo::frameSize(const Size &size, unsigned int align) const
+{
+ unsigned int sum = 0;
+
+ for (const auto &[i, plane] : utils::enumerate(planes)) {
+ if (plane.bytesPerGroup == 0)
+ break;
+
+ sum += planeSize(size, i, align);
+ }
+
+ return sum;
+}
+
+/**
+ * \brief Compute the number of bytes necessary to store a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] strides The strides to use for each plane
+ *
+ * This function is an overloaded version that takes custom strides for each
+ * plane, to be used when the device has custom alignment constraints that
+ * can't be described by just an alignment value.
+ *
+ * \return The number of bytes necessary to store the frame, or 0 if the
+ * PixelFormatInfo instance is not valid
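+ *
+ * A sketch with custom per-plane strides (the values are illustrative):
+ * \code
+ * const PixelFormatInfo &info = PixelFormatInfo::info(formats::NV12);
+ * std::array<unsigned int, 3> strides{ 2048, 2048, 0 };
+ * unsigned int bytes = info.frameSize(Size(1920, 1080), strides);
+ * \endcode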
+ */
+unsigned int
+PixelFormatInfo::frameSize(const Size &size,
+ const std::array<unsigned int, 3> &strides) const
+{
+ /* stride * ceil(height / verticalSubSampling) */
+ unsigned int sum = 0;
+ for (unsigned int i = 0; i < 3; i++) {
+ unsigned int vertSubSample = planes[i].verticalSubSampling;
+ if (!vertSubSample)
+ continue;
+ sum += strides[i]
+ * ((size.height + vertSubSample - 1) / vertSubSample);
+ }
+
+ return sum;
+}
+
+/**
+ * \brief Retrieve the number of planes represented by the format
+ * \return The number of planes used by the format
*/
-const std::map<unsigned int, std::vector<SizeRange>> &ImageFormats::data() const
+unsigned int PixelFormatInfo::numPlanes() const
{
- return data_;
+ unsigned int count = 0;
+
+ for (const Plane &p : planes) {
+ if (p.bytesPerGroup == 0)
+ break;
+
+ count++;
+ }
+
+ return count;
}
} /* namespace libcamera */
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
new file mode 100644
index 00000000..2d54d391
--- /dev/null
+++ b/src/libcamera/formats.yaml
@@ -0,0 +1,212 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2020, Google Inc.
+#
+%YAML 1.1
+---
+formats:
+ - R8:
+ fourcc: DRM_FORMAT_R8
+ - R10:
+ fourcc: DRM_FORMAT_R10
+ - R12:
+ fourcc: DRM_FORMAT_R12
+ - R16:
+ fourcc: DRM_FORMAT_R16
+
+ - RGB565:
+ fourcc: DRM_FORMAT_RGB565
+ - RGB565_BE:
+ fourcc: DRM_FORMAT_RGB565
+ big_endian: true
+
+ - RGB888:
+ fourcc: DRM_FORMAT_RGB888
+ - BGR888:
+ fourcc: DRM_FORMAT_BGR888
+
+ - XRGB8888:
+ fourcc: DRM_FORMAT_XRGB8888
+ - XBGR8888:
+ fourcc: DRM_FORMAT_XBGR8888
+ - RGBX8888:
+ fourcc: DRM_FORMAT_RGBX8888
+ - BGRX8888:
+ fourcc: DRM_FORMAT_BGRX8888
+
+ - ARGB8888:
+ fourcc: DRM_FORMAT_ARGB8888
+ - ABGR8888:
+ fourcc: DRM_FORMAT_ABGR8888
+ - RGBA8888:
+ fourcc: DRM_FORMAT_RGBA8888
+ - BGRA8888:
+ fourcc: DRM_FORMAT_BGRA8888
+
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
+ - YUYV:
+ fourcc: DRM_FORMAT_YUYV
+ - YVYU:
+ fourcc: DRM_FORMAT_YVYU
+ - UYVY:
+ fourcc: DRM_FORMAT_UYVY
+ - VYUY:
+ fourcc: DRM_FORMAT_VYUY
+ - AVUY8888:
+ fourcc: DRM_FORMAT_AVUY8888
+ - XVUY8888:
+ fourcc: DRM_FORMAT_XVUY8888
+
+ - NV12:
+ fourcc: DRM_FORMAT_NV12
+ - NV21:
+ fourcc: DRM_FORMAT_NV21
+ - NV16:
+ fourcc: DRM_FORMAT_NV16
+ - NV61:
+ fourcc: DRM_FORMAT_NV61
+ - NV24:
+ fourcc: DRM_FORMAT_NV24
+ - NV42:
+ fourcc: DRM_FORMAT_NV42
+
+ - YUV420:
+ fourcc: DRM_FORMAT_YUV420
+ - YVU420:
+ fourcc: DRM_FORMAT_YVU420
+ - YUV422:
+ fourcc: DRM_FORMAT_YUV422
+ - YVU422:
+ fourcc: DRM_FORMAT_YVU422
+ - YUV444:
+ fourcc: DRM_FORMAT_YUV444
+ - YVU444:
+ fourcc: DRM_FORMAT_YVU444
+
+ - MJPEG:
+ fourcc: DRM_FORMAT_MJPEG
+
+ - SRGGB8:
+ fourcc: DRM_FORMAT_SRGGB8
+ - SGRBG8:
+ fourcc: DRM_FORMAT_SGRBG8
+ - SGBRG8:
+ fourcc: DRM_FORMAT_SGBRG8
+ - SBGGR8:
+ fourcc: DRM_FORMAT_SBGGR8
+
+ - SRGGB10:
+ fourcc: DRM_FORMAT_SRGGB10
+ - SGRBG10:
+ fourcc: DRM_FORMAT_SGRBG10
+ - SGBRG10:
+ fourcc: DRM_FORMAT_SGBRG10
+ - SBGGR10:
+ fourcc: DRM_FORMAT_SBGGR10
+
+ - SRGGB12:
+ fourcc: DRM_FORMAT_SRGGB12
+ - SGRBG12:
+ fourcc: DRM_FORMAT_SGRBG12
+ - SGBRG12:
+ fourcc: DRM_FORMAT_SGBRG12
+ - SBGGR12:
+ fourcc: DRM_FORMAT_SBGGR12
+
+ - SRGGB14:
+ fourcc: DRM_FORMAT_SRGGB14
+ - SGRBG14:
+ fourcc: DRM_FORMAT_SGRBG14
+ - SGBRG14:
+ fourcc: DRM_FORMAT_SGBRG14
+ - SBGGR14:
+ fourcc: DRM_FORMAT_SBGGR14
+
+ - SRGGB16:
+ fourcc: DRM_FORMAT_SRGGB16
+ - SGRBG16:
+ fourcc: DRM_FORMAT_SGRBG16
+ - SGBRG16:
+ fourcc: DRM_FORMAT_SGBRG16
+ - SBGGR16:
+ fourcc: DRM_FORMAT_SBGGR16
+
+ - R10_CSI2P:
+ fourcc: DRM_FORMAT_R10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - R12_CSI2P:
+ fourcc: DRM_FORMAT_R12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB10_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG10_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG10_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR10_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB12_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG12_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG12_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR12_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB14_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG14_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG14_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR14_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB10_IPU3:
+ fourcc: DRM_FORMAT_SRGGB10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SGRBG10_IPU3:
+ fourcc: DRM_FORMAT_SGRBG10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SGBRG10_IPU3:
+ fourcc: DRM_FORMAT_SGBRG10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SBGGR10_IPU3:
+ fourcc: DRM_FORMAT_SBGGR10
+ mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+...
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
new file mode 100644
index 00000000..826848f7
--- /dev/null
+++ b/src/libcamera/framebuffer.cpp
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Frame buffer handling
+ */
+
+#include <libcamera/framebuffer.h>
+#include "libcamera/internal/framebuffer.h"
+
+#include <sys/stat.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+/**
+ * \file libcamera/framebuffer.h
+ * \brief Frame buffer handling
+ */
+
+/**
+ * \internal
+ * \file libcamera/internal/framebuffer.h
+ * \brief Internal frame buffer handling support
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Buffer)
+
+/**
+ * \struct FrameMetadata
+ * \brief Metadata related to a captured frame
+ *
+ * The FrameMetadata structure stores all metadata related to a captured frame,
+ * as stored in a FrameBuffer, such as capture status, timestamp and bytes used.
+ */
+
+/**
+ * \enum FrameMetadata::Status
+ * \brief Define the frame completion status
+ * \var FrameMetadata::FrameSuccess
+ * The frame has been captured with success and contains valid data. All fields
+ * of the FrameMetadata structure are valid.
+ * \var FrameMetadata::FrameError
+ * An error occurred during capture of the frame. The frame data may be partly
+ * or fully invalid. The sequence and timestamp fields of the FrameMetadata
+ * structure are valid, while the other fields may be invalid.
+ * \var FrameMetadata::FrameCancelled
+ * Capture stopped before the frame completed. The frame data is not valid. All
+ * fields of the FrameMetadata structure but the status field are invalid.
+ */
+
+/**
+ * \struct FrameMetadata::Plane
+ * \brief Per-plane frame metadata
+ *
+ * Frames are stored in memory in one or multiple planes. The
+ * FrameMetadata::Plane structure stores per-plane metadata.
+ */
+
+/**
+ * \var FrameMetadata::Plane::bytesused
+ * \brief Number of bytes occupied by the data in the plane, including line
+ * padding
+ *
+ * This value may vary per frame for compressed formats. For uncompressed
+ * formats it will be constant for all frames, but may be smaller than the
+ * FrameBuffer size.
+ */
+
+/**
+ * \var FrameMetadata::status
+ * \brief Status of the frame
+ *
+ * The validity of other fields of the FrameMetadata structure depends on the
+ * status value.
+ */
+
+/**
+ * \var FrameMetadata::sequence
+ * \brief Frame sequence number
+ *
+ * The sequence number is a monotonically increasing number assigned to the
+ * frames captured by the stream. The value is increased by one for each frame.
+ * Gaps in the sequence numbers indicate dropped frames.
+ */
+
+/**
+ * \var FrameMetadata::timestamp
+ * \brief Time when the frame was captured
+ *
+ * The timestamp is expressed as a number of nanoseconds relative to the system
+ * clock since an unspecified time point.
+ *
+ * \todo Be more precise on what timestamps refer to.
+ */
+
+/**
+ * \fn FrameMetadata::planes()
+ * \copydoc FrameMetadata::planes() const
+ */
+
+/**
+ * \fn FrameMetadata::planes() const
+ * \brief Retrieve the array of per-plane metadata
+ * \return The array of per-plane metadata
+ */
+
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class FrameBuffer::Private
+ * \brief Base class for FrameBuffer private data
+ *
+ * The FrameBuffer::Private class stores all private data associated with a
+ * framebuffer. It implements the d-pointer design pattern to hide core
+ * FrameBuffer data from the public API, and exposes utility functions to
+ * pipeline handlers.
+ */
+
+/**
+ * \brief Construct a FrameBuffer::Private instance
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::Private::Private(const std::vector<Plane> &planes, uint64_t cookie)
+ : planes_(planes), cookie_(cookie), request_(nullptr),
+ isContiguous_(true)
+{
+ metadata_.planes_.resize(planes_.size());
+}
+
+/**
+ * \brief FrameBuffer::Private destructor
+ */
+FrameBuffer::Private::~Private()
+{
+}
+
+/**
+ * \fn FrameBuffer::Private::setRequest()
+ * \brief Set the request this buffer belongs to
+ * \param[in] request Request to set
+ *
+ * For buffers added to requests by applications, this function is called by
+ * Request::addBuffer() or Request::reuse(). For buffers internal to pipeline
+ * handlers, it is called by the pipeline handlers themselves.
+ */
+
+/**
+ * \fn FrameBuffer::Private::isContiguous()
+ * \brief Check if the frame buffer stores planes contiguously in memory
+ *
+ * Multi-planar frame buffers can store their planes contiguously in memory, or
+ * split them into discontiguous memory areas. This function checks which of
+ * these two categories the frame buffer belongs to.
+ *
+ * \return True if the planes are stored contiguously in memory, false otherwise
+ */
+
+/**
+ * \fn FrameBuffer::Private::fence()
+ * \brief Retrieve a const pointer to the Fence
+ *
+ * This function only returns a reference to the fence and does not
+ * change its ownership. The fence is stored in the FrameBuffer and can only be
+ * reset with FrameBuffer::releaseFence() in case the buffer has completed with
+ * error due to a Fence wait failure.
+ *
+ * If a buffer with a Fence completes with errors due to a failure in handling
+ * the fence, applications are responsible for releasing the Fence before
+ * calling Request::addBuffer() again.
+ *
+ * \sa Request::addBuffer()
+ *
+ * \return A const pointer to the Fence if any, nullptr otherwise
+ */
+
+/**
+ * \fn FrameBuffer::Private::setFence()
+ * \brief Move a \a fence into this buffer
+ * \param[in] fence The Fence
+ *
+ * This function associates a Fence with this FrameBuffer. The intended caller
+ * is the Request::addBuffer() function.
+ *
+ * Once a FrameBuffer is associated with a Fence, the FrameBuffer will only be
+ * made available to the hardware device once the Fence has been correctly
+ * signalled.
+ *
+ * \sa Request::prepare()
+ *
+ * If the FrameBuffer completes successfully, the core releases the Fence and
+ * the buffer can be reused immediately. If handling of the Fence fails during
+ * request preparation, the Fence is not released and is left in the
+ * FrameBuffer. It is the application's responsibility to correctly release
+ * the fence and handle it appropriately before using the buffer again.
+ */
+
+/**
+ * \fn FrameBuffer::Private::cancel()
+ * \brief Mark the buffer as cancelled
+ *
+ * If a buffer is not used by a request, it shall be marked as cancelled to
+ * indicate that the metadata is invalid.
+ */
+
+/**
+ * \fn FrameBuffer::Private::metadata()
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
+ * \class FrameBuffer
+ * \brief Frame buffer data and its associated dynamic metadata
+ *
+ * The FrameBuffer class is the primary interface for applications, IPAs and
+ * pipeline handlers to interact with frame memory. It contains all the static
+ * and dynamic information to manage the whole life cycle of a frame capture,
+ * from buffer creation to consumption.
+ *
+ * The static information describes the memory planes that make a frame. The
+ * planes are specified when creating the FrameBuffer and are expressed as a set
+ * of dmabuf file descriptors, offset and length.
+ *
+ * The dynamic information is grouped in a FrameMetadata instance. It is updated
+ * during the processing of a queued capture request, and is valid from the
+ * completion of the buffer as signaled by Camera::bufferComplete() until the
+ * FrameBuffer is either reused in a new request or deleted.
+ *
+ * The creator of a FrameBuffer (application, IPA or pipeline handler) may
+ * associate an integer cookie with it for any private purpose. The cookie may
+ * be set when creating the FrameBuffer, and updated at any time with
+ * setCookie(). The cookie is transparent to the libcamera core and shall only
+ * be set by the creator of the FrameBuffer. This mechanism supplements the
+ * Request cookie.
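+ *
+ * For illustration, given a FrameBuffer *buffer (the cookie value is
+ * arbitrary):
+ * \code
+ * buffer->setCookie(42);
+ * uint64_t cookie = buffer->cookie();
+ * \endcode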
+ */
+
+/**
+ * \struct FrameBuffer::Plane
+ * \brief A memory region to store a single plane of a frame
+ *
+ * Planar pixel formats use multiple memory regions to store the different
+ * colour components of a frame. The Plane structure describes such a memory
+ * region by a dmabuf file descriptor, an offset within the dmabuf and a length.
+ * A FrameBuffer then contains one or multiple planes, depending on the pixel
+ * format of the frames it is meant to store.
+ *
+ * The offset identifies the location of the plane data from the start of the
+ * memory referenced by the dmabuf file descriptor. Multiple planes may be
+ * stored in the same dmabuf, in which case they will reference the same dmabuf
+ * and different offsets. No two planes may overlap, as specified by their
+ * offset and length.
+ *
+ * To support DMA access, planes are associated with dmabuf objects represented
+ * by SharedFD handles. The Plane class doesn't handle mapping of the memory to
+ * the CPU, but applications and IPAs may use the dmabuf file descriptors to map
+ * the plane memory with mmap() and access its contents.
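+ *
+ * A minimal construction sketch, where dmabufFd and planeLength are assumed
+ * to come from the allocator in use:
+ * \code
+ * FrameBuffer::Plane plane;
+ * plane.fd = SharedFD(dmabufFd);
+ * plane.offset = 0;
+ * plane.length = planeLength;
+ * \endcode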
+ *
+ * \todo Specify how an application shall decide whether to use a single or
+ * multiple dmabufs, based on the camera requirements.
+ */
+
+/**
+ * \var FrameBuffer::Plane::kInvalidOffset
+ * \brief Invalid offset value, to identify uninitialized planes
+ */
+
+/**
+ * \var FrameBuffer::Plane::fd
+ * \brief The dmabuf file descriptor
+ */
+
+/**
+ * \var FrameBuffer::Plane::offset
+ * \brief The plane offset in bytes
+ */
+
+/**
+ * \var FrameBuffer::Plane::length
+ * \brief The plane length in bytes
+ */
+
+namespace {
+
+ino_t fileDescriptorInode(const SharedFD &fd)
+{
+ if (!fd.isValid())
+ return 0;
+
+ struct stat st;
+ int ret = fstat(fd.get(), &st);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(Buffer, Fatal)
+ << "Failed to fstat() fd: " << strerror(-ret);
+ return 0;
+ }
+
+ return st.st_ino;
+}
+
+} /* namespace */
+
+/**
+ * \brief Construct a FrameBuffer with an array of planes
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
+ : FrameBuffer(std::make_unique<Private>(planes, cookie))
+{
+}
+
+/**
+ * \brief Construct a FrameBuffer with an extensible private class
+ * \param[in] d The extensible private class
+ */
+FrameBuffer::FrameBuffer(std::unique_ptr<Private> d)
+ : Extensible(std::move(d))
+{
+ unsigned int offset = 0;
+ bool isContiguous = true;
+ ino_t inode = 0;
+
+ for (const auto &plane : _d()->planes_) {
+ ASSERT(plane.offset != Plane::kInvalidOffset);
+
+ if (plane.offset != offset) {
+ isContiguous = false;
+ break;
+ }
+
+ /*
+ * Two different dmabuf file descriptors may still refer to the
+ * same dmabuf instance. Check this using inodes.
+ */
+ if (plane.fd != _d()->planes_[0].fd) {
+ if (!inode)
+ inode = fileDescriptorInode(_d()->planes_[0].fd);
+ if (fileDescriptorInode(plane.fd) != inode) {
+ isContiguous = false;
+ break;
+ }
+ }
+
+ offset += plane.length;
+ }
+
+ LOG(Buffer, Debug)
+ << "Buffer is " << (isContiguous ? "" : "not ") << "contiguous";
+
+ _d()->isContiguous_ = isContiguous;
+}
+
+/**
+ * \brief Retrieve the static plane descriptors
+ * \return Array of plane descriptors
+ */
+const std::vector<FrameBuffer::Plane> &FrameBuffer::planes() const
+{
+ return _d()->planes_;
+}
+
+/**
+ * \brief Retrieve the request this buffer belongs to
+ *
+ * The intended callers of this function are buffer completion handlers that
+ * need to associate a buffer to the request it belongs to.
+ *
+ * A FrameBuffer is associated to a request by Request::addBuffer() and the
+ * association is valid until the buffer completes. The returned request
+ * pointer is valid only during that interval.
+ *
+ * \return The Request the FrameBuffer belongs to, or nullptr if the buffer is
+ * not associated with a request
+ */
+Request *FrameBuffer::request() const
+{
+ return _d()->request_;
+}
+
+/**
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+const FrameMetadata &FrameBuffer::metadata() const
+{
+ return _d()->metadata_;
+}
+
+/**
+ * \brief Retrieve the cookie
+ *
+ * The cookie belongs to the creator of the FrameBuffer, which controls its
+ * lifetime and value.
+ *
+ * \sa setCookie()
+ *
+ * \return The cookie
+ */
+uint64_t FrameBuffer::cookie() const
+{
+ return _d()->cookie_;
+}
+
+/**
+ * \brief Set the cookie
+ * \param[in] cookie Cookie to set
+ *
+ * The cookie belongs to the creator of the FrameBuffer. Its value may be
+ * modified at any time with this function. Applications and IPAs shall not
+ * modify the cookie value of buffers they haven't created themselves. The
+ * libcamera core never modifies the buffer cookie.
+ */
+void FrameBuffer::setCookie(uint64_t cookie)
+{
+ _d()->cookie_ = cookie;
+}
+
+/**
+ * \brief Extract the Fence associated with this FrameBuffer
+ *
+ * This function moves the buffer's fence ownership to the caller.
+ * After the fence has been released, calling this function always returns
+ * nullptr.
+ *
+ * If a buffer with a Fence completes with errors due to a failure in handling
+ * the fence, applications are responsible for releasing the Fence before
+ * calling Request::addBuffer() again.
+ *
+ * \return A unique pointer to the Fence if set, or nullptr if the fence has
+ * been released already
+ */
+std::unique_ptr<Fence> FrameBuffer::releaseFence()
+{
+ return std::move(_d()->fence_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index a37b564c..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,19 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
#include <errno.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/log.h>
+
#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>
-#include "log.h"
-#include "pipeline_handler.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file framebuffer_allocator.h
@@ -58,14 +59,11 @@ LOG_DEFINE_CATEGORY(Allocator)
* \param[in] camera The camera
*/
FrameBufferAllocator::FrameBufferAllocator(std::shared_ptr<Camera> camera)
- : camera_(camera)
+ : camera_(std::move(camera))
{
}
-FrameBufferAllocator::~FrameBufferAllocator()
-{
- buffers_.clear();
-}
+FrameBufferAllocator::~FrameBufferAllocator() = default;
/**
* \brief Allocate buffers for a configured stream
@@ -76,7 +74,7 @@ FrameBufferAllocator::~FrameBufferAllocator()
* stopped, and the stream shall be part of the active camera configuration.
*
* Upon successful allocation, the allocated buffers can be retrieved with the
- * buffers() method.
+ * buffers() function.
*
* \return The number of allocated buffers on success or a negative error code
* otherwise
@@ -87,16 +85,22 @@ FrameBufferAllocator::~FrameBufferAllocator()
*/
int FrameBufferAllocator::allocate(Stream *stream)
{
- if (buffers_.count(stream)) {
+ const auto &[it, inserted] = buffers_.try_emplace(stream);
+
+ if (!inserted) {
LOG(Allocator, Error) << "Buffers already allocated for stream";
return -EBUSY;
}
- int ret = camera_->exportFrameBuffers(stream, &buffers_[stream]);
+ int ret = camera_->exportFrameBuffers(stream, &it->second);
if (ret == -EINVAL)
LOG(Allocator, Error)
- << "Stream is not part of " << camera_->name()
+ << "Stream is not part of " << camera_->id()
<< " active configuration";
+
+ if (ret < 0)
+ buffers_.erase(it);
+
return ret;
}
@@ -118,8 +122,6 @@ int FrameBufferAllocator::free(Stream *stream)
if (iter == buffers_.end())
return -EINVAL;
- std::vector<std::unique_ptr<FrameBuffer>> &buffers = iter->second;
- buffers.clear();
buffers_.erase(iter);
return 0;
@@ -136,7 +138,7 @@ int FrameBufferAllocator::free(Stream *stream)
* \brief Retrieve the buffers allocated for a \a stream
* \param[in] stream The stream to retrieve buffers for
*
- * This method shall only be called after successfully allocating buffers for
+ * This function shall only be called after successfully allocating buffers for
* \a stream with allocate(). The returned buffers are valid until free() is
* called for the same stream or the FrameBufferAllocator instance is destroyed.
*
diff --git a/src/libcamera/gen-controls.py b/src/libcamera/gen-controls.py
deleted file mode 100755
index 87c3d52a..00000000
--- a/src/libcamera/gen-controls.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (C) 2019, Google Inc.
-#
-# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-#
-# gen-controls.py - Generate control definitions from YAML
-
-import argparse
-import string
-import sys
-import yaml
-
-
-def snake_case(s):
- return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')
-
-
-def format_description(description):
- description = description.strip('\n').split('\n')
- description[0] = '\\brief ' + description[0]
- return '\n'.join([(line and ' * ' or ' *') + line for line in description])
-
-
-def generate_cpp(controls):
- enum_doc_start_template = string.Template('''/**
- * \\enum ${name}Values
- * \\brief Supported ${name} values''')
- enum_doc_value_template = string.Template(''' * \\var ${name}Values::${value}
-${description}''')
- doc_template = string.Template('''/**
- * \\var ${name}
-${description}
- */''')
- def_template = string.Template('extern const Control<${type}> ${name}(${id_name}, "${name}");')
-
- ctrls_doc = []
- ctrls_def = []
- ctrls_map = []
-
- for ctrl in controls:
- name, ctrl = ctrl.popitem()
- id_name = snake_case(name).upper()
-
- ctrl_type = ctrl['type']
- if ctrl_type == 'string':
- ctrl_type = 'std::string'
- elif ctrl.get('size'):
- ctrl_type = 'Span<const %s>' % ctrl_type
-
- info = {
- 'name': name,
- 'type': ctrl_type,
- 'description': format_description(ctrl['description']),
- 'id_name': id_name,
- }
-
- enum = ctrl.get('enum')
- if enum:
- enum_doc = []
- enum_doc.append(enum_doc_start_template.substitute(info))
-
- for entry in enum:
- value_info = {
- 'name' : name,
- 'value': entry['name'],
- 'description': format_description(entry['description']),
- }
- enum_doc.append(enum_doc_value_template.substitute(value_info))
-
- enum_doc = '\n *\n'.join(enum_doc)
- enum_doc += '\n */'
- ctrls_doc.append(enum_doc)
-
- ctrls_doc.append(doc_template.substitute(info))
- ctrls_def.append(def_template.substitute(info))
- ctrls_map.append('\t{ ' + id_name + ', &' + name + ' },')
-
- return {
- 'controls_doc': '\n\n'.join(ctrls_doc),
- 'controls_def': '\n'.join(ctrls_def),
- 'controls_map': '\n'.join(ctrls_map),
- }
-
-
-def generate_h(controls):
- enum_template_start = string.Template('''enum ${name}Values {''')
- enum_value_template = string.Template('''\t${name} = ${value},''')
- template = string.Template('''extern const Control<${type}> ${name};''')
-
- ctrls = []
- ids = []
- id_value = 1
-
- for ctrl in controls:
- name, ctrl = ctrl.popitem()
- id_name = snake_case(name).upper()
-
- ids.append('\t' + id_name + ' = ' + str(id_value) + ',')
-
- ctrl_type = ctrl['type']
- if ctrl_type == 'string':
- ctrl_type = 'std::string'
- elif ctrl.get('size'):
- ctrl_type = 'Span<const %s>' % ctrl_type
-
- info = {
- 'name': name,
- 'type': ctrl_type,
- }
-
- enum = ctrl.get('enum')
- if enum:
- ctrls.append(enum_template_start.substitute(info))
-
- for entry in enum:
- value_info = {
- 'name': entry['name'],
- 'value': entry['value'],
- }
- ctrls.append(enum_value_template.substitute(value_info))
- ctrls.append("};")
-
- ctrls.append(template.substitute(info))
- id_value += 1
-
- return {'ids': '\n'.join(ids), 'controls': '\n'.join(ctrls)}
-
-
-def fill_template(template, data):
-
- template = open(template, 'rb').read()
- template = template.decode('utf-8')
- template = string.Template(template)
- return template.substitute(data)
-
-
-def main(argv):
-
- # Parse command line arguments
- parser = argparse.ArgumentParser()
- parser.add_argument('-o', dest='output', metavar='file', type=str,
- help='Output file name. Defaults to standard output if not specified.')
- parser.add_argument('input', type=str,
- help='Input file name.')
- parser.add_argument('template', type=str,
- help='Template file name.')
- args = parser.parse_args(argv[1:])
-
- data = open(args.input, 'rb').read()
- controls = yaml.safe_load(data)['controls']
-
- if args.template.endswith('.cpp.in'):
- data = generate_cpp(controls)
- elif args.template.endswith('.h.in'):
- data = generate_h(controls)
- else:
- raise RuntimeError('Unknown template type')
-
- data = fill_template(args.template, data)
-
- if args.output:
- output = open(args.output, 'wb')
- output.write(data.encode('utf-8'))
- output.close()
- else:
- sys.stdout.write(data)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index 13f642be..81cc8cd5 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -10,6 +10,8 @@
#include <sstream>
#include <stdint.h>
+#include <libcamera/base/log.h>
+
/**
* \file geometry.h
* \brief Data structures related to geometric objects
@@ -18,71 +20,85 @@
namespace libcamera {
/**
- * \struct Rectangle
- * \brief Describe a rectangle's position and dimensions
- *
- * Rectangles are used to identify an area of an image. They are specified by
- * the coordinates of top-left corner and their horizontal and vertical size.
+ * \class Point
+ * \brief Describe a point in two-dimensional space
*
- * The measure unit of the rectangle coordinates and size, as well as the
- * reference point from which the Rectangle::x and Rectangle::y displacements
- * refers to, are defined by the context were rectangle is used.
+ * The Point structure defines a point in two-dimensional space with integer
+ * precision. The coordinates of a Point may be negative as well as positive.
*/
/**
- * \var Rectangle::x
- * \brief The horizontal coordinate of the rectangle's top-left corner
+ * \fn Point::Point()
+ * \brief Construct a Point with x and y set to 0
*/
/**
- * \var Rectangle::y
- * \brief The vertical coordinate of the rectangle's top-left corner
+ * \fn Point::Point(int xpos, int ypos)
+ * \brief Construct a Point at given \a xpos and \a ypos values
+ * \param[in] xpos The x-coordinate
+ * \param[in] ypos The y-coordinate
*/
/**
- * \var Rectangle::w
- * \brief The distance between the left and right sides
+ * \var Point::x
+ * \brief The x-coordinate of the Point
*/
/**
- * \var Rectangle::h
- * \brief The distance between the top and bottom sides
+ * \var Point::y
+ * \brief The y-coordinate of the Point
*/
/**
- * \brief Assemble and return a string describing the rectangle
- * \return A string describing the Rectangle
+ * \brief Assemble and return a string describing the point
+ * \return A string describing the point
*/
-const std::string Rectangle::toString() const
+const std::string Point::toString() const
{
std::stringstream ss;
-
- ss << "(" << x << "x" << y << ")/" << w << "x" << h;
+ ss << *this;
return ss.str();
}
/**
- * \brief Compare rectangles for equality
- * \return True if the two rectangles are equal, false otherwise
+ * \fn Point Point::operator-() const
+ * \brief Negate a Point by negating both its x and y coordinates
+ * \return The negated point
*/
-bool operator==(const Rectangle &lhs, const Rectangle &rhs)
+
+/**
+ * \brief Compare points for equality
+ * \return True if the two points are equal, false otherwise
+ */
+bool operator==(const Point &lhs, const Point &rhs)
{
- return lhs.x == rhs.x && lhs.y == rhs.y &&
- lhs.w == rhs.w && lhs.h == rhs.h;
+ return lhs.x == rhs.x && lhs.y == rhs.y;
}
/**
- * \fn bool operator!=(const Rectangle &lhs, const Rectangle &rhs)
- * \brief Compare rectangles for inequality
- * \return True if the two rectangles are not equal, false otherwise
+ * \fn bool operator!=(const Point &lhs, const Point &rhs)
+ * \brief Compare points for inequality
+ * \return True if the two points are not equal, false otherwise
*/
/**
- * \struct Size
+ * \brief Insert a text representation of a Point into an output stream
+ * \param[in] out The output stream
+ * \param[in] p The point
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Point &p)
+{
+ out << "(" << p.x << ", " << p.y << ")";
+ return out;
+}
+
+/**
+ * \class Size
* \brief Describe a two-dimensional size
*
- * The Size structure defines a two-dimensional size with integer precision.
+ * The Size class defines a two-dimensional size with integer precision.
*/
/**
@@ -108,12 +124,256 @@ bool operator==(const Rectangle &lhs, const Rectangle &rhs)
*/
/**
+ * \fn bool Size::isNull() const
+ * \brief Check if the size is null
+ * \return True if both the width and height are 0, or false otherwise
+ */
+
+/**
* \brief Assemble and return a string describing the size
* \return A string describing the size
*/
const std::string Size::toString() const
{
- return std::to_string(width) + "x" + std::to_string(height);
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \fn Size::alignDownTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size down horizontally and vertically in place
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ *
+ * This function rounds the width and height down to the nearest multiple of
+ * \a hAlignment and \a vAlignment respectively.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::alignUpTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size up horizontally and vertically in place
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ *
+ * This function rounds the width and height up to the nearest multiple of
+ * \a hAlignment and \a vAlignment respectively.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::boundTo(const Size &bound)
+ * \brief Bound the size to \a bound in place
+ * \param[in] bound The maximum size
+ *
+ * This function sets the width and height to the minimum of this size and the
+ * \a bound size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::expandTo(const Size &expand)
+ * \brief Expand the size to \a expand
+ * \param[in] expand The minimum size
+ *
+ * This function sets the width and height to the maximum of this size and the
+ * \a expand size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::growBy(const Size &margins)
+ * \brief Grow the size by \a margins in place
+ * \param[in] margins The margins to add to the size
+ *
+ * This function adds the width and height of the \a margins size to this size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::shrinkBy(const Size &margins)
+ * \brief Shrink the size by \a margins in place
+ * \param[in] margins The margins to subtract from the size
+ *
+ * This function subtracts the width and height of the \a margins size from
+ * this size. If the width or height of the size are smaller than those of
+ * \a margins, the result is clamped to 0.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::alignedDownTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size down horizontally and vertically
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ * \return A Size whose width and height are equal to the width and height of
+ * this size rounded down to the nearest multiple of \a hAlignment and
+ * \a vAlignment respectively
+ */
+
+/**
+ * \fn Size::alignedUpTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size up horizontally and vertically
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ * \return A Size whose width and height are equal to the width and height of
+ * this size rounded up to the nearest multiple of \a hAlignment and
+ * \a vAlignment respectively
+ */
+
+/**
+ * \fn Size::boundedTo(const Size &bound)
+ * \brief Bound the size to \a bound
+ * \param[in] bound The maximum size
+ * \return A Size whose width and height are the minimum of the width and
+ * height of this size and the \a bound size
+ */
+
+/**
+ * \fn Size::expandedTo(const Size &expand)
+ * \brief Expand the size to \a expand
+ * \param[in] expand The minimum size
+ * \return A Size whose width and height are the maximum of the width and
+ * height of this size and the \a expand size
+ */
+
+/**
+ * \fn Size::grownBy(const Size &margins)
+ * \brief Grow the size by \a margins
+ * \param[in] margins The margins to add to the size
+ * \return A Size whose width and height are the sum of the width and height of
+ * this size and the \a margins size
+ */
+
+/**
+ * \fn Size::shrunkBy(const Size &margins)
+ * \brief Shrink the size by \a margins
+ * \param[in] margins The margins to subtract from the size
+ *
+ * If the width or height of the size are smaller than those of \a margins, the
+ * resulting size has its width or height clamped to 0.
+ *
+ * \return A Size whose width and height are the difference of the width and
+ * height of this size and the \a margins size, clamped to 0
+ */
+
+/**
+ * \brief Bound the size down to match the aspect ratio given by \a ratio
+ * \param[in] ratio The size whose aspect ratio must be matched
+ *
+ * The behaviour of this function is undefined if either the width or the
+ * height of the \a ratio is zero.
+ *
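+ * For instance (illustrative values), bounding 1920x1440 to a 16:9 ratio
+ * yields 1920x1080:
+ *
+ * \code{.cpp}
+ * Size size(1920, 1440);
+ * Size bounded = size.boundedToAspectRatio(Size(16, 9));
+ * // bounded == Size(1920, 1080)
+ * \endcode
+ *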
+ * \return A Size whose width and height are equal to the width and height
+ * of this Size aligned down to the aspect ratio of \a ratio
+ */
+Size Size::boundedToAspectRatio(const Size &ratio) const
+{
+ ASSERT(ratio.width && ratio.height);
+
+ uint64_t ratio1 = static_cast<uint64_t>(width) *
+ static_cast<uint64_t>(ratio.height);
+ uint64_t ratio2 = static_cast<uint64_t>(ratio.width) *
+ static_cast<uint64_t>(height);
+
+ if (ratio1 > ratio2)
+ return { static_cast<unsigned int>(ratio2 / ratio.height), height };
+ else
+ return { width, static_cast<unsigned int>(ratio1 / ratio.width) };
+}
+
+/**
+ * \brief Expand the size to match the aspect ratio given by \a ratio
+ * \param[in] ratio The size whose aspect ratio must be matched
+ *
+ * The behaviour of this function is undefined if either the width or the
+ * height of the \a ratio is zero.
+ *
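+ * For instance (illustrative values), expanding 1920x1440 to a 16:9 ratio
+ * yields 2560x1440:
+ *
+ * \code{.cpp}
+ * Size size(1920, 1440);
+ * Size expanded = size.expandedToAspectRatio(Size(16, 9));
+ * // expanded == Size(2560, 1440)
+ * \endcode
+ *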
+ * \return A Size whose width and height are equal to the width and height
+ * of this Size expanded up to the aspect ratio of \a ratio
+ */
+Size Size::expandedToAspectRatio(const Size &ratio) const
+{
+ ASSERT(ratio.width && ratio.height);
+
+ uint64_t ratio1 = static_cast<uint64_t>(width) *
+ static_cast<uint64_t>(ratio.height);
+ uint64_t ratio2 = static_cast<uint64_t>(ratio.width) *
+ static_cast<uint64_t>(height);
+
+ if (ratio1 < ratio2)
+ return { static_cast<unsigned int>(ratio2 / ratio.height), height };
+ else
+ return { width, static_cast<unsigned int>(ratio1 / ratio.width) };
+}
+
+/**
+ * \brief Center a rectangle of this size at a given Point
+ * \param[in] center The center point the Rectangle is to have
+ *
+ * A Rectangle of this object's size is positioned so that its center
+ * is at the given Point.
+ *
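+ * For example (illustrative values):
+ *
+ * \code{.cpp}
+ * Rectangle r = Size(100, 100).centeredTo(Point(50, 50));
+ * // r == Rectangle(0, 0, 100, 100)
+ * \endcode
+ *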
+ * \return A Rectangle of this size, centered at the given Point.
+ */
+Rectangle Size::centeredTo(const Point &center) const
+{
+ int x = center.x - width / 2;
+ int y = center.y - height / 2;
+
+ return { x, y, width, height };
+}
+
+/**
+ * \brief Scale size up by the given factor
+ * \param[in] factor The factor
+ * \return The scaled Size
+ */
+Size Size::operator*(float factor) const
+{
+ return Size(width * factor, height * factor);
+}
+
+/**
+ * \brief Scale size down by the given factor
+ * \param[in] factor The factor
+ * \return The scaled Size
+ */
+Size Size::operator/(float factor) const
+{
+ return Size(width / factor, height / factor);
+}
+
+/**
+ * \brief Scale this size up by the given factor in place
+ * \param[in] factor The factor
+ * \return A reference to this object
+ */
+Size &Size::operator*=(float factor)
+{
+ width *= factor;
+ height *= factor;
+ return *this;
+}
+
+/**
+ * \brief Scale this size down by the given factor in place
+ * \param[in] factor The factor
+ * \return A reference to this object
+ */
+Size &Size::operator/=(float factor)
+{
+ width /= factor;
+ height /= factor;
+ return *this;
}
/**
@@ -183,7 +443,19 @@ bool operator<(const Size &lhs, const Size &rhs)
*/
/**
- * \struct SizeRange
+ * \brief Insert a text representation of a Size into an output stream
+ * \param[in] out The output stream
+ * \param[in] s The size
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Size &s)
+{
+ out << s.width << "x" << s.height;
+ return out;
+}
+
+/**
+ * \class SizeRange
* \brief Describe a range of sizes
*
* A SizeRange describes a range of sizes included in the [min, max] interval
@@ -282,9 +554,7 @@ bool SizeRange::contains(const Size &size) const
std::string SizeRange::toString() const
{
std::stringstream ss;
-
- ss << "(" << min.toString() << ")-(" << max.toString() << ")/(+"
- << hStep << ",+" << vStep << ")";
+ ss << *this;
return ss.str();
}
@@ -304,4 +574,344 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
* \return True if the two size ranges are not equal, false otherwise
*/
+/**
+ * \brief Insert a text representation of a SizeRange into an output stream
+ * \param[in] out The output stream
+ * \param[in] sr The size range
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
+{
+ out << "(" << sr.min << ")-(" << sr.max << ")/(+"
+ << sr.hStep << ",+" << sr.vStep << ")";
+
+ return out;
+}
+
+/**
+ * \class Rectangle
+ * \brief Describe a rectangle's position and dimensions
+ *
+ * Rectangles are used to identify an area of an image. They are specified by
+ * the coordinates of their top-left corner and their horizontal and vertical
+ * size.
+ * By convention, the top-left corner is defined as the corner with the lowest
+ * x and y coordinates, regardless of the origin and direction of the axes.
+ *
+ * The measurement unit of the rectangle coordinates and size, as well as the
+ * reference point from which the Rectangle::x and Rectangle::y displacements
+ * are measured, are defined by the context where the rectangle is used.
+ */
+
+/**
+ * \fn Rectangle::Rectangle()
+ * \brief Construct a Rectangle with all coordinates set to 0
+ */
+
+/**
+ * \fn Rectangle::Rectangle(int x, int y, const Size &size)
+ * \brief Construct a Rectangle with the given position and size
+ * \param[in] x The horizontal coordinate of the top-left corner
+ * \param[in] y The vertical coordinate of the top-left corner
+ * \param[in] size The size
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \fn Rectangle::Rectangle(int x, int y, unsigned int width, unsigned int height)
+ * \brief Construct a Rectangle with the given position and size
+ * \param[in] x The horizontal coordinate of the top-left corner
+ * \param[in] y The vertical coordinate of the top-left corner
+ * \param[in] width The width
+ * \param[in] height The height
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \fn Rectangle::Rectangle(const Size &size)
+ * \brief Construct a Rectangle of \a size with its top left corner located
+ * at (0,0)
+ * \param[in] size The desired Rectangle size
+ */
+
+/**
+ * \fn Rectangle::Rectangle(const Point &point1, const Point &point2)
+ * \brief Construct a Rectangle from two opposite corners
+ * \param[in] point1 One corner of the rectangle
+ * \param[in] point2 The corner opposite \a point1
+ */
+
+/**
+ * \var Rectangle::x
+ * \brief The horizontal coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \var Rectangle::y
+ * \brief The vertical coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \var Rectangle::width
+ * \brief The distance between the left and right sides
+ */
+
+/**
+ * \var Rectangle::height
+ * \brief The distance between the top and bottom sides
+ */
+
+/**
+ * \fn bool Rectangle::isNull() const
+ * \brief Check if the rectangle is null
+ * \return True if both the width and height are 0, or false otherwise
+ */
+
+/**
+ * \brief Assemble and return a string describing the rectangle
+ * \return A string describing the Rectangle
+ */
+const std::string Rectangle::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Retrieve the center point of this rectangle
+ * \return The center Point
+ */
+Point Rectangle::center() const
+{
+ return { x + static_cast<int>(width / 2), y + static_cast<int>(height / 2) };
+}
+
+/**
+ * \fn Size Rectangle::size() const
+ * \brief Retrieve the size of this rectangle
+ * \return The Rectangle size
+ */
+
+/**
+ * \fn Point Rectangle::topLeft() const
+ * \brief Retrieve the coordinates of the top left corner of this Rectangle
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ *
+ * \return The Rectangle's top left corner
+ */
+
+/**
+ * \brief Apply a non-uniform rational scaling in place to this Rectangle
+ * \param[in] numerator The numerators of the x and y scaling factors
+ * \param[in] denominator The denominators of the x and y scaling factors
+ *
+ * A non-uniform scaling is applied in place such that the resulting x
+ * coordinates are multiplied by numerator.width / denominator.width,
+ * and similarly for the y coordinates (using height in place of width).
+ *
+ * \return A reference to this object
+ */
+Rectangle &Rectangle::scaleBy(const Size &numerator, const Size &denominator)
+{
+ x = static_cast<int64_t>(x) * numerator.width / denominator.width;
+ y = static_cast<int64_t>(y) * numerator.height / denominator.height;
+ width = static_cast<uint64_t>(width) * numerator.width / denominator.width;
+ height = static_cast<uint64_t>(height) * numerator.height / denominator.height;
+
+ return *this;
+}
+
+/**
+ * \brief Translate this Rectangle in place by the given Point
+ * \param[in] point The amount to translate the Rectangle by
+ *
+ * The Rectangle is translated in the x-direction by the point's x coordinate
+ * and in the y-direction by the point's y coordinate.
+ *
+ * \return A reference to this object
+ */
+Rectangle &Rectangle::translateBy(const Point &point)
+{
+ x += point.x;
+ y += point.y;
+
+ return *this;
+}
+
+/**
+ * \brief Calculate the intersection of this Rectangle with another
+ * \param[in] bound The Rectangle that is intersected with this Rectangle
+ *
+ * This function calculates the standard intersection of two rectangles. If the
+ * rectangles do not overlap in either the x or y direction, then the size
+ * of that dimension in the result (its width or height) is set to zero. Even
+ * when one dimension is set to zero, note that the other dimension may still
+ * have a positive value if there was some overlap.
+ *
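+ * For example (illustrative values), intersecting (0, 0)/100x100 with
+ * (50, 50)/100x100 yields (50, 50)/50x50:
+ *
+ * \code{.cpp}
+ * Rectangle a(0, 0, 100, 100);
+ * Rectangle b(50, 50, 100, 100);
+ * Rectangle c = a.boundedTo(b);
+ * // c == Rectangle(50, 50, 50, 50)
+ * \endcode
+ *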
+ * \return A Rectangle that is the intersection of the input rectangles
+ */
+Rectangle Rectangle::boundedTo(const Rectangle &bound) const
+{
+ int topLeftX = std::max(x, bound.x);
+ int topLeftY = std::max(y, bound.y);
+ int bottomRightX = std::min<int>(x + width, bound.x + bound.width);
+ int bottomRightY = std::min<int>(y + height, bound.y + bound.height);
+
+ unsigned int newWidth = std::max(bottomRightX - topLeftX, 0);
+ unsigned int newHeight = std::max(bottomRightY - topLeftY, 0);
+
+ return { topLeftX, topLeftY, newWidth, newHeight };
+}
+
+/**
+ * \brief Enclose a Rectangle so as not to exceed another Rectangle
+ * \param[in] boundary The limit that the returned Rectangle will not exceed
+ *
+ * The Rectangle is modified so that it does not exceed the given \a boundary.
+ * This process involves translating the Rectangle if any of its edges
+ * lie beyond \a boundary, so that those edges then lie along the boundary
+ * instead.
+ *
+ * If either the width or the height is larger than that of \a boundary, the
+ * returned Rectangle is clipped to be no larger. Other than this, the
+ * Rectangle is not clipped or reduced in size, merely translated.
+ *
+ * Note that this is not a conventional Rectangle intersection function
+ * which is provided by boundedTo().
+ *
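+ * For example (illustrative values), enclosing (-20, -20)/50x50 in
+ * (0, 0)/100x100 translates the rectangle to (0, 0)/50x50:
+ *
+ * \code{.cpp}
+ * Rectangle window(-20, -20, 50, 50);
+ * Rectangle bounded = window.enclosedIn(Rectangle(0, 0, 100, 100));
+ * // bounded == Rectangle(0, 0, 50, 50)
+ * \endcode
+ *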
+ * \return A Rectangle that does not extend beyond a boundary Rectangle
+ */
+Rectangle Rectangle::enclosedIn(const Rectangle &boundary) const
+{
+ /* We can't be bigger than the boundary rectangle. */
+ Rectangle result = boundedTo(Rectangle{ x, y, boundary.size() });
+
+ result.x = std::clamp<int>(result.x, boundary.x,
+ boundary.x + boundary.width - result.width);
+ result.y = std::clamp<int>(result.y, boundary.y,
+ boundary.y + boundary.height - result.height);
+
+ return result;
+}
+
+/**
+ * \brief Apply a non-uniform rational scaling to this Rectangle
+ * \param[in] numerator The numerators of the x and y scaling factors
+ * \param[in] denominator The denominators of the x and y scaling factors
+ *
+ * A non-uniform scaling is applied such that the resulting x
+ * coordinates are multiplied by numerator.width / denominator.width,
+ * and similarly for the y coordinates (using height in place of width).
+ *
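+ * For example (illustrative values), halving a rectangle:
+ *
+ * \code{.cpp}
+ * Rectangle r(100, 100, 1920, 1080);
+ * Rectangle half = r.scaledBy(Size(1, 1), Size(2, 2));
+ * // half == Rectangle(50, 50, 960, 540)
+ * \endcode
+ *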
+ * \return The non-uniformly scaled Rectangle
+ */
+Rectangle Rectangle::scaledBy(const Size &numerator, const Size &denominator) const
+{
+ int scaledX = static_cast<int64_t>(x) * numerator.width / denominator.width;
+ int scaledY = static_cast<int64_t>(y) * numerator.height / denominator.height;
+ unsigned int scaledWidth = static_cast<uint64_t>(width) * numerator.width / denominator.width;
+ unsigned int scaledHeight = static_cast<uint64_t>(height) * numerator.height / denominator.height;
+
+ return { scaledX, scaledY, scaledWidth, scaledHeight };
+}
+
+/**
+ * \brief Translate a Rectangle by the given amounts
+ * \param[in] point The amount to translate the Rectangle by
+ *
+ * The Rectangle is translated in the x-direction by the point's x coordinate
+ * and in the y-direction by the point's y coordinate.
+ *
+ * \return The translated Rectangle
+ */
+Rectangle Rectangle::translatedBy(const Point &point) const
+{
+ return { x + point.x, y + point.y, width, height };
+}
+
+/**
+ * \brief Transform a Rectangle from one reference rectangle to another
+ * \param[in] source The \a source reference rectangle
+ * \param[in] destination The \a destination reference rectangle
+ *
+ * The \a source and \a destination parameters describe two rectangles defined
+ * in different reference systems. The Rectangle is translated from the source
+ * reference system into the destination reference system.
+ *
+ * The typical use case for this function is to translate a selection rectangle
+ * specified in one reference system, for example the sensor's pixel array,
+ * into the same rectangle re-scaled and translated into a different reference
+ * system, for example the output frame to which the selection rectangle is
+ * applied.
+ *
+ * For example, consider a sensor with a resolution of 4040x2360 pixels and
+ * assume a rectangle of (100, 100)/3840x2160 (sensorReference) in sensor
+ * coordinates is mapped to a rectangle (0, 0)/1920x1080 (displayReference) in
+ * display coordinates. This function can be used to transform an arbitrary
+ * rectangle from display coordinates to sensor coordinates or vice versa:
+ *
+ * \code{.cpp}
+ * Rectangle sensorReference(100, 100, 3840, 2160);
+ * Rectangle displayReference(0, 0, 1920, 1080);
+ *
+ * // Bottom right quarter in sensor coordinates
+ * Rectangle sensorRect(2020, 1180, 1920, 1080);
+ * Rectangle displayRect =
+ * 	sensorRect.transformedBetween(sensorReference, displayReference);
+ * // displayRect is now (960, 540)/960x540
+ *
+ * // Transformation back to sensor coordinates
+ * sensorRect = displayRect.transformedBetween(displayReference, sensorReference);
+ * \endcode
+ */
+Rectangle Rectangle::transformedBetween(const Rectangle &source,
+ const Rectangle &destination) const
+{
+ Rectangle r;
+ double sx = static_cast<double>(destination.width) / source.width;
+ double sy = static_cast<double>(destination.height) / source.height;
+
+ r.x = static_cast<int>((x - source.x) * sx) + destination.x;
+ r.y = static_cast<int>((y - source.y) * sy) + destination.y;
+ r.width = static_cast<int>(width * sx);
+ r.height = static_cast<int>(height * sy);
+
+ return r;
+}
+
+/**
+ * \brief Compare rectangles for equality
+ * \return True if the two rectangles are equal, false otherwise
+ */
+bool operator==(const Rectangle &lhs, const Rectangle &rhs)
+{
+ return lhs.x == rhs.x && lhs.y == rhs.y &&
+ lhs.width == rhs.width && lhs.height == rhs.height;
+}
+
+/**
+ * \fn bool operator!=(const Rectangle &lhs, const Rectangle &rhs)
+ * \brief Compare rectangles for inequality
+ * \return True if the two rectangles are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a Rectangle into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The rectangle
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Rectangle &r)
+{
+ out << "(" << r.x << ", " << r.y << ")/" << r.width << "x" << r.height;
+ return out;
+}
+
} /* namespace libcamera */
diff --git a/src/libcamera/include/byte_stream_buffer.h b/src/libcamera/include/byte_stream_buffer.h
deleted file mode 100644
index b3aaa8b9..00000000
--- a/src/libcamera/include/byte_stream_buffer.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * byte_stream_buffer.h - Byte stream buffer
- */
-#ifndef __LIBCAMERA_BYTE_STREAM_BUFFER_H__
-#define __LIBCAMERA_BYTE_STREAM_BUFFER_H__
-
-#include <stddef.h>
-#include <stdint.h>
-#include <type_traits>
-
-#include <libcamera/span.h>
-
-namespace libcamera {
-
-class ByteStreamBuffer
-{
-public:
- ByteStreamBuffer(const uint8_t *base, size_t size);
- ByteStreamBuffer(uint8_t *base, size_t size);
- ByteStreamBuffer(ByteStreamBuffer &&other);
- ByteStreamBuffer &operator=(ByteStreamBuffer &&other);
-
- const uint8_t *base() const { return base_; }
- uint32_t offset() const { return (write_ ? write_ : read_) - base_; }
- size_t size() const { return size_; }
- bool overflow() const { return overflow_; }
-
- ByteStreamBuffer carveOut(size_t size);
- int skip(size_t size);
-
- template<typename T>
- int read(T *t)
- {
- return read(reinterpret_cast<uint8_t *>(t), sizeof(*t));
- }
-
- template<typename T>
- int read(const Span<T> &data)
- {
- return read(reinterpret_cast<uint8_t *>(data.data()),
- data.size_bytes());
- }
-
- template<typename T>
- const std::remove_reference_t<T> *read(size_t count = 1)
- {
- using return_type = const std::remove_reference_t<T> *;
- return reinterpret_cast<return_type>(read(sizeof(T), count));
- }
-
- template<typename T>
- int write(const T *t)
- {
- return write(reinterpret_cast<const uint8_t *>(t), sizeof(*t));
- }
-
- template<typename T>
- int write(const Span<T> &data)
- {
- return write(reinterpret_cast<const uint8_t *>(data.data()),
- data.size_bytes());
- }
-
-private:
- ByteStreamBuffer(const ByteStreamBuffer &other) = delete;
- ByteStreamBuffer &operator=(const ByteStreamBuffer &other) = delete;
-
- void setOverflow();
-
- int read(uint8_t *data, size_t size);
- const uint8_t *read(size_t size, size_t count);
- int write(const uint8_t *data, size_t size);
-
- ByteStreamBuffer *parent_;
-
- const uint8_t *base_;
- size_t size_;
- bool overflow_;
-
- const uint8_t *read_;
- uint8_t *write_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_BYTE_STREAM_BUFFER_H__ */
diff --git a/src/libcamera/include/camera_controls.h b/src/libcamera/include/camera_controls.h
deleted file mode 100644
index 265c1fe3..00000000
--- a/src/libcamera/include/camera_controls.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_controls.h - Camera controls
- */
-#ifndef __LIBCAMERA_CAMERA_CONTROLS_H__
-#define __LIBCAMERA_CAMERA_CONTROLS_H__
-
-#include "control_validator.h"
-
-namespace libcamera {
-
-class Camera;
-
-class CameraControlValidator final : public ControlValidator
-{
-public:
- CameraControlValidator(Camera *camera);
-
- const std::string &name() const override;
- bool validate(unsigned int id) const override;
-
-private:
- Camera *camera_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CAMERA_CONTROLS_H__ */
diff --git a/src/libcamera/include/camera_sensor.h b/src/libcamera/include/camera_sensor.h
deleted file mode 100644
index 99cff981..00000000
--- a/src/libcamera/include/camera_sensor.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_sensor.h - A camera sensor
- */
-#ifndef __LIBCAMERA_CAMERA_SENSOR_H__
-#define __LIBCAMERA_CAMERA_SENSOR_H__
-
-#include <string>
-#include <vector>
-
-#include <libcamera/controls.h>
-#include <libcamera/geometry.h>
-
-#include "log.h"
-
-namespace libcamera {
-
-class MediaEntity;
-class V4L2Subdevice;
-
-struct V4L2SubdeviceFormat;
-
-class CameraSensor : protected Loggable
-{
-public:
- explicit CameraSensor(const MediaEntity *entity);
- ~CameraSensor();
-
- CameraSensor(const CameraSensor &) = delete;
- CameraSensor &operator=(const CameraSensor &) = delete;
-
- int init();
-
- const MediaEntity *entity() const { return entity_; }
- const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
- const std::vector<Size> &sizes() const { return sizes_; }
- const Size &resolution() const;
-
- V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const;
- int setFormat(V4L2SubdeviceFormat *format);
-
- const ControlInfoMap &controls() const;
- int getControls(ControlList *ctrls);
- int setControls(ControlList *ctrls);
-
- const ControlList &properties() const { return properties_; }
-
-protected:
- std::string logPrefix() const;
-
-private:
- const MediaEntity *entity_;
- V4L2Subdevice *subdev_;
-
- std::vector<unsigned int> mbusCodes_;
- std::vector<Size> sizes_;
-
- ControlList properties_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CAMERA_SENSOR_H__ */
diff --git a/src/libcamera/include/control_serializer.h b/src/libcamera/include/control_serializer.h
deleted file mode 100644
index 99bacd92..00000000
--- a/src/libcamera/include/control_serializer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * control_serializer.h - Control (de)serializer
- */
-#ifndef __LIBCAMERA_CONTROL_SERIALIZER_H__
-#define __LIBCAMERA_CONTROL_SERIALIZER_H__
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <libcamera/controls.h>
-
-namespace libcamera {
-
-class ByteStreamBuffer;
-
-class ControlSerializer
-{
-public:
- ControlSerializer();
-
- void reset();
-
- static size_t binarySize(const ControlInfoMap &infoMap);
- static size_t binarySize(const ControlList &list);
-
- int serialize(const ControlInfoMap &infoMap, ByteStreamBuffer &buffer);
- int serialize(const ControlList &list, ByteStreamBuffer &buffer);
-
- template<typename T>
- T deserialize(ByteStreamBuffer &buffer);
-
-private:
- static size_t binarySize(const ControlValue &value);
- static size_t binarySize(const ControlInfo &info);
-
- static void store(const ControlValue &value, ByteStreamBuffer &buffer);
- static void store(const ControlInfo &info, ByteStreamBuffer &buffer);
-
- ControlValue loadControlValue(ControlType type, ByteStreamBuffer &buffer,
- bool isArray = false, unsigned int count = 1);
- ControlInfo loadControlInfo(ControlType type, ByteStreamBuffer &buffer);
-
- unsigned int serial_;
- std::vector<std::unique_ptr<ControlId>> controlIds_;
- std::map<unsigned int, ControlInfoMap> infoMaps_;
- std::map<const ControlInfoMap *, unsigned int> infoMapHandles_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CONTROL_SERIALIZER_H__ */
diff --git a/src/libcamera/include/control_validator.h b/src/libcamera/include/control_validator.h
deleted file mode 100644
index f1c9110b..00000000
--- a/src/libcamera/include/control_validator.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * control_validator.h - Control validator
- */
-#ifndef __LIBCAMERA_CONTROL_VALIDATOR_H__
-#define __LIBCAMERA_CONTROL_VALIDATOR_H__
-
-#include <string>
-
-namespace libcamera {
-
-class ControlId;
-
-class ControlValidator
-{
-public:
- virtual ~ControlValidator() {}
-
- virtual const std::string &name() const = 0;
- virtual bool validate(unsigned int id) const = 0;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CONTROL_VALIDATOR_H__ */
diff --git a/src/libcamera/include/device_enumerator.h b/src/libcamera/include/device_enumerator.h
deleted file mode 100644
index 433e357a..00000000
--- a/src/libcamera/include/device_enumerator.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * device_enumerator.h - API to enumerate and find media devices
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_H__
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-namespace libcamera {
-
-class MediaDevice;
-
-class DeviceMatch
-{
-public:
- DeviceMatch(const std::string &driver);
-
- void add(const std::string &entity);
-
- bool match(const MediaDevice *device) const;
-
-private:
- std::string driver_;
- std::vector<std::string> entities_;
-};
-
-class DeviceEnumerator
-{
-public:
- static std::unique_ptr<DeviceEnumerator> create();
-
- virtual ~DeviceEnumerator();
-
- virtual int init() = 0;
- virtual int enumerate() = 0;
-
- std::shared_ptr<MediaDevice> search(const DeviceMatch &dm);
-
-protected:
- std::unique_ptr<MediaDevice> createDevice(const std::string &deviceNode);
- void addDevice(std::unique_ptr<MediaDevice> &&media);
- void removeDevice(const std::string &deviceNode);
-
-private:
- std::vector<std::shared_ptr<MediaDevice>> devices_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_H__ */
diff --git a/src/libcamera/include/device_enumerator_sysfs.h b/src/libcamera/include/device_enumerator_sysfs.h
deleted file mode 100644
index 5a5c9b0f..00000000
--- a/src/libcamera/include/device_enumerator_sysfs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * device_enumerator_sysfs.h - sysfs-based device enumerator
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__
-
-#include <memory>
-#include <string>
-
-#include "device_enumerator.h"
-
-class MediaDevice;
-
-namespace libcamera {
-
-class DeviceEnumeratorSysfs final : public DeviceEnumerator
-{
-public:
- int init();
- int enumerate();
-
-private:
- int populateMediaDevice(MediaDevice *media);
- std::string lookupDeviceNode(int major, int minor);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__ */
diff --git a/src/libcamera/include/device_enumerator_udev.h b/src/libcamera/include/device_enumerator_udev.h
deleted file mode 100644
index fdce4520..00000000
--- a/src/libcamera/include/device_enumerator_udev.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018-2019, Google Inc.
- *
- * device_enumerator_udev.h - udev-based device enumerator
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <sys/types.h>
-
-#include "device_enumerator.h"
-
-struct udev;
-struct udev_device;
-struct udev_monitor;
-
-namespace libcamera {
-
-class EventNotifier;
-class MediaDevice;
-class MediaEntity;
-
-class DeviceEnumeratorUdev : public DeviceEnumerator
-{
-public:
- DeviceEnumeratorUdev();
- ~DeviceEnumeratorUdev();
-
- int init() final;
- int enumerate() final;
-
-private:
- struct udev *udev_;
- struct udev_monitor *monitor_;
- EventNotifier *notifier_;
-
- using DependencyMap = std::map<dev_t, std::list<MediaEntity *>>;
-
- struct MediaDeviceDeps {
- MediaDeviceDeps(std::unique_ptr<MediaDevice> &&media,
- DependencyMap &&deps)
- : media_(std::move(media)), deps_(std::move(deps))
- {
- }
-
- bool operator==(const MediaDeviceDeps &other) const
- {
- return media_ == other.media_;
- }
-
- std::unique_ptr<MediaDevice> media_;
- DependencyMap deps_;
- };
-
- std::set<dev_t> orphans_;
- std::list<MediaDeviceDeps> pending_;
- std::map<dev_t, MediaDeviceDeps *> devMap_;
-
- int addUdevDevice(struct udev_device *dev);
- int populateMediaDevice(MediaDevice *media, DependencyMap *deps);
- std::string lookupDeviceNode(dev_t devnum);
-
- int addV4L2Device(dev_t devnum);
- void udevNotify(EventNotifier *notifier);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__ */
diff --git a/src/libcamera/include/event_dispatcher_poll.h b/src/libcamera/include/event_dispatcher_poll.h
deleted file mode 100644
index 1f073861..00000000
--- a/src/libcamera/include/event_dispatcher_poll.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_dispatcher_poll.h - Poll-based event dispatcher
- */
-#ifndef __LIBCAMERA_EVENT_DISPATCHER_POLL_H__
-#define __LIBCAMERA_EVENT_DISPATCHER_POLL_H__
-
-#include <list>
-#include <map>
-#include <vector>
-
-#include <libcamera/event_dispatcher.h>
-
-struct pollfd;
-
-namespace libcamera {
-
-class EventNotifier;
-class Timer;
-
-class EventDispatcherPoll final : public EventDispatcher
-{
-public:
- EventDispatcherPoll();
- ~EventDispatcherPoll();
-
- void registerEventNotifier(EventNotifier *notifier);
- void unregisterEventNotifier(EventNotifier *notifier);
-
- void registerTimer(Timer *timer);
- void unregisterTimer(Timer *timer);
-
- void processEvents();
- void interrupt();
-
-private:
- struct EventNotifierSetPoll {
- short events() const;
- EventNotifier *notifiers[3];
- };
-
- std::map<int, EventNotifierSetPoll> notifiers_;
- std::list<Timer *> timers_;
- int eventfd_;
-
- bool processingEvents_;
-
- int poll(std::vector<struct pollfd> *pollfds);
- void processInterrupt(const struct pollfd &pfd);
- void processNotifiers(const std::vector<struct pollfd> &pollfds);
- void processTimers();
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_EVENT_DISPATCHER_POLL_H__ */
diff --git a/src/libcamera/include/formats.h b/src/libcamera/include/formats.h
deleted file mode 100644
index f43bc8c0..00000000
--- a/src/libcamera/include/formats.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * formats.h - libcamera image formats
- */
-
-#ifndef __LIBCAMERA_FORMATS_H__
-#define __LIBCAMERA_FORMATS_H__
-
-#include <map>
-#include <vector>
-
-#include <libcamera/geometry.h>
-
-namespace libcamera {
-
-class ImageFormats
-{
-public:
- int addFormat(unsigned int format, const std::vector<SizeRange> &sizes);
-
- bool isEmpty() const;
- std::vector<unsigned int> formats() const;
- const std::vector<SizeRange> &sizes(unsigned int format) const;
- const std::map<unsigned int, std::vector<SizeRange>> &data() const;
-
-private:
- std::map<unsigned int, std::vector<SizeRange>> data_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_FORMATS_H__ */
diff --git a/src/libcamera/include/ipa_context_wrapper.h b/src/libcamera/include/ipa_context_wrapper.h
deleted file mode 100644
index c9e19412..00000000
--- a/src/libcamera/include/ipa_context_wrapper.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_context_wrapper.h - Image Processing Algorithm context wrapper
- */
-#ifndef __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__
-#define __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__
-
-#include <ipa/ipa_interface.h>
-
-#include "control_serializer.h"
-
-namespace libcamera {
-
-class IPAContextWrapper final : public IPAInterface
-{
-public:
- IPAContextWrapper(struct ipa_context *context);
- ~IPAContextWrapper();
-
- int init() override;
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override;
-
- void mapBuffers(const std::vector<IPABuffer> &buffers) override;
- void unmapBuffers(const std::vector<unsigned int> &ids) override;
-
- virtual void processEvent(const IPAOperationData &data) override;
-
-private:
- static void queue_frame_action(void *ctx, unsigned int frame,
- struct ipa_operation_data &data);
- static const struct ipa_callback_ops callbacks_;
-
- void doQueueFrameAction(unsigned int frame,
- const IPAOperationData &data);
-
- struct ipa_context *ctx_;
- IPAInterface *intf_;
-
- ControlSerializer serializer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__ */
diff --git a/src/libcamera/include/ipa_manager.h b/src/libcamera/include/ipa_manager.h
deleted file mode 100644
index 467658e4..00000000
--- a/src/libcamera/include/ipa_manager.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_manager.h - Image Processing Algorithm module manager
- */
-#ifndef __LIBCAMERA_IPA_MANAGER_H__
-#define __LIBCAMERA_IPA_MANAGER_H__
-
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "ipa_module.h"
-#include "pipeline_handler.h"
-
-namespace libcamera {
-
-class IPAManager
-{
-public:
- static IPAManager *instance();
-
- std::unique_ptr<IPAInterface> createIPA(PipelineHandler *pipe,
- uint32_t maxVersion,
- uint32_t minVersion);
-
-private:
- std::vector<IPAModule *> modules_;
-
- IPAManager();
- ~IPAManager();
-
- void parseDir(const char *libDir, unsigned int maxDepth,
- std::vector<std::string> &files);
- unsigned int addDir(const char *libDir, unsigned int maxDepth = 0);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_MANAGER_H__ */
diff --git a/src/libcamera/include/ipa_module.h b/src/libcamera/include/ipa_module.h
deleted file mode 100644
index 2028b76a..00000000
--- a/src/libcamera/include/ipa_module.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_module.h - Image Processing Algorithm module
- */
-#ifndef __LIBCAMERA_IPA_MODULE_H__
-#define __LIBCAMERA_IPA_MODULE_H__
-
-#include <string>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "pipeline_handler.h"
-
-namespace libcamera {
-
-class IPAModule
-{
-public:
- explicit IPAModule(const std::string &libPath);
- ~IPAModule();
-
- bool isValid() const;
-
- const struct IPAModuleInfo &info() const;
- const std::string &path() const;
-
- bool load();
-
- struct ipa_context *createContext();
-
- bool match(PipelineHandler *pipe,
- uint32_t minVersion, uint32_t maxVersion) const;
-
- bool isOpenSource() const;
-
-private:
- struct IPAModuleInfo info_;
-
- std::string libPath_;
- bool valid_;
- bool loaded_;
-
- void *dlHandle_;
- typedef struct ipa_context *(*IPAIntfFactory)(void);
- IPAIntfFactory ipaCreate_;
-
- int loadIPAModuleInfo();
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_MODULE_H__ */
diff --git a/src/libcamera/include/ipa_proxy.h b/src/libcamera/include/ipa_proxy.h
deleted file mode 100644
index e696551a..00000000
--- a/src/libcamera/include/ipa_proxy.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy.h - Image Processing Algorithm proxy
- */
-#ifndef __LIBCAMERA_IPA_PROXY_H__
-#define __LIBCAMERA_IPA_PROXY_H__
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-
-#include "ipa_module.h"
-
-namespace libcamera {
-
-class IPAProxy : public IPAInterface
-{
-public:
- IPAProxy();
- ~IPAProxy();
-
- bool isValid() const { return valid_; }
-
-protected:
- std::string resolvePath(const std::string &file) const;
-
- bool valid_;
-};
-
-class IPAProxyFactory
-{
-public:
- IPAProxyFactory(const char *name);
- virtual ~IPAProxyFactory() {}
-
- virtual std::unique_ptr<IPAProxy> create(IPAModule *ipam) = 0;
-
- const std::string &name() const { return name_; }
-
- static void registerType(IPAProxyFactory *factory);
- static std::vector<IPAProxyFactory *> &factories();
-
-private:
- std::string name_;
-};
-
-#define REGISTER_IPA_PROXY(proxy) \
-class proxy##Factory final : public IPAProxyFactory \
-{ \
-public: \
- proxy##Factory() : IPAProxyFactory(#proxy) {} \
- std::unique_ptr<IPAProxy> create(IPAModule *ipam) \
- { \
- return std::make_unique<proxy>(ipam); \
- } \
-}; \
-static proxy##Factory global_##proxy##Factory;
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_PROXY_H__ */
diff --git a/src/libcamera/include/ipc_unixsocket.h b/src/libcamera/include/ipc_unixsocket.h
deleted file mode 100644
index 820d0561..00000000
--- a/src/libcamera/include/ipc_unixsocket.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipc_unixsocket.h - IPC mechanism based on Unix sockets
- */
-
-#ifndef __LIBCAMERA_IPC_UNIXSOCKET_H__
-#define __LIBCAMERA_IPC_UNIXSOCKET_H__
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <vector>
-
-#include <libcamera/event_notifier.h>
-
-namespace libcamera {
-
-class IPCUnixSocket
-{
-public:
- struct Payload {
- std::vector<uint8_t> data;
- std::vector<int32_t> fds;
- };
-
- IPCUnixSocket();
- ~IPCUnixSocket();
-
- int create();
- int bind(int fd);
- void close();
- bool isBound() const;
-
- int send(const Payload &payload);
- int receive(Payload *payload);
-
- Signal<IPCUnixSocket *> readyRead;
-
-private:
- struct Header {
- uint32_t data;
- uint8_t fds;
- };
-
- int sendData(const void *buffer, size_t length, const int32_t *fds, unsigned int num);
- int recvData(void *buffer, size_t length, int32_t *fds, unsigned int num);
-
- void dataNotifier(EventNotifier *notifier);
-
- int fd_;
- bool headerReceived_;
- struct Header header_;
- EventNotifier *notifier_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPC_UNIXSOCKET_H__ */
diff --git a/src/libcamera/include/log.h b/src/libcamera/include/log.h
deleted file mode 100644
index ee0b4069..00000000
--- a/src/libcamera/include/log.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * log.h - Logging infrastructure
- */
-#ifndef __LIBCAMERA_LOG_H__
-#define __LIBCAMERA_LOG_H__
-
-#include <chrono>
-#include <sstream>
-
-#include "utils.h"
-
-namespace libcamera {
-
-enum LogSeverity {
- LogInvalid = -1,
- LogDebug = 0,
- LogInfo,
- LogWarning,
- LogError,
- LogFatal,
-};
-
-class LogCategory
-{
-public:
- explicit LogCategory(const char *name);
- ~LogCategory();
-
- const char *name() const { return name_; }
- LogSeverity severity() const { return severity_; }
- void setSeverity(LogSeverity severity);
-
- static const LogCategory &defaultCategory();
-
-private:
- const char *name_;
- LogSeverity severity_;
-};
-
-#define LOG_DECLARE_CATEGORY(name) \
-extern const LogCategory &_LOG_CATEGORY(name)();
-
-#define LOG_DEFINE_CATEGORY(name) \
-const LogCategory &_LOG_CATEGORY(name)() \
-{ \
- static LogCategory category(#name); \
- return category; \
-}
-
-class LogMessage
-{
-public:
- LogMessage(const char *fileName, unsigned int line,
- LogSeverity severity);
- LogMessage(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity);
- LogMessage(const LogMessage &) = delete;
- LogMessage(LogMessage &&);
- ~LogMessage();
-
- std::ostream &stream() { return msgStream_; }
-
- const utils::time_point &timestamp() const { return timestamp_; }
- LogSeverity severity() const { return severity_; }
- const LogCategory &category() const { return category_; }
- const std::string &fileInfo() const { return fileInfo_; }
- const std::string msg() const { return msgStream_.str(); }
-
-private:
- void init(const char *fileName, unsigned int line);
-
- std::ostringstream msgStream_;
- const LogCategory &category_;
- LogSeverity severity_;
- utils::time_point timestamp_;
- std::string fileInfo_;
-};
-
-class Loggable
-{
-public:
- virtual ~Loggable();
-
-protected:
- virtual std::string logPrefix() const = 0;
-
- LogMessage _log(const char *file, unsigned int line,
- LogSeverity severity) const;
- LogMessage _log(const char *file, unsigned int line,
- const LogCategory &category,
- LogSeverity severity) const;
-};
-
-LogMessage _log(const char *file, unsigned int line, LogSeverity severity);
-LogMessage _log(const char *file, unsigned int line,
- const LogCategory &category, LogSeverity severity);
-
-#ifndef __DOXYGEN__
-#define _LOG_CATEGORY(name) logCategory##name
-
-#define _LOG1(severity) \
- _log(__FILE__, __LINE__, Log##severity).stream()
-#define _LOG2(category, severity) \
- _log(__FILE__, __LINE__, _LOG_CATEGORY(category)(), Log##severity).stream()
-
-/*
- * Expand the LOG() macro to _LOG1() or _LOG2() based on the number of
- * arguments.
- */
-#define _LOG_MACRO(_1, _2, NAME, ...) NAME
-#define LOG(...) _LOG_MACRO(__VA_ARGS__, _LOG2, _LOG1)(__VA_ARGS__)
-#else /* __DOXYGEN___ */
-#define LOG(category, severity)
-#endif /* __DOXYGEN__ */
-
-#ifndef NDEBUG
-#define ASSERT(condition) static_cast<void>(({ \
- if (!(condition)) \
- LOG(Fatal) << "assertion \"" #condition "\" failed"; \
-}))
-#else
-#define ASSERT(condition) static_cast<void>(false && (condition))
-#endif
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_LOG_H__ */
diff --git a/src/libcamera/include/media_device.h b/src/libcamera/include/media_device.h
deleted file mode 100644
index 44a59e70..00000000
--- a/src/libcamera/include/media_device.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * media_device.h - Media device handler
- */
-#ifndef __LIBCAMERA_MEDIA_DEVICE_H__
-#define __LIBCAMERA_MEDIA_DEVICE_H__
-
-#include <map>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-#include <libcamera/signal.h>
-
-#include "log.h"
-#include "media_object.h"
-
-namespace libcamera {
-
-class MediaDevice : protected Loggable
-{
-public:
- MediaDevice(const std::string &deviceNode);
- ~MediaDevice();
-
- bool acquire();
- void release();
- bool busy() const { return acquired_; }
-
- bool lock();
- void unlock();
-
- int populate();
- bool valid() const { return valid_; }
-
- const std::string driver() const { return driver_; }
- const std::string deviceNode() const { return deviceNode_; }
- const std::string model() const { return model_; }
-
- const std::vector<MediaEntity *> &entities() const { return entities_; }
- MediaEntity *getEntityByName(const std::string &name) const;
-
- MediaLink *link(const std::string &sourceName, unsigned int sourceIdx,
- const std::string &sinkName, unsigned int sinkIdx);
- MediaLink *link(const MediaEntity *source, unsigned int sourceIdx,
- const MediaEntity *sink, unsigned int sinkIdx);
- MediaLink *link(const MediaPad *source, const MediaPad *sink);
- int disableLinks();
-
- Signal<MediaDevice *> disconnected;
-
-protected:
- std::string logPrefix() const;
-
-private:
- std::string driver_;
- std::string deviceNode_;
- std::string model_;
- unsigned int version_;
-
- int fd_;
- bool valid_;
- bool acquired_;
- bool lockOwner_;
-
- int open();
- void close();
-
- std::map<unsigned int, MediaObject *> objects_;
- MediaObject *object(unsigned int id);
- bool addObject(MediaObject *object);
- void clear();
-
- std::vector<MediaEntity *> entities_;
-
- struct media_v2_interface *findInterface(const struct media_v2_topology &topology,
- unsigned int entityId);
- bool populateEntities(const struct media_v2_topology &topology);
- bool populatePads(const struct media_v2_topology &topology);
- bool populateLinks(const struct media_v2_topology &topology);
- void fixupEntityFlags(struct media_v2_entity *entity);
-
- friend int MediaLink::setEnabled(bool enable);
- int setupLink(const MediaLink *link, unsigned int flags);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MEDIA_DEVICE_H__ */
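
For context, a consumer of the MediaDevice interface deleted above would
populate and wire up the media graph roughly as follows. This is a sketch
only, with a hypothetical device node and entity names, and error handling
elided:

    MediaDevice media("/dev/media0");
    if (media.populate())  /* assumed to return 0 on success */
        return;

    MediaEntity *sensor = media.getEntityByName("imx219 0-0010");
    MediaLink *link = media.link("imx219 0-0010", 0, "csi2", 0);
    if (sensor && link)
        link->setEnabled(true);
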
diff --git a/src/libcamera/include/media_object.h b/src/libcamera/include/media_object.h
deleted file mode 100644
index 748eafdc..00000000
--- a/src/libcamera/include/media_object.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * media_object.h - Media Device objects: entities, pads and links.
- */
-#ifndef __LIBCAMERA_MEDIA_OBJECT_H__
-#define __LIBCAMERA_MEDIA_OBJECT_H__
-
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-namespace libcamera {
-
-class MediaDevice;
-class MediaEntity;
-class MediaPad;
-
-class MediaObject
-{
-public:
- MediaDevice *device() { return dev_; }
- unsigned int id() const { return id_; }
-
-protected:
- friend class MediaDevice;
-
- MediaObject(MediaDevice *dev, unsigned int id)
- : dev_(dev), id_(id)
- {
- }
- virtual ~MediaObject() {}
-
- MediaDevice *dev_;
- unsigned int id_;
-};
-
-class MediaLink : public MediaObject
-{
-public:
- MediaPad *source() const { return source_; }
- MediaPad *sink() const { return sink_; }
- unsigned int flags() const { return flags_; }
- int setEnabled(bool enable);
-
-private:
- friend class MediaDevice;
-
- MediaLink(const struct media_v2_link *link,
- MediaPad *source, MediaPad *sink);
- MediaLink(const MediaLink &) = delete;
- ~MediaLink() {}
-
- MediaPad *source_;
- MediaPad *sink_;
- unsigned int flags_;
-};
-
-class MediaPad : public MediaObject
-{
-public:
- unsigned int index() const { return index_; }
- MediaEntity *entity() const { return entity_; }
- unsigned int flags() const { return flags_; }
- const std::vector<MediaLink *> &links() const { return links_; }
-
- void addLink(MediaLink *link);
-
-private:
- friend class MediaDevice;
-
- MediaPad(const struct media_v2_pad *pad, MediaEntity *entity);
- MediaPad(const MediaPad &) = delete;
- ~MediaPad();
-
- unsigned int index_;
- MediaEntity *entity_;
- unsigned int flags_;
-
- std::vector<MediaLink *> links_;
-};
-
-class MediaEntity : public MediaObject
-{
-public:
- const std::string &name() const { return name_; }
- unsigned int function() const { return function_; }
- unsigned int flags() const { return flags_; }
- const std::string &deviceNode() const { return deviceNode_; }
- unsigned int deviceMajor() const { return major_; }
- unsigned int deviceMinor() const { return minor_; }
-
- const std::vector<MediaPad *> &pads() const { return pads_; }
-
- const MediaPad *getPadByIndex(unsigned int index) const;
- const MediaPad *getPadById(unsigned int id) const;
-
- int setDeviceNode(const std::string &deviceNode);
-
-private:
- friend class MediaDevice;
-
- MediaEntity(MediaDevice *dev, const struct media_v2_entity *entity,
- unsigned int major = 0, unsigned int minor = 0);
- MediaEntity(const MediaEntity &) = delete;
- ~MediaEntity();
-
- std::string name_;
- unsigned int function_;
- unsigned int flags_;
- std::string deviceNode_;
- unsigned int major_;
- unsigned int minor_;
-
- std::vector<MediaPad *> pads_;
-
- void addPad(MediaPad *pad);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MEDIA_OBJECT_H__ */
diff --git a/src/libcamera/include/meson.build b/src/libcamera/include/meson.build
deleted file mode 100644
index 17e2bed9..00000000
--- a/src/libcamera/include/meson.build
+++ /dev/null
@@ -1,30 +0,0 @@
-libcamera_headers = files([
- 'byte_stream_buffer.h',
- 'camera_controls.h',
- 'camera_sensor.h',
- 'control_serializer.h',
- 'control_validator.h',
- 'device_enumerator.h',
- 'device_enumerator_sysfs.h',
- 'device_enumerator_udev.h',
- 'event_dispatcher_poll.h',
- 'formats.h',
- 'ipa_context_wrapper.h',
- 'ipa_manager.h',
- 'ipa_module.h',
- 'ipa_proxy.h',
- 'ipc_unixsocket.h',
- 'log.h',
- 'media_device.h',
- 'media_object.h',
- 'message.h',
- 'pipeline_handler.h',
- 'process.h',
- 'semaphore.h',
- 'thread.h',
- 'utils.h',
- 'v4l2_controls.h',
- 'v4l2_device.h',
- 'v4l2_subdevice.h',
- 'v4l2_videodevice.h',
-])
diff --git a/src/libcamera/include/message.h b/src/libcamera/include/message.h
deleted file mode 100644
index 8e8b013d..00000000
--- a/src/libcamera/include/message.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * message.h - Message queue support
- */
-#ifndef __LIBCAMERA_MESSAGE_H__
-#define __LIBCAMERA_MESSAGE_H__
-
-#include <atomic>
-
-#include <libcamera/bound_method.h>
-
-namespace libcamera {
-
-class BoundMethodBase;
-class Object;
-class Semaphore;
-class Thread;
-
-class Message
-{
-public:
- enum Type {
- None = 0,
- InvokeMessage = 1,
- ThreadMoveMessage = 2,
- UserMessage = 1000,
- };
-
- Message(Type type);
- virtual ~Message();
-
- Type type() const { return type_; }
- Object *receiver() const { return receiver_; }
-
- static Type registerMessageType();
-
-private:
- friend class Thread;
-
- Type type_;
- Object *receiver_;
-
- static std::atomic_uint nextUserType_;
-};
-
-class InvokeMessage : public Message
-{
-public:
- InvokeMessage(BoundMethodBase *method,
- std::shared_ptr<BoundMethodPackBase> pack,
- Semaphore *semaphore = nullptr,
- bool deleteMethod = false);
- ~InvokeMessage();
-
- Semaphore *semaphore() const { return semaphore_; }
-
- void invoke();
-
-private:
- BoundMethodBase *method_;
- std::shared_ptr<BoundMethodPackBase> pack_;
- Semaphore *semaphore_;
- bool deleteMethod_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MESSAGE_H__ */
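
The registerMessageType() hook above lets callers mint custom message types
at or above UserMessage. A minimal sketch of the intended pattern
(illustrative, not part of this patch):

    class MyMessage : public Message
    {
    public:
        MyMessage() : Message(type()) {}

        static Message::Type type()
        {
            /* Allocate a unique type id once, at or above UserMessage. */
            static Message::Type t = Message::registerMessageType();
            return t;
        }
    };
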
diff --git a/src/libcamera/include/pipeline_handler.h b/src/libcamera/include/pipeline_handler.h
deleted file mode 100644
index 3fcfeda4..00000000
--- a/src/libcamera/include/pipeline_handler.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * pipeline_handler.h - Pipeline handler infrastructure
- */
-#ifndef __LIBCAMERA_PIPELINE_HANDLER_H__
-#define __LIBCAMERA_PIPELINE_HANDLER_H__
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <sys/types.h>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <libcamera/controls.h>
-#include <libcamera/object.h>
-#include <libcamera/stream.h>
-
-namespace libcamera {
-
-class Camera;
-class CameraConfiguration;
-class CameraManager;
-class DeviceEnumerator;
-class DeviceMatch;
-class FrameBuffer;
-class MediaDevice;
-class PipelineHandler;
-class Request;
-
-class CameraData
-{
-public:
- explicit CameraData(PipelineHandler *pipe)
- : pipe_(pipe)
- {
- }
- virtual ~CameraData() {}
-
- Camera *camera_;
- PipelineHandler *pipe_;
- std::list<Request *> queuedRequests_;
- ControlInfoMap controlInfo_;
- ControlList properties_;
- std::unique_ptr<IPAInterface> ipa_;
-
-private:
- CameraData(const CameraData &) = delete;
- CameraData &operator=(const CameraData &) = delete;
-};
-
-class PipelineHandler : public std::enable_shared_from_this<PipelineHandler>,
- public Object
-{
-public:
- PipelineHandler(CameraManager *manager);
- virtual ~PipelineHandler();
-
- virtual bool match(DeviceEnumerator *enumerator) = 0;
- MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
- const DeviceMatch &dm);
-
- bool lock();
- void unlock();
-
- const ControlInfoMap &controls(Camera *camera);
- const ControlList &properties(Camera *camera);
-
- virtual CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) = 0;
- virtual int configure(Camera *camera, CameraConfiguration *config) = 0;
-
- virtual int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
-
- virtual int start(Camera *camera) = 0;
- virtual void stop(Camera *camera) = 0;
-
- int queueRequest(Camera *camera, Request *request);
-
- bool completeBuffer(Camera *camera, Request *request,
- FrameBuffer *buffer);
- void completeRequest(Camera *camera, Request *request);
-
- const char *name() const { return name_; }
-
-protected:
- void registerCamera(std::shared_ptr<Camera> camera,
- std::unique_ptr<CameraData> data, dev_t devnum = 0);
- void hotplugMediaDevice(MediaDevice *media);
-
- virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
-
- CameraData *cameraData(const Camera *camera);
-
- CameraManager *manager_;
-
-private:
- void mediaDeviceDisconnected(MediaDevice *media);
- virtual void disconnect();
-
- std::vector<std::shared_ptr<MediaDevice>> mediaDevices_;
- std::vector<std::weak_ptr<Camera>> cameras_;
- std::map<const Camera *, std::unique_ptr<CameraData>> cameraData_;
-
- const char *name_;
-
- friend class PipelineHandlerFactory;
-};
-
-class PipelineHandlerFactory
-{
-public:
- PipelineHandlerFactory(const char *name);
- virtual ~PipelineHandlerFactory() {}
-
- std::shared_ptr<PipelineHandler> create(CameraManager *manager);
-
- const std::string &name() const { return name_; }
-
- static void registerType(PipelineHandlerFactory *factory);
- static std::vector<PipelineHandlerFactory *> &factories();
-
-private:
- virtual PipelineHandler *createInstance(CameraManager *manager) = 0;
-
- std::string name_;
-};
-
-#define REGISTER_PIPELINE_HANDLER(handler) \
-class handler##Factory final : public PipelineHandlerFactory \
-{ \
-public: \
- handler##Factory() : PipelineHandlerFactory(#handler) {} \
- \
-private: \
- PipelineHandler *createInstance(CameraManager *manager) \
- { \
- return new handler(manager); \
- } \
-}; \
-static handler##Factory global_##handler##Factory;
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_PIPELINE_HANDLER_H__ */
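
The REGISTER_PIPELINE_HANDLER() macro above instantiates a factory at
static-initialization time; the base PipelineHandlerFactory constructor is
expected to call registerType(). A sketch with a hypothetical handler name:

    class PipelineHandlerVivid : public PipelineHandler
    {
        /* match(), generateConfiguration(), configure(), ... */
    };

    REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid)
    /* Expands to PipelineHandlerVividFactory plus one global instance,
     * making the handler discoverable through factories(). */
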
diff --git a/src/libcamera/include/process.h b/src/libcamera/include/process.h
deleted file mode 100644
index d322fce1..00000000
--- a/src/libcamera/include/process.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * process.h - Process object
- */
-#ifndef __LIBCAMERA_PROCESS_H__
-#define __LIBCAMERA_PROCESS_H__
-
-#include <string>
-#include <vector>
-
-#include <libcamera/event_notifier.h>
-
-namespace libcamera {
-
-class Process final
-{
-public:
- enum ExitStatus {
- NotExited,
- NormalExit,
- SignalExit,
- };
-
- Process();
- ~Process();
-
- int start(const std::string &path,
- const std::vector<std::string> &args = std::vector<std::string>(),
- const std::vector<int> &fds = std::vector<int>());
-
- ExitStatus exitStatus() const { return exitStatus_; }
- int exitCode() const { return exitCode_; }
-
- void kill();
-
- Signal<Process *, enum ExitStatus, int> finished;
-
-private:
- void closeAllFdsExcept(const std::vector<int> &fds);
- int isolate();
- void died(int wstatus);
-
- pid_t pid_;
- bool running_;
- enum ExitStatus exitStatus_;
- int exitCode_;
-
- friend class ProcessManager;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_PROCESS_H__ */
diff --git a/src/libcamera/include/semaphore.h b/src/libcamera/include/semaphore.h
deleted file mode 100644
index c6b28653..00000000
--- a/src/libcamera/include/semaphore.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * semaphore.h - General-purpose counting semaphore
- */
-#ifndef __LIBCAMERA_SEMAPHORE_H__
-#define __LIBCAMERA_SEMAPHORE_H__
-
-#include <condition_variable>
-
-#include "thread.h"
-
-namespace libcamera {
-
-class Semaphore
-{
-public:
- Semaphore(unsigned int n = 0);
-
- unsigned int available();
- void acquire(unsigned int n = 1);
- bool tryAcquire(unsigned int n = 1);
- void release(unsigned int n = 1);
-
-private:
- Mutex mutex_;
- std::condition_variable cv_;
- unsigned int available_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_SEMAPHORE_H__ */
diff --git a/src/libcamera/include/thread.h b/src/libcamera/include/thread.h
deleted file mode 100644
index d700f111..00000000
--- a/src/libcamera/include/thread.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * thread.h - Thread support
- */
-#ifndef __LIBCAMERA_THREAD_H__
-#define __LIBCAMERA_THREAD_H__
-
-#include <memory>
-#include <mutex>
-#include <sys/types.h>
-#include <thread>
-
-#include <libcamera/signal.h>
-
-#include "utils.h"
-
-namespace libcamera {
-
-class EventDispatcher;
-class Message;
-class Object;
-class ThreadData;
-class ThreadMain;
-
-using Mutex = std::mutex;
-using MutexLocker = std::unique_lock<std::mutex>;
-
-class Thread
-{
-public:
- Thread();
- virtual ~Thread();
-
- void start();
- void exit(int code = 0);
- bool wait(utils::duration duration = utils::duration::max());
-
- bool isRunning();
-
- Signal<Thread *> finished;
-
- static Thread *current();
- static pid_t currentId();
-
- EventDispatcher *eventDispatcher();
- void setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher);
-
- void dispatchMessages();
-
-protected:
- int exec();
- virtual void run();
-
-private:
- void startThread();
- void finishThread();
-
- void postMessage(std::unique_ptr<Message> msg, Object *receiver);
- void removeMessages(Object *receiver);
-
- friend class Object;
- friend class ThreadData;
- friend class ThreadMain;
-
- void moveObject(Object *object);
- void moveObject(Object *object, ThreadData *currentData,
- ThreadData *targetData);
-
- std::thread thread_;
- ThreadData *data_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_THREAD_H__ */
diff --git a/src/libcamera/include/utils.h b/src/libcamera/include/utils.h
deleted file mode 100644
index cfa620f2..00000000
--- a/src/libcamera/include/utils.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * utils.h - Miscellaneous utility functions
- */
-#ifndef __LIBCAMERA_UTILS_H__
-#define __LIBCAMERA_UTILS_H__
-
-#include <algorithm>
-#include <chrono>
-#include <memory>
-#include <ostream>
-#include <string>
-#include <string.h>
-#include <sys/time.h>
-
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-
-#ifndef __DOXYGEN__
-
-/* uClibc and uClibc-ng don't provide O_TMPFILE */
-#ifndef O_TMPFILE
-#define O_TMPFILE (020000000 | O_DIRECTORY)
-#endif
-
-#endif
-
-namespace libcamera {
-
-namespace utils {
-
-const char *basename(const char *path);
-
-char *secure_getenv(const char *name);
-std::string dirname(const std::string &path);
-
-template<class InputIt1, class InputIt2>
-unsigned int set_overlap(InputIt1 first1, InputIt1 last1,
- InputIt2 first2, InputIt2 last2)
-{
- unsigned int count = 0;
-
- while (first1 != last1 && first2 != last2) {
- if (*first1 < *first2) {
- ++first1;
- } else {
- if (!(*first2 < *first1))
- count++;
- ++first2;
- }
- }
-
- return count;
-}
-
-/* C++11 doesn't provide std::clamp */
-template <typename T>
-const T& clamp(const T& v, const T& lo, const T& hi)
-{
- return std::max(lo, std::min(v, hi));
-}
-
-using clock = std::chrono::steady_clock;
-using duration = std::chrono::steady_clock::duration;
-using time_point = std::chrono::steady_clock::time_point;
-
-struct timespec duration_to_timespec(const duration &value);
-std::string time_point_to_string(const time_point &time);
-
-#ifndef __DOXYGEN__
-struct _hex {
- uint64_t v;
- unsigned int w;
-};
-
-std::basic_ostream<char, std::char_traits<char>> &
-operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h);
-#endif
-
-template<typename T>
-_hex hex(T value, unsigned int width = 0);
-
-#ifndef __DOXYGEN__
-template<>
-inline _hex hex<int32_t>(int32_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 8 };
-}
-
-template<>
-inline _hex hex<uint32_t>(uint32_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 8 };
-}
-
-template<>
-inline _hex hex<int64_t>(int64_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 16 };
-}
-
-template<>
-inline _hex hex<uint64_t>(uint64_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 16 };
-}
-#endif
-
-size_t strlcpy(char *dst, const char *src, size_t size);
-
-namespace details {
-
-class StringSplitter
-{
-public:
- StringSplitter(const std::string &str, const std::string &delim);
-
- class iterator
- {
- public:
- iterator(const StringSplitter *ss, std::string::size_type pos);
-
- iterator &operator++();
- std::string operator*() const;
- bool operator!=(const iterator &other) const;
-
- private:
- const StringSplitter *ss_;
- std::string::size_type pos_;
- std::string::size_type next_;
- };
-
- iterator begin() const;
- iterator end() const;
-
-private:
- std::string str_;
- std::string delim_;
-};
-
-} /* namespace details */
-
-details::StringSplitter split(const std::string &str, const std::string &delim);
-
-std::string libcameraBuildPath();
-
-} /* namespace utils */
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_UTILS_H__ */
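
Two of the helpers above benefit from a usage note: hex() defaults its width
from the integer size (8 digits for 32-bit values, 16 for 64-bit), and
split() returns a StringSplitter consumed through iteration. A short sketch,
illustrative only:

    uint32_t reg = 0xdeadbeef;
    std::cout << utils::hex(reg) << std::endl;     /* default 8-digit width */
    std::cout << utils::hex(reg, 4) << std::endl;  /* explicit width */

    for (const std::string &part : utils::split("vivid,vimc", ","))
        std::cout << part << std::endl;            /* "vivid", then "vimc" */
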
diff --git a/src/libcamera/include/v4l2_controls.h b/src/libcamera/include/v4l2_controls.h
deleted file mode 100644
index cffe9efd..00000000
--- a/src/libcamera/include/v4l2_controls.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_controls.h - V4L2 Controls Support
- */
-
-#ifndef __LIBCAMERA_V4L2_CONTROLS_H__
-#define __LIBCAMERA_V4L2_CONTROLS_H__
-
-#include <linux/videodev2.h>
-
-#include <libcamera/controls.h>
-
-namespace libcamera {
-
-class V4L2ControlId : public ControlId
-{
-public:
- V4L2ControlId(const struct v4l2_query_ext_ctrl &ctrl);
-};
-
-class V4L2ControlInfo : public ControlInfo
-{
-public:
- V4L2ControlInfo(const struct v4l2_query_ext_ctrl &ctrl);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_CONTROLS_H__ */
diff --git a/src/libcamera/include/v4l2_device.h b/src/libcamera/include/v4l2_device.h
deleted file mode 100644
index ce8edd98..00000000
--- a/src/libcamera/include/v4l2_device.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_device.h - Common base for V4L2 video devices and subdevices
- */
-#ifndef __LIBCAMERA_V4L2_DEVICE_H__
-#define __LIBCAMERA_V4L2_DEVICE_H__
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <linux/videodev2.h>
-
-#include "log.h"
-#include "v4l2_controls.h"
-
-namespace libcamera {
-
-class V4L2Device : protected Loggable
-{
-public:
- void close();
- bool isOpen() const { return fd_ != -1; }
-
- const ControlInfoMap &controls() const { return controls_; }
-
- int getControls(ControlList *ctrls);
- int setControls(ControlList *ctrls);
-
- const std::string &deviceNode() const { return deviceNode_; }
-
-protected:
- V4L2Device(const std::string &deviceNode);
- ~V4L2Device();
-
- int open(unsigned int flags);
- int setFd(int fd);
-
- int ioctl(unsigned long request, void *argp);
-
- int fd() { return fd_; }
-
-private:
- void listControls();
- void updateControls(ControlList *ctrls,
- const struct v4l2_ext_control *v4l2Ctrls,
- unsigned int count);
-
- std::map<unsigned int, struct v4l2_query_ext_ctrl> controlInfo_;
- std::vector<std::unique_ptr<V4L2ControlId>> controlIds_;
- ControlInfoMap controls_;
- std::string deviceNode_;
- int fd_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_DEVICE_H__ */
diff --git a/src/libcamera/include/v4l2_subdevice.h b/src/libcamera/include/v4l2_subdevice.h
deleted file mode 100644
index 9c077674..00000000
--- a/src/libcamera/include/v4l2_subdevice.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_subdevice.h - V4L2 Subdevice
- */
-#ifndef __LIBCAMERA_V4L2_SUBDEVICE_H__
-#define __LIBCAMERA_V4L2_SUBDEVICE_H__
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include <libcamera/geometry.h>
-
-#include "formats.h"
-#include "log.h"
-#include "media_object.h"
-#include "v4l2_device.h"
-
-namespace libcamera {
-
-class MediaDevice;
-
-struct V4L2SubdeviceFormat {
- uint32_t mbus_code;
- Size size;
-
- const std::string toString() const;
-};
-
-class V4L2Subdevice : public V4L2Device
-{
-public:
- explicit V4L2Subdevice(const MediaEntity *entity);
- V4L2Subdevice(const V4L2Subdevice &) = delete;
- V4L2Subdevice &operator=(const V4L2Subdevice &) = delete;
- ~V4L2Subdevice();
-
- int open();
-
- const MediaEntity *entity() const { return entity_; }
-
- int setCrop(unsigned int pad, Rectangle *rect);
- int setCompose(unsigned int pad, Rectangle *rect);
-
- ImageFormats formats(unsigned int pad);
-
- int getFormat(unsigned int pad, V4L2SubdeviceFormat *format);
- int setFormat(unsigned int pad, V4L2SubdeviceFormat *format);
-
- static V4L2Subdevice *fromEntityName(const MediaDevice *media,
- const std::string &entity);
-
-protected:
- std::string logPrefix() const;
-
-private:
- std::vector<unsigned int> enumPadCodes(unsigned int pad);
- std::vector<SizeRange> enumPadSizes(unsigned int pad,
- unsigned int code);
-
- int setSelection(unsigned int pad, unsigned int target,
- Rectangle *rect);
-
- const MediaEntity *entity_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_SUBDEVICE_H__ */
diff --git a/src/libcamera/include/v4l2_videodevice.h b/src/libcamera/include/v4l2_videodevice.h
deleted file mode 100644
index 7d7c4a9e..00000000
--- a/src/libcamera/include/v4l2_videodevice.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_videodevice.h - V4L2 Video Device
- */
-#ifndef __LIBCAMERA_V4L2_VIDEODEVICE_H__
-#define __LIBCAMERA_V4L2_VIDEODEVICE_H__
-
-#include <atomic>
-#include <memory>
-#include <stdint.h>
-#include <string>
-#include <vector>
-
-#include <linux/videodev2.h>
-
-#include <libcamera/buffer.h>
-#include <libcamera/geometry.h>
-#include <libcamera/pixelformats.h>
-#include <libcamera/signal.h>
-
-#include "formats.h"
-#include "log.h"
-#include "v4l2_device.h"
-
-namespace libcamera {
-
-class EventNotifier;
-class FileDescriptor;
-class MediaDevice;
-class MediaEntity;
-
-struct V4L2Capability final : v4l2_capability {
- const char *driver() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::driver);
- }
- const char *card() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::card);
- }
- const char *bus_info() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::bus_info);
- }
- unsigned int device_caps() const
- {
- return capabilities & V4L2_CAP_DEVICE_CAPS
- ? v4l2_capability::device_caps
- : v4l2_capability::capabilities;
- }
- bool isMultiplanar() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_VIDEO_M2M_MPLANE);
- }
- bool isCapture() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_META_CAPTURE);
- }
- bool isOutput() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_META_OUTPUT);
- }
- bool isVideo() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE);
- }
- bool isM2M() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_M2M |
- V4L2_CAP_VIDEO_M2M_MPLANE);
- }
- bool isMeta() const
- {
- return device_caps() & (V4L2_CAP_META_CAPTURE |
- V4L2_CAP_META_OUTPUT);
- }
- bool isVideoCapture() const
- {
- return isVideo() && isCapture();
- }
- bool isVideoOutput() const
- {
- return isVideo() && isOutput();
- }
- bool isMetaCapture() const
- {
- return isMeta() && isCapture();
- }
- bool isMetaOutput() const
- {
- return isMeta() && isOutput();
- }
- bool hasStreaming() const
- {
- return device_caps() & V4L2_CAP_STREAMING;
- }
-};
-
-class V4L2BufferCache
-{
-public:
- V4L2BufferCache(unsigned int numEntries);
- V4L2BufferCache(const std::vector<std::unique_ptr<FrameBuffer>> &buffers);
- ~V4L2BufferCache();
-
- int get(const FrameBuffer &buffer);
- void put(unsigned int index);
-
-private:
- class Entry
- {
- public:
- Entry();
- Entry(bool free, uint64_t lastUsed, const FrameBuffer &buffer);
-
- bool operator==(const FrameBuffer &buffer) const;
-
- bool free;
- uint64_t lastUsed;
-
- private:
- struct Plane {
- Plane(const FrameBuffer::Plane &plane)
- : fd(plane.fd.fd()), length(plane.length)
- {
- }
-
- int fd;
- unsigned int length;
- };
-
- std::vector<Plane> planes_;
- };
-
- std::atomic<uint64_t> lastUsedCounter_;
- std::vector<Entry> cache_;
- /* \todo Expose the miss counter through an instrumentation API. */
- unsigned int missCounter_;
-};
-
-class V4L2PixelFormat
-{
-public:
- V4L2PixelFormat()
- : fourcc_(0)
- {
- }
-
- explicit V4L2PixelFormat(uint32_t fourcc)
- : fourcc_(fourcc)
- {
- }
-
- bool isValid() const { return fourcc_ != 0; }
- uint32_t fourcc() const { return fourcc_; }
- operator uint32_t() const { return fourcc_; }
-
- std::string toString() const;
-
-private:
- uint32_t fourcc_;
-};
-
-class V4L2DeviceFormat
-{
-public:
- V4L2PixelFormat fourcc;
- Size size;
-
- struct {
- uint32_t size;
- uint32_t bpl;
- } planes[3];
- unsigned int planesCount;
-
- const std::string toString() const;
-};
-
-class V4L2VideoDevice : public V4L2Device
-{
-public:
- explicit V4L2VideoDevice(const std::string &deviceNode);
- explicit V4L2VideoDevice(const MediaEntity *entity);
- V4L2VideoDevice(const V4L2VideoDevice &) = delete;
- ~V4L2VideoDevice();
-
- V4L2VideoDevice &operator=(const V4L2VideoDevice &) = delete;
-
- int open();
- int open(int handle, enum v4l2_buf_type type);
- void close();
-
- const char *driverName() const { return caps_.driver(); }
- const char *deviceName() const { return caps_.card(); }
- const char *busName() const { return caps_.bus_info(); }
-
- int getFormat(V4L2DeviceFormat *format);
- int setFormat(V4L2DeviceFormat *format);
- std::map<V4L2PixelFormat, std::vector<SizeRange>> formats();
-
- int setCrop(Rectangle *rect);
- int setCompose(Rectangle *rect);
-
- int allocateBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- int importBuffers(unsigned int count);
- int releaseBuffers();
-
- int queueBuffer(FrameBuffer *buffer);
- Signal<FrameBuffer *> bufferReady;
-
- int streamOn();
- int streamOff();
-
- static V4L2VideoDevice *fromEntityName(const MediaDevice *media,
- const std::string &entity);
-
- static PixelFormat toPixelFormat(V4L2PixelFormat v4l2Fourcc);
- V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat);
- static V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar);
-
-protected:
- std::string logPrefix() const;
-
-private:
- int getFormatMeta(V4L2DeviceFormat *format);
- int setFormatMeta(V4L2DeviceFormat *format);
-
- int getFormatMultiplane(V4L2DeviceFormat *format);
- int setFormatMultiplane(V4L2DeviceFormat *format);
-
- int getFormatSingleplane(V4L2DeviceFormat *format);
- int setFormatSingleplane(V4L2DeviceFormat *format);
-
- std::vector<V4L2PixelFormat> enumPixelformats();
- std::vector<SizeRange> enumSizes(V4L2PixelFormat pixelFormat);
-
- int setSelection(unsigned int target, Rectangle *rect);
-
- int requestBuffers(unsigned int count, enum v4l2_memory memoryType);
- int createBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- std::unique_ptr<FrameBuffer> createBuffer(unsigned int index);
- FileDescriptor exportDmabufFd(unsigned int index, unsigned int plane);
-
- void bufferAvailable(EventNotifier *notifier);
- FrameBuffer *dequeueBuffer();
-
- V4L2Capability caps_;
-
- enum v4l2_buf_type bufferType_;
- enum v4l2_memory memoryType_;
-
- V4L2BufferCache *cache_;
- std::map<unsigned int, FrameBuffer *> queuedBuffers_;
-
- EventNotifier *fdEvent_;
-};
-
-class V4L2M2MDevice
-{
-public:
- V4L2M2MDevice(const std::string &deviceNode);
- ~V4L2M2MDevice();
-
- int open();
- void close();
-
- V4L2VideoDevice *output() { return output_; }
- V4L2VideoDevice *capture() { return capture_; }
-
-private:
- std::string deviceNode_;
-
- V4L2VideoDevice *output_;
- V4L2VideoDevice *capture_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_VIDEODEVICE_H__ */
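
All of the V4L2Capability predicates above test device_caps(), which falls
back to the legacy capabilities field when V4L2_CAP_DEVICE_CAPS is unset. A
probing sketch against a hypothetical device node, error handling elided:

    V4L2VideoDevice video("/dev/video0");
    if (video.open())  /* assumed to return 0 on success */
        return;

    std::cout << "driver: " << video.driverName()
              << ", card: " << video.deviceName() << std::endl;

    video.close();
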
diff --git a/src/libcamera/ipa/meson.build b/src/libcamera/ipa/meson.build
new file mode 100644
index 00000000..ef73b3f9
--- /dev/null
+++ b/src/libcamera/ipa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_ipa_interfaces = []
+
+foreach file : ipa_mojom_files
+ # {pipeline}_ipa_interface.cpp
+ libcamera_ipa_interfaces += \
+ custom_target(input : file,
+ output : '@BASENAME@_ipa_interface.cpp',
+ command : [
+ mojom_docs_extractor,
+ '-o', '@OUTPUT@', '@INPUT@'
+ ])
+endforeach
diff --git a/src/libcamera/ipa_context_wrapper.cpp b/src/libcamera/ipa_context_wrapper.cpp
deleted file mode 100644
index 946a2fd8..00000000
--- a/src/libcamera/ipa_context_wrapper.cpp
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_context_wrapper.cpp - Image Processing Algorithm context wrapper
- */
-
-#include "ipa_context_wrapper.h"
-
-#include <vector>
-
-#include <libcamera/controls.h>
-
-#include "byte_stream_buffer.h"
-#include "utils.h"
-
-/**
- * \file ipa_context_wrapper.h
- * \brief Image Processing Algorithm context wrapper
- */
-
-namespace libcamera {
-
-/**
- * \class IPAContextWrapper
- * \brief Wrap an ipa_context and expose it as an IPAInterface
- *
- * The IPAContextWrapper class wraps an ipa_context, provided by an IPA
- * module, and exposes an IPAInterface. This mechanism is used for IPAs that
- * are not isolated in a separate process, to allow direct calls from pipeline
- * handlers using the IPAInterface API instead of the lower-level ipa_context
- * API.
- *
- * The IPAInterface methods are converted to the ipa_context API by translating
- * all C++ arguments into plain C structures or byte arrays that contain no
- * pointers, as required by the ipa_context API.
- */
-
-/**
- * \brief Construct an IPAContextWrapper instance that wraps the \a context
- * \param[in] context The IPA module context
- *
- * Ownership of the \a context is passed to the IPAContextWrapper. The context remains
- * valid for the whole lifetime of the wrapper and is destroyed automatically
- * with it.
- */
-IPAContextWrapper::IPAContextWrapper(struct ipa_context *context)
- : ctx_(context), intf_(nullptr)
-{
- if (!ctx_)
- return;
-
- bool forceCApi = !!utils::secure_getenv("LIBCAMERA_IPA_FORCE_C_API");
-
- if (!forceCApi && ctx_ && ctx_->ops->get_interface) {
- intf_ = reinterpret_cast<IPAInterface *>(ctx_->ops->get_interface(ctx_));
- intf_->queueFrameAction.connect(this, &IPAContextWrapper::doQueueFrameAction);
- return;
- }
-
- ctx_->ops->register_callbacks(ctx_, &IPAContextWrapper::callbacks_,
- this);
-}
-
-IPAContextWrapper::~IPAContextWrapper()
-{
- if (!ctx_)
- return;
-
- ctx_->ops->destroy(ctx_);
-}
-
-int IPAContextWrapper::init()
-{
- if (intf_)
- return intf_->init();
-
- if (!ctx_)
- return 0;
-
- ctx_->ops->init(ctx_);
-
- return 0;
-}
-
-void IPAContextWrapper::configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls)
-{
- if (intf_)
- return intf_->configure(streamConfig, entityControls);
-
- if (!ctx_)
- return;
-
- serializer_.reset();
-
- /* Translate the IPA stream configurations map. */
- struct ipa_stream c_streams[streamConfig.size()];
-
- unsigned int i = 0;
- for (const auto &stream : streamConfig) {
- struct ipa_stream *c_stream = &c_streams[i];
- unsigned int id = stream.first;
- const IPAStream &ipaStream = stream.second;
-
- c_stream->id = id;
- c_stream->pixel_format = ipaStream.pixelFormat;
- c_stream->width = ipaStream.size.width;
- c_stream->height = ipaStream.size.height;
-
- ++i;
- }
-
- /* Translate the IPA entity controls map. */
- struct ipa_control_info_map c_info_maps[entityControls.size()];
- std::vector<std::vector<uint8_t>> data(entityControls.size());
-
- i = 0;
- for (const auto &info : entityControls) {
- struct ipa_control_info_map &c_info_map = c_info_maps[i];
- unsigned int id = info.first;
- const ControlInfoMap &infoMap = info.second;
-
- size_t infoMapSize = serializer_.binarySize(infoMap);
- data[i].resize(infoMapSize);
- ByteStreamBuffer byteStream(data[i].data(), data[i].size());
- serializer_.serialize(infoMap, byteStream);
-
- c_info_map.id = id;
- c_info_map.data = byteStream.base();
- c_info_map.size = byteStream.size();
-
- ++i;
- }
-
- ctx_->ops->configure(ctx_, c_streams, streamConfig.size(),
- c_info_maps, entityControls.size());
-}
-
-void IPAContextWrapper::mapBuffers(const std::vector<IPABuffer> &buffers)
-{
- if (intf_)
- return intf_->mapBuffers(buffers);
-
- if (!ctx_)
- return;
-
- struct ipa_buffer c_buffers[buffers.size()];
-
- for (unsigned int i = 0; i < buffers.size(); ++i) {
- struct ipa_buffer &c_buffer = c_buffers[i];
- const IPABuffer &buffer = buffers[i];
- const std::vector<FrameBuffer::Plane> &planes = buffer.planes;
-
- c_buffer.id = buffer.id;
- c_buffer.num_planes = planes.size();
-
- for (unsigned int j = 0; j < planes.size(); ++j) {
- const FrameBuffer::Plane &plane = planes[j];
- c_buffer.planes[j].dmabuf = plane.fd.fd();
- c_buffer.planes[j].length = plane.length;
- }
- }
-
- ctx_->ops->map_buffers(ctx_, c_buffers, buffers.size());
-}
-
-void IPAContextWrapper::unmapBuffers(const std::vector<unsigned int> &ids)
-{
- if (intf_)
- return intf_->unmapBuffers(ids);
-
- if (!ctx_)
- return;
-
- ctx_->ops->unmap_buffers(ctx_, ids.data(), ids.size());
-}
-
-void IPAContextWrapper::processEvent(const IPAOperationData &data)
-{
- if (intf_)
- return intf_->processEvent(data);
-
- if (!ctx_)
- return;
-
- struct ipa_operation_data c_data;
- c_data.operation = data.operation;
- c_data.data = data.data.data();
- c_data.num_data = data.data.size();
-
- struct ipa_control_list control_lists[data.controls.size()];
- c_data.lists = control_lists;
- c_data.num_lists = data.controls.size();
-
- std::size_t listsSize = 0;
- for (const auto &list : data.controls)
- listsSize += serializer_.binarySize(list);
-
- std::vector<uint8_t> binaryData(listsSize);
- ByteStreamBuffer byteStreamBuffer(binaryData.data(), listsSize);
-
- unsigned int i = 0;
- for (const auto &list : data.controls) {
- struct ipa_control_list &c_list = control_lists[i];
- c_list.size = serializer_.binarySize(list);
- ByteStreamBuffer b = byteStreamBuffer.carveOut(c_list.size);
-
- serializer_.serialize(list, b);
-
- c_list.data = b.base();
- }
-
- ctx_->ops->process_event(ctx_, &c_data);
-}
-
-void IPAContextWrapper::doQueueFrameAction(unsigned int frame,
- const IPAOperationData &data)
-{
- IPAInterface::queueFrameAction.emit(frame, data);
-}
-
-void IPAContextWrapper::queue_frame_action(void *ctx, unsigned int frame,
- struct ipa_operation_data &data)
-{
- IPAContextWrapper *_this = static_cast<IPAContextWrapper *>(ctx);
- IPAOperationData opData;
-
- opData.operation = data.operation;
- for (unsigned int i = 0; i < data.num_data; ++i)
- opData.data.push_back(data.data[i]);
-
- for (unsigned int i = 0; i < data.num_lists; ++i) {
- const struct ipa_control_list &c_list = data.lists[i];
- ByteStreamBuffer b(c_list.data, c_list.size);
- opData.controls.push_back(_this->serializer_.deserialize<ControlList>(b));
- }
-
- _this->doQueueFrameAction(frame, opData);
-}
-
-#ifndef __DOXYGEN__
-/*
- * This construct confuses Doxygen and makes it believe that all members of
- * the operations struct are members of IPAContextWrapper. It must thus be
- * hidden.
- */
-const struct ipa_callback_ops IPAContextWrapper::callbacks_ = {
- .queue_frame_action = &IPAContextWrapper::queue_frame_action,
-};
-#endif
-
-} /* namespace libcamera */
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index b1d14190..12d92ebe 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
-#include <ipa/ipa_controls.h>
+#include <libcamera/ipa/ipa_controls.h>
/**
* \file ipa_controls.h
@@ -108,17 +108,19 @@
* +-------------------------+ .
* / | ... | | entry[n].offset
* | +-------------------------+ <-----´
- * Data | | minimum value (#n) | \
- * section | +-------------------------+ | Entry #n
- * | | maximum value (#n) | /
+ * | | minimum value (#n) | \
+ * Data | +-------------------------+ |
+ * section | | maximum value (#n) | | Entry #n
+ * | +-------------------------+ |
+ * | | default value (#n) | /
* | +-------------------------+
* \ | ... |
* +-------------------------+
* ~~~~
*
- * The minimum and maximum value are stored in the platform's native data
- * format. The ipa_control_info_entry::offset field stores the offset from the
- * beginning of the data section to the info data.
+ * The minimum, maximum and default values are stored in the platform's native
+ * data format. The ipa_control_info_entry::offset field stores the offset from
+ * the beginning of the data section to the info data.
*
* Info data in the data section shall be stored in the same order as the
* entries array, shall be aligned to a multiple of 8 bytes, and shall be
@@ -129,12 +131,41 @@
* shall be ignored when parsing the packet.
*/
+namespace libcamera {
+
/**
* \def IPA_CONTROLS_FORMAT_VERSION
* \brief The current control serialization format version
*/
/**
+ * \var ipa_controls_id_map_type
+ * \brief Enumerates the different control id map types
+ *
+ * Each ControlInfoMap and ControlList refers to a control id map that
+ * associates the ControlId references to a numerical identifier.
+ * During the serialization procedure the raw pointers to the ControlId
+ * instances cannot be transported on the wire, hence their numerical id is
+ * used to identify them in the serialized data buffer. At deserialization time
+ * it is required to associate back to the numerical id the ControlId instance
+ * it represents. This enumeration describes which ControlIdMap should be
+ * used to perform such operation.
+ *
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_CONTROLS
+ * \brief The numerical control identifier are resolved to a ControlId * using
+ * the global controls::controls id map
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_PROPERTIES
+ * \brief The numerical control identifier are resolved to a ControlId * using
+ * the global properties::properties id map
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_V4L2
+ * \brief ControlId for V4L2 defined controls are created by the video device
+ * that enumerates them, and are not available across the IPC boundaries. The
+ * deserializer shall create new ControlId instances for them as well as store
+ * them in a dedicated ControlIdMap. Only lookup by numerical id can be
+ * performed on de-serialized ControlInfoMap that represents V4L2 controls.
+ */
+
+/**
* \struct ipa_controls_header
* \brief Serialized control packet header
* \var ipa_controls_header::version
@@ -149,6 +180,8 @@
* The total packet size in bytes
* \var ipa_controls_header::data_offset
* Offset in bytes from the beginning of the packet of the data section start
+ * \var ipa_controls_header::id_map_type
+ * The id map type as defined by the ipa_controls_id_map_type enumeration
* \var ipa_controls_header::reserved
* Reserved for future extensions
*/
@@ -187,9 +220,15 @@ static_assert(sizeof(ipa_control_value_entry) == 16,
* \var ipa_control_info_entry::offset
* The offset in bytes from the beginning of the data section to the control
* info data (shall be a multiple of 8 bytes)
+ * \var ipa_control_info_entry::direction
+ * The directions in which the control is allowed to be sent. This is a flags
+ * value, where 0x1 signifies input (as controls), and 0x2 signifies output (as
+ * metadata). \sa ControlId::Direction
* \var ipa_control_info_entry::padding
* Padding bytes (shall be set to 0)
*/
static_assert(sizeof(ipa_control_info_entry) == 16,
"Invalid ABI size change for struct ipa_control_info_entry");
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
new file mode 100644
index 00000000..2189a246
--- /dev/null
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -0,0 +1,626 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm data serializer
+ */
+
+#include "libcamera/internal/ipa_data_serializer.h"
+
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/byte_stream_buffer.h"
+
+/**
+ * \file ipa_data_serializer.h
+ * \brief IPA Data Serializer
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPADataSerializer)
+
+/**
+ * \class IPADataSerializer
+ * \brief IPA Data Serializer
+ *
+ * Static template class that provides functions for serializing and
+ * deserializing IPA data.
+ *
+ * \todo Switch to Span instead of byte and fd vector
+ *
+ * \todo Harden the vector and map deserializer
+ *
+ * \todo For SharedFDs, instead of storing a validity flag, store an
+ * index into the fd array. This will allow us to use views instead of copying.
+ */
+
+namespace {
+
+/**
+ * \fn template<typename T> void appendPOD(std::vector<uint8_t> &vec, T val)
+ * \brief Append POD to end of byte vector, in little-endian order
+ * \tparam T Type of POD to append
+ * \param[in] vec Byte vector to append to
+ * \param[in] val Value to append
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ */
+
+/**
+ * \fn template<typename T> T readPOD(std::vector<uint8_t>::iterator it, size_t pos,
+ * std::vector<uint8_t>::iterator end)
+ * \brief Read POD from byte vector, in little-endian order
+ * \tparam T Type of POD to read
+ * \param[in] it Iterator of byte vector to read from
+ * \param[in] pos Index in byte vector to read from
+ * \param[in] end Iterator marking end of byte vector
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ *
+ * If \a pos plus the byte-width of the desired POD is past \a end, a fatal
+ * error will occur, as it means there is insufficient data for
+ * deserialization, which should never happen.
+ *
+ * \return The POD read from \a it at index \a pos
+ */
+
+/**
+ * \fn template<typename T> T readPOD(std::vector<uint8_t> &vec, size_t pos)
+ * \brief Read POD from byte vector, in little-endian order
+ * \tparam T Type of POD to read
+ * \param[in] vec Byte vector to read from
+ * \param[in] pos Index in vec to start reading from
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ *
+ * If the \a pos plus the byte-width of the desired POD is past the end of
+ * \a vec, a fatal error will occur, as it means there is insufficient data
+ * for deserialization, which should never happen.
+ *
+ * \return The POD read from \a vec at index \a pos
+ */
+
+} /* namespace */
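+
+/*
+ * For illustration only (not part of this patch): the behaviour documented
+ * above corresponds to a little-endian pair along these lines for integral
+ * types, assuming the real definitions live in ipa_data_serializer.h
+ * (floating-point values would need a bit-cast first):
+ *
+ *     template<typename T>
+ *     void appendPOD(std::vector<uint8_t> &vec, T val)
+ *     {
+ *             for (size_t i = 0; i < sizeof(T); i++)
+ *                     vec.push_back((static_cast<uint64_t>(val) >> (i * 8)) & 0xff);
+ *     }
+ */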
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::serialize(
+ * T data,
+ * ControlSerializer *cs = nullptr)
+ * \brief Serialize an object into byte vector and fd vector
+ * \tparam T Type of object to serialize
+ * \param[in] data Object to serialize
+ * \param[in] cs ControlSerializer
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return Tuple of byte vector and fd vector, that is the serialized form
+ * of \a data
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * const std::vector<uint8_t> &data,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] data Byte vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() can be used if the object type \a T and its
+ * members don't have any SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * std::vector<uint8_t>::const_iterator dataBegin,
+ * std::vector<uint8_t>::const_iterator dataEnd,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] dataBegin Begin iterator of byte vector to deserialize from
+ * \param[in] dataEnd End iterator of byte vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() can be used if the object type \a T and its
+ * members don't have any SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * const std::vector<uint8_t> &data,
+ * const std::vector<SharedFD> &fds,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector and fd vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] data Byte vector to deserialize from
+ * \param[in] fds Fd vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() (or the iterator version) must be used if
+ * the object type \a T or its members contain SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer::deserialize(
+ * std::vector<uint8_t>::const_iterator dataBegin,
+ * std::vector<uint8_t>::const_iterator dataEnd,
+ * std::vector<SharedFD>::const_iterator fdsBegin,
+ * std::vector<SharedFD>::const_iterator fdsEnd,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector and fd vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] dataBegin Begin iterator of byte vector to deserialize from
+ * \param[in] dataEnd End iterator of byte vector to deserialize from
+ * \param[in] fdsBegin Begin iterator of fd vector to deserialize from
+ * \param[in] fdsEnd End iterator of fd vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() (or the vector version) must be used if
+ * the object type \a T or its members contain SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
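+/*
+ * Usage sketch (illustrative only): a POD round-trip through the serializer.
+ * fds stays empty for plain PODs, and no ControlSerializer is needed:
+ *
+ *     std::vector<uint8_t> data;
+ *     std::vector<SharedFD> fds;
+ *     std::tie(data, fds) = IPADataSerializer<uint32_t>::serialize(42);
+ *     uint32_t value = IPADataSerializer<uint32_t>::deserialize(data);
+ *     ASSERT(value == 42);
+ */
+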
+#ifndef __DOXYGEN__
+
+#define DEFINE_POD_SERIALIZER(type) \
+ \
+template<> \
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>> \
+IPADataSerializer<type>::serialize(const type &data, \
+ [[maybe_unused]] ControlSerializer *cs) \
+{ \
+ std::vector<uint8_t> dataVec; \
+ dataVec.reserve(sizeof(type)); \
+ appendPOD<type>(dataVec, data); \
+ \
+ return { dataVec, {} }; \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
+ std::vector<uint8_t>::const_iterator dataEnd, \
+ [[maybe_unused]] ControlSerializer *cs) \
+{ \
+ return readPOD<type>(dataBegin, 0, dataEnd); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(data.cbegin(), data.end(), cs); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
+ [[maybe_unused]] const std::vector<SharedFD> &fds, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(data.cbegin(), data.end(), cs); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
+ std::vector<uint8_t>::const_iterator dataEnd, \
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin, \
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(dataBegin, dataEnd, cs); \
+}
+
+DEFINE_POD_SERIALIZER(bool)
+DEFINE_POD_SERIALIZER(uint8_t)
+DEFINE_POD_SERIALIZER(uint16_t)
+DEFINE_POD_SERIALIZER(uint32_t)
+DEFINE_POD_SERIALIZER(uint64_t)
+DEFINE_POD_SERIALIZER(int8_t)
+DEFINE_POD_SERIALIZER(int16_t)
+DEFINE_POD_SERIALIZER(int32_t)
+DEFINE_POD_SERIALIZER(int64_t)
+DEFINE_POD_SERIALIZER(float)
+DEFINE_POD_SERIALIZER(double)
+
+/*
+ * Strings are serialized simply as the byte range {string.cbegin(), string.end()}.
+ * The size of the string is recorded by the container (struct, vector, map, or
+ * function parameter serdes).
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<std::string>::serialize(const std::string &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { { data.cbegin(), data.end() }, {} };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { data.cbegin(), data.cend() };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { dataBegin, dataEnd };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { data.cbegin(), data.cend() };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { dataBegin, dataEnd };
+}
+
+/*
+ * ControlList is serialized as:
+ *
+ * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
+ * 4 bytes - uint32_t Size of serialized ControlList, in bytes
+ * X bytes - Serialized ControlInfoMap (using ControlSerializer)
+ * X bytes - Serialized ControlList (using ControlSerializer)
+ *
+ * If data.infoMap() is nullptr, then the default controls::controls will
+ * be used. The serialized ControlInfoMap will have zero length.
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<ControlList>::serialize(const ControlList &data, ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for serialization of ControlList";
+
+ size_t size;
+ std::vector<uint8_t> infoData;
+ int ret;
+
+ /*
+ * \todo Revisit this opportunistic serialization of the
+ * ControlInfoMap, as it could be fragile
+ */
+ if (data.infoMap() && !cs->isCached(*data.infoMap())) {
+ size = cs->binarySize(*data.infoMap());
+ infoData.resize(size);
+ ByteStreamBuffer buffer(infoData.data(), infoData.size());
+ ret = cs->serialize(*data.infoMap(), buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlList's ControlInfoMap";
+ return { {}, {} };
+ }
+ }
+
+ size = cs->binarySize(data);
+ std::vector<uint8_t> listData(size);
+ ByteStreamBuffer buffer(listData.data(), listData.size());
+ ret = cs->serialize(data, buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlList";
+ return { {}, {} };
+ }
+
+ std::vector<uint8_t> dataVec;
+ dataVec.reserve(8 + infoData.size() + listData.size());
+ appendPOD<uint32_t>(dataVec, infoData.size());
+ appendPOD<uint32_t>(dataVec, listData.size());
+ dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());
+ dataVec.insert(dataVec.end(), listData.begin(), listData.end());
+
+ return { dataVec, {} };
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for deserialization of ControlList";
+
+ if (std::distance(dataBegin, dataEnd) < 8)
+ return {};
+
+ uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+ uint32_t listDataSize = readPOD<uint32_t>(dataBegin, 4, dataEnd);
+
+ std::vector<uint8_t>::const_iterator it = dataBegin + 8;
+
+ if (infoDataSize + listDataSize < infoDataSize ||
+ static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize + listDataSize)
+ return {};
+
+ if (infoDataSize > 0) {
+ ByteStreamBuffer buffer(&*it, infoDataSize);
+ ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);
+ /* It's fine if map is empty. */
+ if (buffer.overflow()) {
+ LOG(IPADataSerializer, Error)
+				<< "Failed to deserialize ControlList's ControlInfoMap: buffer overflow";
+ return ControlList();
+ }
+ }
+
+ it += infoDataSize;
+ ByteStreamBuffer buffer(&*it, listDataSize);
+ ControlList list = cs->deserialize<ControlList>(buffer);
+ if (buffer.overflow())
+ LOG(IPADataSerializer, Error) << "Failed to deserialize ControlList: buffer overflow";
+
+ return list;
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ ControlSerializer *cs)
+{
+ return deserialize(dataBegin, dataEnd, cs);
+}
+
+/*
+ * const ControlInfoMap is serialized as:
+ *
+ * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
+ * X bytes - Serialized ControlInfoMap (using ControlSerializer)
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<ControlInfoMap>::serialize(const ControlInfoMap &map,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for serialization of ControlInfoMap";
+
+ size_t size = cs->binarySize(map);
+ std::vector<uint8_t> infoData(size);
+ ByteStreamBuffer buffer(infoData.data(), infoData.size());
+ int ret = cs->serialize(map, buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlInfoMap";
+ return { {}, {} };
+ }
+
+ std::vector<uint8_t> dataVec;
+ appendPOD<uint32_t>(dataVec, infoData.size());
+ dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());
+
+ return { dataVec, {} };
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for deserialization of ControlInfoMap";
+
+ if (std::distance(dataBegin, dataEnd) < 4)
+ return {};
+
+ uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+
+ std::vector<uint8_t>::const_iterator it = dataBegin + 4;
+
+ if (static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize)
+ return {};
+
+ ByteStreamBuffer buffer(&*it, infoDataSize);
+ ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);
+
+ return map;
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ ControlSerializer *cs)
+{
+ return deserialize(dataBegin, dataEnd, cs);
+}
+
+/*
+ * SharedFD instances are serialized into four bytes that tell whether the
+ * SharedFD is valid. If it is valid, then for serialization the fd will be
+ * written to the fd vector, and for deserialization the fd vector
+ * const_iterator will point to a valid fd.
+ *
+ * This validity flag is necessary so that we don't send a -1 fd over
+ * sendmsg(). It also allows us to simply send the entire fd vector into the
+ * deserializer and it will be recursively consumed as necessary.
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<SharedFD>::serialize(const SharedFD &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ std::vector<uint8_t> dataVec;
+ std::vector<SharedFD> fdVec;
+
+ /*
+ * Store as uint32_t to prepare for conversion from validity flag
+ * to index, and for alignment.
+ */
+ appendPOD<uint32_t>(dataVec, data.isValid());
+
+ if (data.isValid())
+ fdVec.push_back(data);
+
+ return { dataVec, fdVec };
+}
+
+template<>
+SharedFD IPADataSerializer<SharedFD>::deserialize([[maybe_unused]] std::vector<uint8_t>::const_iterator dataBegin,
+ [[maybe_unused]] std::vector<uint8_t>::const_iterator dataEnd,
+ std::vector<SharedFD>::const_iterator fdsBegin,
+ std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ ASSERT(std::distance(dataBegin, dataEnd) >= 4);
+
+ uint32_t valid = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+
+ ASSERT(!(valid && std::distance(fdsBegin, fdsEnd) < 1));
+
+ return valid ? *fdsBegin : SharedFD();
+}
+
+template<>
+SharedFD IPADataSerializer<SharedFD>::deserialize(const std::vector<uint8_t> &data,
+ const std::vector<SharedFD> &fds,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end());
+}
+
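+/*
+ * Wire layout sketch (illustrative only, with the LSB-first byte order used
+ * by appendPOD()): a valid SharedFD serializes to the 4-byte value 1 in the
+ * data vector plus one entry in the fd vector, while an invalid SharedFD
+ * serializes to the 4-byte value 0 and no fd entry:
+ *
+ *   data = { 0x01, 0x00, 0x00, 0x00 }, fds = { fd }   (valid)
+ *   data = { 0x00, 0x00, 0x00, 0x00 }, fds = { }      (invalid)
+ */
+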
+/*
+ * FrameBuffer::Plane is serialized as:
+ *
+ * 4 bytes - SharedFD
+ * 4 bytes - uint32_t Offset
+ * 4 bytes - uint32_t Length
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<FrameBuffer::Plane>::serialize(const FrameBuffer::Plane &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ std::vector<uint8_t> dataVec;
+ std::vector<SharedFD> fdsVec;
+
+ std::vector<uint8_t> fdBuf;
+ std::vector<SharedFD> fdFds;
+ std::tie(fdBuf, fdFds) =
+ IPADataSerializer<SharedFD>::serialize(data.fd);
+ dataVec.insert(dataVec.end(), fdBuf.begin(), fdBuf.end());
+ fdsVec.insert(fdsVec.end(), fdFds.begin(), fdFds.end());
+
+ appendPOD<uint32_t>(dataVec, data.offset);
+ appendPOD<uint32_t>(dataVec, data.length);
+
+ return { dataVec, fdsVec };
+}
+
+template<>
+FrameBuffer::Plane
+IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ FrameBuffer::Plane ret;
+
+ ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4,
+ fdsBegin, fdsBegin + 1);
+ ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd);
+ ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd);
+
+ return ret;
+}
+
+template<>
+FrameBuffer::Plane
+IPADataSerializer<FrameBuffer::Plane>::deserialize(const std::vector<uint8_t> &data,
+ const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end(), cs);
+}
+
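+/*
+ * Round-trip sketch (illustrative only): a plane with a valid dmabuf fd,
+ * offset 4096 and length 65536 serializes to 12 data bytes (fd validity
+ * flag, offset, length) plus one fd vector entry:
+ *
+ *   FrameBuffer::Plane plane{ fd, 4096, 65536 };
+ *   auto [data, fds] =
+ *           IPADataSerializer<FrameBuffer::Plane>::serialize(plane);
+ *   FrameBuffer::Plane back =
+ *           IPADataSerializer<FrameBuffer::Plane>::deserialize(data, fds);
+ */
+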
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 5959e7de..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
-#include <ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_interface.h>
/**
* \file ipa_interface.h
@@ -15,273 +15,51 @@
* an Image Processing Algorithm (IPA) module. An IPA module is developed for a
* specific pipeline handler and each pipeline handler may be compatible with
* multiple IPA implementations, both open and closed source. To support this,
- * libcamera communicates with IPA modules through a standard plain C interface.
+ * libcamera communicates with IPA modules through a per-pipeline C++ interface.
*
- * IPA modules shall expose a public function named ipaCreate() with the
- * following prototype.
+ * IPA modules shall provide an ipaCreate() function exported as a public C
+ * symbol with the following prototype:
*
* \code{.c}
- * struct ipa_context *ipaCreate();
+ * IPAInterface *ipaCreate();
* \endcode
*
- * The ipaCreate() function creates an instance of an IPA context, which models
+ * The ipaCreate() function creates an instance of an IPA interface, which models
* a context of execution for the IPA. IPA modules shall support creating one
* context per camera, as required by their associated pipeline handler.
*
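+ * For illustration, a module built around a hypothetical MyIPA class that
+ * implements the generated IPA interface could define the entry point along
+ * these lines:
+ *
+ * \code{.cpp}
+ * extern "C" IPAInterface *ipaCreate()
+ * {
+ *         return new MyIPA();
+ * }
+ * \endcode
+ *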
- * The IPA module context operations are defined in the struct ipa_context_ops.
- * They model a low-level interface to configure the IPA, notify it of events,
- * and receive IPA actions through callbacks. An IPA module stores a pointer to
- * the operations corresponding to its context in the ipa_context::ops field.
- * That pointer is immutable for the lifetime of the context, and may differ
- * between different contexts created by the same IPA module.
+ * The IPA module interface operations are defined in the mojom file
+ * corresponding to the pipeline handler, in
+ * include/libcamera/ipa/{pipeline_name}.mojom.
*
- * The IPA interface defines base data types and functions to exchange data. On
- * top of this, each pipeline handler is responsible for defining the set of
- * events and actions used to communicate with their IPA. These are collectively
- * referred to as IPA operations and define the pipeline handler-specific IPA
- * protocol. Each operation defines the data that it carries, and how that data
- * is encoded in the ipa_context_ops functions arguments.
+ * The IPA interface is specific to each pipeline handler. The pipeline handlers
+ * define a set of operations used to communicate with their IPA modules. The
+ * operations, along with the data structures they use, are collectively
+ * referred to as the IPA protocol.
+ *
+ * The IPA protocol is defined using the
+ * <a href="https://chromium.googlesource.com/chromium/src/+/master/mojo/public/tools/bindings/README.md">Mojo interface definition language</a>,
+ * in a Mojo module file stored in include/libcamera/ipa/{pipeline_name}.mojom.
+ * The Mojo module contains two Mojo interfaces: IPAInterface defines the
+ * operations exposed by the IPA and called by the pipeline handler, and
+ * IPAEventInterface defines the events generated by the IPA and received by the
+ * pipeline handler.
*
* \todo Add reference to how pipelines shall document their protocol.
*
* IPAs can be isolated in a separate process. This implies that arguments to
- * the IPA interface functions may need to be transferred over IPC. All
- * arguments use Plain Old Data types and are documented either in the form of C
- * data types, or as a textual description of byte arrays for types that can't
- * be expressed using C data types (such as arrays of mixed data types). IPA
- * modules can thus use the C API without calling into libcamera to access the
- * data passed to the IPA context operations.
+ * the IPA interface functions may need to be transferred over IPC. An IPA
+ * proxy is auto-generated based on the mojom file, which abstracts away the
+ * (de)serialization from the pipeline handler and the IPA implementation. Thus
+ * any C++ structure that is defined in the mojom file, or the C++ libcamera
+ * objects that are listed in core.mojom, can be used directly.
*
* Due to IPC, synchronous communication between pipeline handlers and IPAs can
- * be costly. For that reason, the interface operates asynchronously. This
- * implies that methods don't return a status, and that all methods may copy
- * their arguments.
- *
- * The IPAInterface class is a C++ representation of the ipa_context_ops, using
- * C++ data classes provided by libcamera. This is the API exposed to pipeline
- * handlers to communicate with IPA modules. IPA modules may use the
- * IPAInterface API internally if they want to benefit from the data and helper
- * classes offered by libcamera.
- *
- * When an IPA module is loaded directly into the libcamera process and uses
- * the IPAInterface API internally, short-circuiting the path to the
- * ipa_context_ops and back to IPAInterface is desirable. To support this, IPA
- * modules may implement the ipa_context_ops::get_interface function to return a
- * pointer to their internal IPAInterface.
- */
-
-/**
- * \struct ipa_context
- * \brief IPA module context of execution
- *
- * This structure models a context of execution for an IPA module. It is
- * instantiated by the IPA module ipaCreate() function. IPA modules allocate
- * context instances in an implementation-defined way, contexts shall thus be
- * destroyed using the ipa_operation::destroy function only.
- *
- * The ipa_context structure provides a pointer to the IPA context operations.
- * It shall otherwise be treated as a constant black-box cookie and passed
- * unmodified to the functions defined in struct ipa_context_ops.
- *
- * IPA modules are expected to extend struct ipa_context by inheriting from it,
- * either through structure embedding to model inheritance in plain C, or
- * through C++ class inheritance. A simple example of the latter is available
- * in the IPAContextWrapper class implementation.
- *
- * \var ipa_context::ops
- * \brief The IPA context operations
- */
-
-/**
- * \struct ipa_stream
- * \brief Stream information for the IPA context operations
- *
- * \var ipa_stream::id
- * \brief Identifier for the stream, defined by the IPA protocol
- *
- * \var ipa_stream::pixel_format
- * \brief The stream pixel format, as defined by the PixelFormat class
- *
- * \var ipa_stream::width
- * \brief The stream width in pixels
- *
- * \var ipa_stream::height
- * \brief The stream height in pixels
- */
-
-/**
- * \struct ipa_control_info_map
- * \brief ControlInfoMap description for the IPA context operations
- *
- * \var ipa_control_info_map::id
- * \brief Identifier for the ControlInfoMap, defined by the IPA protocol
- *
- * \var ipa_control_info_map::data
- * \brief Pointer to a control packet for the ControlInfoMap
- * \sa ipa_controls.h
- *
- * \var ipa_control_info_map::size
- * \brief The size of the control packet in bytes
- */
-
-/**
- * \struct ipa_buffer_plane
- * \brief A plane for an ipa_buffer
- *
- * \var ipa_buffer_plane::dmabuf
- * \brief The dmabuf file descriptor for the plane (-1 for unused planes)
- *
- * \var ipa_buffer_plane::length
- * \brief The plane length in bytes (0 for unused planes)
- */
-
-/**
- * \struct ipa_buffer
- * \brief Buffer information for the IPA context operations
- *
- * \var ipa_buffer::id
- * \brief The buffer unique ID (see \ref libcamera::IPABuffer::id)
- *
- * \var ipa_buffer::num_planes
- * \brief The number of used planes in the ipa_buffer::planes array
- *
- * \var ipa_buffer::planes
- * \brief The buffer planes (up to 3)
- */
-
-/**
- * \struct ipa_control_list
- * \brief ControlList description for the IPA context operations
- *
- * \var ipa_control_list::data
- * \brief Pointer to a control packet for the ControlList
- * \sa ipa_controls.h
- *
- * \var ipa_control_list::size
- * \brief The size of the control packet in bytes
- */
-
-/**
- * \struct ipa_operation_data
- * \brief IPA operation data for the IPA context operations
- * \sa libcamera::IPAOperationData
- *
- * \var ipa_operation_data::operation
- * \brief IPA protocol operation
- *
- * \var ipa_operation_data::data
- * \brief Pointer to the operation data array
- *
- * \var ipa_operation_data::num_data
- * \brief Number of entries in the ipa_operation_data::data array
- *
- * \var ipa_operation_data::lists
- * \brief Pointer to an array of ipa_control_list
- *
- * \var ipa_operation_data::num_lists
- * \brief Number of entries in the ipa_control_list array
- */
-
-/**
- * \struct ipa_callback_ops
- * \brief IPA context operations as a set of function pointers
- */
-
-/**
- * \var ipa_callback_ops::queue_frame_action
- * \brief Queue an action associated with a frame to the pipeline handler
- * \param[in] cb_ctx The callback context registered with
- * ipa_context_ops::register_callbacks
- * \param[in] frame The frame number
- *
- * \sa libcamera::IPAInterface::queueFrameAction
- */
-
-/**
- * \struct ipa_context_ops
- * \brief IPA context operations as a set of function pointers
- *
- * To allow for isolation of IPA modules in separate processes, the functions
- * defined in the ipa_context_ops structure return only data related to the
- * libcamera side of the operations. In particular, error related to the
- * libcamera side of the IPC may be returned. Data returned by the IPA,
- * including status information, shall be provided through callbacks from the
- * IPA to libcamera.
- */
-
-/**
- * \var ipa_context_ops::destroy
- * \brief Destroy the IPA context created by the module's ipaCreate() function
- * \param[in] ctx The IPA context
- */
-
-/**
- * \var ipa_context_ops::get_interface
- * \brief Retrieve the IPAInterface implemented by the ipa_context (optional)
- * \param[in] ctx The IPA context
- *
- * IPA modules may implement this function to expose their internal
- * IPAInterface, if any. When implemented, libcamera may at its sole discretion
- * call it and then bypass the ipa_context_ops API by calling the IPAInterface
- * methods directly. IPA modules shall still implement and support the full
- * ipa_context_ops API.
- */
-
-/**
- * \var ipa_context_ops::init
- * \brief Initialise the IPA context
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::init()
- */
-
-/**
- * \var ipa_context_ops::register_callbacks
- * \brief Register callback operation from the IPA to the pipeline handler
- * \param[in] ctx The IPA context
- * \param[in] callback The IPA callback operations
- * \param[in] cb_ctx The callback context, passed to all callback operations
- */
-
-/**
- * \var ipa_context_ops::configure
- * \brief Configure the IPA stream and sensor settings
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::configure()
- */
-
-/**
- * \var ipa_context_ops::map_buffers
- * \brief Map buffers shared between the pipeline handler and the IPA
- * \param[in] ctx The IPA context
- * \param[in] buffers The buffers to map
- * \param[in] num_buffers The number of entries in the \a buffers array
- *
- * The dmabuf file descriptors provided in \a buffers are borrowed from the
- * caller and are only guaranteed to be valid during the map_buffers() call.
- * Should the callee need to store a copy of the file descriptors, it shall
- * duplicate them first with ::%dup().
- *
- * \sa libcamera::IPAInterface::mapBuffers()
- */
-
-/**
- * \var ipa_context_ops::unmap_buffers
- * \brief Unmap buffers shared by the pipeline to the IPA
- * \param[in] ctx The IPA context
- * \param[in] ids The IDs of the buffers to unmap
- * \param[in] num_buffers The number of entries in the \a ids array
- *
- * \sa libcamera::IPAInterface::unmapBuffers()
- */
-
-/**
- * \var ipa_context_ops::process_event
- * \brief Process an event from the pipeline handler
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::processEvent()
+ * be costly. For that reason, functions that cannot afford the high cost
+ * should be marked as [async] in the mojom file, and they will operate
+ * asynchronously. This implies that these functions don't return a status, and
+ * that all functions may copy their arguments. Synchronous functions are still
+ * allowed, but should be used with caution.
*/
/**
@@ -289,213 +67,38 @@
* \brief Entry point to the IPA modules
*
* This function is the entry point to the IPA modules. It is implemented by
- * every IPA module, and called by libcamera to create a new IPA context.
+ * every IPA module, and called by libcamera to create a new IPA interface
+ * instance.
*
- * \return A newly created IPA context
+ * \return A newly created IPA interface instance
*/
namespace libcamera {
/**
- * \struct IPAStream
- * \brief Stream configuration for the IPA interface
- *
- * The IPAStream structure stores stream configuration parameters needed by the
- * IPAInterface::configure() method. It mirrors the StreamConfiguration class
- * that is not suitable for this purpose due to not being serializable.
- */
-
-/**
- * \var IPAStream::pixelFormat
- * \brief The stream pixel format
- */
-
-/**
- * \var IPAStream::size
- * \brief The stream size in pixels
- */
-
-/**
- * \struct IPABuffer
- * \brief Buffer information for the IPA interface
- *
- * The IPABuffer structure associates buffer memory with a unique ID. It is
- * used to map buffers to the IPA with IPAInterface::mapBuffers(), after which
- * buffers will be identified by their ID in the IPA interface.
- */
-
-/**
- * \var IPABuffer::id
- * \brief The buffer unique ID
- *
- * Buffers mapped to the IPA are identified by numerical unique IDs. The IDs
- * are chosen by the pipeline handler to fulfil the following constraints:
- *
- * - IDs shall be positive integers different than zero
- * - IDs shall be unique among all mapped buffers
- *
- * When buffers are unmapped with IPAInterface::unmapBuffers() their IDs are
- * freed and may be reused for new buffer mappings.
- */
-
-/**
- * \var IPABuffer::planes
- * \brief The buffer planes description
- *
- * Stores the dmabuf handle and length for each plane of the buffer.
- */
-
-/**
- * \struct IPAOperationData
- * \brief Parameters for IPA operations
- *
- * The IPAOperationData structure carries parameters for the IPA operations
- * performed through the IPAInterface::processEvent() method and the
- * IPAInterface::queueFrameAction signal.
- */
-
-/**
- * \var IPAOperationData::operation
- * \brief IPA protocol operation
- *
- * The operation field describes which operation the receiver shall perform. It
- * defines, through the IPA protocol, how the other fields of the structure are
- * interpreted. The protocol freely assigns numerical values to operations.
- */
-
-/**
- * \var IPAOperationData::data
- * \brief Operation integer data
- *
- * The interpretation and position of different values in the array are defined
- * by the IPA protocol.
- */
-
-/**
- * \var IPAOperationData::controls
- * \brief Operation controls data
- *
- * The interpretation and position of different values in the array are defined
- * by the IPA protocol.
- */
-
-/**
* \class IPAInterface
* \brief C++ Interface for IPA implementation
*
- * This pure virtual class defines a C++ API corresponding to the ipa_context,
- * ipa_context_ops and ipa_callback_ops API. It is used by pipeline handlers to
- * interact with IPA modules, and may be used internally in IPA modules if
- * desired to benefit from the data and helper classes provided by libcamera.
- *
- * Functions defined in the ipa_context_ops structure are mapped to IPAInterface
- * methods, while functions defined in the ipa_callback_ops are mapped to
- * IPAInterface signals. As with the C API, the IPA C++ interface uses
- * serializable data types only. It reuses structures defined by the C API, or
- * defines corresponding classes using C++ containers when required.
+ * This pure virtual class defines a skeletal C++ API for IPA modules.
+ * Specializations of this class must be defined in a mojom file in
+ * include/libcamera/ipa/ (see the IPA Writers Guide for details
+ * on how to do so).
*
- * Due to process isolation all arguments to the IPAInterface methods and
- * signals may need to be transferred over IPC. The class thus uses serializable
- * data types only. The IPA C++ interface defines custom data structures that
- * mirror core libcamera structures when the latter are not suitable, such as
- * IPAStream to carry StreamConfiguration data.
+ * Due to process isolation all arguments to the IPAInterface member functions
+ * and signals may need to be transferred over IPC. The class thus uses
+ * serializable data types only. The IPA C++ interface defines custom data
+ * structures that mirror core libcamera structures when the latter are not
+ * suitable, such as IPAStream to carry StreamConfiguration data.
*
- * As for the functions defined in struct ipa_context_ops, the methods defined
- * by this class shall not return data from the IPA.
+ * Custom data structures may also be defined in the mojom file, in which case
+ * the (de)serialization will automatically be generated. If any other libcamera
+ * structures are to be used as parameters, then a (de)serializer for them must
+ * be implemented in IPADataSerializer.
*
- * The pipeline handler shall use the IPAManager to locate a compatible
+ * The pipeline handlers shall use the IPAManager to locate a compatible
* IPAInterface. The interface may then be used to interact with the IPA module.
- */
-
-/**
- * \fn IPAInterface::init()
- * \brief Initialise the IPAInterface
- */
-
-/**
- * \fn IPAInterface::configure()
- * \brief Configure the IPA stream and sensor settings
- * \param[in] streamConfig Configuration of all active streams
- * \param[in] entityControls Controls provided by the pipeline entities
- *
- * This method shall be called when the camera is started to inform the IPA of
- * the camera's streams and the sensor settings. The meaning of the numerical
- * keys in the \a streamConfig and \a entityControls maps is defined by the IPA
- * protocol.
- */
-
-/**
- * \fn IPAInterface::mapBuffers()
- * \brief Map buffers shared between the pipeline handler and the IPA
- * \param[in] buffers List of buffers to map
- *
- * This method informs the IPA module of memory buffers set up by the pipeline
- * handler that the IPA needs to access. It provides dmabuf file handles for
- * each buffer, and associates the buffers with unique numerical IDs.
- *
- * IPAs shall map the dmabuf file handles to their address space and keep a
- * cache of the mappings, indexed by the buffer numerical IDs. The IDs are used
- * in all other IPA interface methods to refer to buffers, including the
- * unmapBuffers() method.
- *
- * All buffers that the pipeline handler wishes to share with an IPA shall be
- * mapped with this method. Buffers may be mapped all at once with a single
- * call, or mapped and unmapped dynamically at runtime, depending on the IPA
- * protocol. Regardless of the protocol, all buffers mapped at a given time
- * shall have unique numerical IDs.
- *
- * The numerical IDs have no meaning defined by the IPA interface, and IPA
- * protocols shall not give them any specific meaning either. They should be
- * treated as opaque handles by IPAs, with the only exception that ID zero is
- * invalid.
- *
- * \sa unmapBuffers()
- *
- * \todo Provide a generic implementation of mapBuffers and unmapBuffers for
- * IPAs
- */
-
-/**
- * \fn IPAInterface::unmapBuffers()
- * \brief Unmap buffers shared by the pipeline to the IPA
- * \param[in] ids List of buffer IDs to unmap
- *
- * This method removes mappings set up with mapBuffers(). Buffers may be
- * unmapped all at once with a single call, or selectively at runtime, depending
- * on the IPA protocol. Numerical IDs of unmapped buffers may be reused when
- * mapping new buffers.
- *
- * \sa mapBuffers()
- */
-
-/**
- * \fn IPAInterface::processEvent()
- * \brief Process an event from the pipeline handler
- * \param[in] data IPA operation data
- *
- * This operation is used by pipeline handlers to inform the IPA module of
- * events that occurred during the on-going capture operation.
- *
- * The event notified by the pipeline handler with this method is handled by the
- * IPA, which interprets the operation parameters according to the separately
- * documented IPA protocol.
- */
-
-/**
- * \var IPAInterface::queueFrameAction
- * \brief Queue an action associated with a frame to the pipeline handler
- * \param[in] frame The frame number for the action
- * \param[in] data IPA operation data
- *
- * This signal is emitted when the IPA wishes to queue a FrameAction on the
- * pipeline. The pipeline is still responsible for the scheduling of the action
- * on its timeline.
*
- * This signal is emitted by the IPA to queue an action to be executed by the
- * pipeline handler on a frame. The type of action is identified by the
- * \a data.operation field, as defined by the IPA protocol, and the rest of the
- * \a data is interpreted accordingly. The pipeline handler shall queue the
- * action and execute it as appropriate.
+ * \todo Figure out how to generate IPAInterface documentation.
*/
} /* namespace libcamera */
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index bcaae356..cfc24d38 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,22 +2,23 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
-#include "ipa_manager.h"
+#include "libcamera/internal/ipa_manager.h"
#include <algorithm>
#include <dirent.h>
#include <string.h>
#include <sys/types.h>
-#include "ipa_context_wrapper.h"
-#include "ipa_module.h"
-#include "ipa_proxy.h"
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/ipa_module.h"
+#include "libcamera/internal/ipa_proxy.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file ipa_manager.h
@@ -43,8 +44,8 @@ LOG_DEFINE_CATEGORY(IPAManager)
* The isolation mechanism ensures that no code from a closed-source module is
* ever run in the libcamera process.
*
- * To create an IPA context, pipeline handlers call the IPAManager::ipaCreate()
- * method. For a directly loaded module, the manager calls the module's
+ * To create an IPA context, pipeline handlers call the IPAManager::createIPA()
+ * function. For a directly loaded module, the manager calls the module's
* ipaCreate() function directly and wraps the returned context in an
* IPAContextWrapper that exposes an IPAInterface.
*
@@ -88,13 +89,25 @@ LOG_DEFINE_CATEGORY(IPAManager)
 * returned to the pipeline handler, and all interactions with the IPA context
 * go through the same interface regardless of process isolation.
*
- * In all cases the data passed to the IPAInterface methods is serialized to
- * Plain Old Data, either for the purpose of passing it to the IPA context
- * plain C API, or to transmit the data to the isolated process through IPC.
+ * In all cases the data passed to the IPAInterface member functions is
+ * serialized to Plain Old Data, either for the purpose of passing it to the IPA
+ * context plain C API, or to transmit the data to the isolated process through
+ * IPC.
*/
+/**
+ * \brief Construct an IPAManager instance
+ *
+ * The IPAManager class is meant to only be instantiated once, by the
+ * CameraManager.
+ */
IPAManager::IPAManager()
{
+#if HAVE_IPA_PUBKEY
+ if (!pubKey_.isValid())
+ LOG(IPAManager, Warning) << "Public key not valid";
+#endif
+
unsigned int ipaCount = 0;
/* User-specified paths take precedence. */
@@ -114,15 +127,12 @@ IPAManager::IPAManager()
/*
* When libcamera is used before it is installed, load IPAs from the
- * same build directory as the libcamera library itself. This requires
- * identifying the path of the libcamera.so, and referencing a relative
- * path for the IPA from that point. We need to recurse one level of
- * sub-directories to match the build tree.
+ * same build directory as the libcamera library itself.
*/
std::string root = utils::libcameraBuildPath();
if (!root.empty()) {
std::string ipaBuildPath = root + "src/ipa";
- constexpr int maxDepth = 1;
+ constexpr int maxDepth = 2;
LOG(IPAManager, Info)
<< "libcamera is not installed. Adding '"
@@ -146,21 +156,6 @@ IPAManager::~IPAManager()
}
/**
- * \brief Retrieve the IPA manager instance
- *
- * The IPAManager is a singleton and can't be constructed manually. This
- * function shall instead be used to retrieve the single global instance of the
- * manager.
- *
- * \return The IPA manager instance
- */
-IPAManager *IPAManager::instance()
-{
- static IPAManager ipaManager;
- return &ipaManager;
-}
-
-/**
* \brief Identify shared library objects within a directory
* \param[in] libDir The directory to search for shared objects
* \param[in] maxDepth The maximum depth of sub-directories to parse
@@ -212,7 +207,7 @@ void IPAManager::parseDir(const char *libDir, unsigned int maxDepth,
* \param[in] libDir The directory to search for IPA modules
* \param[in] maxDepth The maximum depth of sub-directories to search
*
- * This method tries to create an IPAModule instance for every shared object
+ * This function tries to create an IPAModule instance for every shared object
* found in \a libDir, and skips invalid IPA modules.
*
* Sub-directories are searched up to a depth of \a maxDepth. A \a maxDepth
@@ -247,64 +242,75 @@ unsigned int IPAManager::addDir(const char *libDir, unsigned int maxDepth)
}
/**
- * \brief Create an IPA interface that matches a given pipeline handler
- * \param[in] pipe The pipeline handler that wants a matching IPA interface
+ * \brief Retrieve an IPA module that matches a given pipeline handler
+ * \param[in] pipe The pipeline handler
* \param[in] minVersion Minimum acceptable version of IPA module
* \param[in] maxVersion Maximum acceptable version of IPA module
- *
- * \return A newly created IPA interface, or nullptr if no matching
- * IPA module is found or if the IPA interface fails to initialize
*/
-std::unique_ptr<IPAInterface> IPAManager::createIPA(PipelineHandler *pipe,
- uint32_t maxVersion,
- uint32_t minVersion)
+IPAModule *IPAManager::module(PipelineHandler *pipe, uint32_t minVersion,
+ uint32_t maxVersion)
{
- IPAModule *m = nullptr;
-
for (IPAModule *module : modules_) {
- if (module->match(pipe, minVersion, maxVersion)) {
- m = module;
- break;
- }
+ if (module->match(pipe, minVersion, maxVersion))
+ return module;
}
- if (!m)
- return nullptr;
+ return nullptr;
+}
- if (!m->isOpenSource()) {
- IPAProxyFactory *pf = nullptr;
- std::vector<IPAProxyFactory *> &factories = IPAProxyFactory::factories();
+/**
+ * \fn IPAManager::createIPA()
+ * \brief Create an IPA proxy that matches a given pipeline handler
+ * \param[in] pipe The pipeline handler that wants a matching IPA proxy
+ * \param[in] minVersion Minimum acceptable version of IPA module
+ * \param[in] maxVersion Maximum acceptable version of IPA module
+ *
+ * \return A newly created IPA proxy, or nullptr if no matching IPA module is
+ * found or if the IPA proxy fails to initialize
+ */
- for (IPAProxyFactory *factory : factories) {
- /* TODO: Better matching */
- if (!strcmp(factory->name().c_str(), "IPAProxyLinux")) {
- pf = factory;
- break;
- }
- }
+#if HAVE_IPA_PUBKEY
+/**
+ * \fn IPAManager::pubKey()
+ * \brief Retrieve the IPA module signing public key
+ *
+ * IPA module signature verification is normally handled internally by the
+ * IPAManager class. This function is meant to be used by utilities that need to
+ * verify signatures externally.
+ *
+ * \return The IPA module signing public key
+ */
+#endif
- if (!pf) {
- LOG(IPAManager, Error) << "Failed to get proxy factory";
- return nullptr;
- }
+bool IPAManager::isSignatureValid([[maybe_unused]] IPAModule *ipa) const
+{
+#if HAVE_IPA_PUBKEY
+ char *force = utils::secure_getenv("LIBCAMERA_IPA_FORCE_ISOLATION");
+ if (force && force[0] != '\0') {
+ LOG(IPAManager, Debug)
+ << "Isolation of IPA module " << ipa->path()
+ << " forced through environment variable";
+ return false;
+ }
- std::unique_ptr<IPAProxy> proxy = pf->create(m);
- if (!proxy->isValid()) {
- LOG(IPAManager, Error) << "Failed to load proxy";
- return nullptr;
- }
+ File file{ ipa->path() };
+ if (!file.open(File::OpenModeFlag::ReadOnly))
+ return false;
- return proxy;
- }
+ Span<uint8_t> data = file.map();
+ if (data.empty())
+ return false;
- if (!m->load())
- return nullptr;
+ bool valid = pubKey_.verify(data, ipa->signature());
- struct ipa_context *ctx = m->createContext();
- if (!ctx)
- return nullptr;
+ LOG(IPAManager, Debug)
+ << "IPA module " << ipa->path() << " signature is "
+ << (valid ? "valid" : "not valid");
- return std::make_unique<IPAContextWrapper>(ctx);
+ return valid;
+#else
+ return false;
+#endif
}
} /* namespace libcamera */
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index a01d0757..9ca74be6 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,28 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
-#include "ipa_module.h"
+#include "libcamera/internal/ipa_module.h"
#include <algorithm>
-#include <array>
+#include <ctype.h>
#include <dlfcn.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <link.h>
#include <string.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <tuple>
#include <unistd.h>
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file ipa_module.h
@@ -42,27 +43,27 @@ LOG_DEFINE_CATEGORY(IPAModule)
namespace {
template<typename T>
-typename std::remove_extent_t<T> *elfPointer(void *map, off_t offset,
- size_t fileSize, size_t objSize)
+typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
+ off_t offset, size_t objSize)
{
size_t size = offset + objSize;
- if (size > fileSize || size < objSize)
+ if (size > elf.size() || size < objSize)
return nullptr;
- return reinterpret_cast<typename std::remove_extent_t<T> *>
- (static_cast<char *>(map) + offset);
+ return reinterpret_cast<typename std::remove_extent_t<T> *>(
+ reinterpret_cast<const char *>(elf.data()) + offset);
}
template<typename T>
-typename std::remove_extent_t<T> *elfPointer(void *map, off_t offset,
- size_t fileSize)
+typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
+ off_t offset)
{
- return elfPointer<T>(map, offset, fileSize, sizeof(T));
+ return elfPointer<T>(elf, offset, sizeof(T));
}
-int elfVerifyIdent(void *map, size_t soSize)
+int elfVerifyIdent(Span<const uint8_t> elf)
{
- char *e_ident = elfPointer<char[EI_NIDENT]>(map, 0, soSize);
+ const char *e_ident = elfPointer<const char[EI_NIDENT]>(elf, 0);
if (!e_ident)
return -ENOEXEC;
@@ -86,40 +87,47 @@ int elfVerifyIdent(void *map, size_t soSize)
return 0;
}
+const ElfW(Shdr) *elfSection(Span<const uint8_t> elf, const ElfW(Ehdr) *eHdr,
+ ElfW(Half) idx)
+{
+ if (idx >= eHdr->e_shnum)
+ return nullptr;
+
+ off_t offset = eHdr->e_shoff + idx *
+ static_cast<uint32_t>(eHdr->e_shentsize);
+ return elfPointer<const ElfW(Shdr)>(elf, offset);
+}
+
/**
* \brief Retrieve address and size of a symbol from an mmap'ed ELF file
- * \param[in] map Address of mmap'ed ELF file
- * \param[in] soSize Size of mmap'ed ELF file (in bytes)
+ * \param[in] elf Address and size of mmap'ed ELF file
* \param[in] symbol Symbol name
*
- * \return zero or error code, address or nullptr, size of symbol or zero,
- * respectively
+ * \return The memory region storing the symbol on success, or an empty span
+ * otherwise
*/
-std::tuple<void *, size_t>
-elfLoadSymbol(void *map, size_t soSize, const char *symbol)
+Span<const uint8_t> elfLoadSymbol(Span<const uint8_t> elf, const char *symbol)
{
- ElfW(Ehdr) *eHdr = elfPointer<ElfW(Ehdr)>(map, 0, soSize);
+ const ElfW(Ehdr) *eHdr = elfPointer<const ElfW(Ehdr)>(elf, 0);
if (!eHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
- off_t offset = eHdr->e_shoff + eHdr->e_shentsize * eHdr->e_shstrndx;
- ElfW(Shdr) *sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ const ElfW(Shdr) *sHdr = elfSection(elf, eHdr, eHdr->e_shstrndx);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
off_t shnameoff = sHdr->sh_offset;
/* Locate .dynsym section header. */
- ElfW(Shdr) *dynsym = nullptr;
+ const ElfW(Shdr) *dynsym = nullptr;
for (unsigned int i = 0; i < eHdr->e_shnum; i++) {
- offset = eHdr->e_shoff + eHdr->e_shentsize * i;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, i);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
- offset = shnameoff + sHdr->sh_name;
- char *name = elfPointer<char[8]>(map, offset, soSize);
+ off_t offset = shnameoff + sHdr->sh_name;
+ const char *name = elfPointer<const char[8]>(elf, offset);
if (!name)
- return std::make_tuple(nullptr, 0);
+ return {};
if (sHdr->sh_type == SHT_DYNSYM && !strcmp(name, ".dynsym")) {
dynsym = sHdr;
@@ -129,29 +137,28 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
if (dynsym == nullptr) {
LOG(IPAModule, Error) << "ELF has no .dynsym section";
- return std::make_tuple(nullptr, 0);
+ return {};
}
- offset = eHdr->e_shoff + eHdr->e_shentsize * dynsym->sh_link;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, dynsym->sh_link);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
off_t dynsym_nameoff = sHdr->sh_offset;
/* Locate symbol in the .dynsym section. */
- ElfW(Sym) *targetSymbol = nullptr;
+ const ElfW(Sym) *targetSymbol = nullptr;
unsigned int dynsym_num = dynsym->sh_size / dynsym->sh_entsize;
for (unsigned int i = 0; i < dynsym_num; i++) {
- offset = dynsym->sh_offset + dynsym->sh_entsize * i;
- ElfW(Sym) *sym = elfPointer<ElfW(Sym)>(map, offset, soSize);
+ off_t offset = dynsym->sh_offset + dynsym->sh_entsize * i;
+ const ElfW(Sym) *sym = elfPointer<const ElfW(Sym)>(elf, offset);
if (!sym)
- return std::make_tuple(nullptr, 0);
+ return {};
offset = dynsym_nameoff + sym->st_name;
- char *name = elfPointer<char>(map, offset, soSize,
- strlen(symbol) + 1);
+ const char *name = elfPointer<const char>(elf, offset,
+ strlen(symbol) + 1);
if (!name)
- return std::make_tuple(nullptr, 0);
+ return {};
if (!strcmp(name, symbol) &&
sym->st_info & STB_GLOBAL) {
@@ -162,22 +169,20 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
if (targetSymbol == nullptr) {
LOG(IPAModule, Error) << "Symbol " << symbol << " not found";
- return std::make_tuple(nullptr, 0);
+ return {};
}
/* Locate and return data of symbol. */
- if (targetSymbol->st_shndx >= eHdr->e_shnum)
- return std::make_tuple(nullptr, 0);
- offset = eHdr->e_shoff + targetSymbol->st_shndx * eHdr->e_shentsize;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, targetSymbol->st_shndx);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
- offset = sHdr->sh_offset + (targetSymbol->st_value - sHdr->sh_addr);
- char *data = elfPointer<char>(map, offset, soSize, targetSymbol->st_size);
+ return {};
+ off_t offset = sHdr->sh_offset + (targetSymbol->st_value - sHdr->sh_addr);
+ const uint8_t *data = elfPointer<const uint8_t>(elf, offset,
+ targetSymbol->st_size);
if (!data)
- return std::make_tuple(nullptr, 0);
+ return {};
- return std::make_tuple(data, targetSymbol->st_size);
+ return { data, targetSymbol->st_size };
}
} /* namespace */
@@ -218,26 +223,10 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
* \var IPAModuleInfo::name
* \brief The name of the IPA module
*
- * \var IPAModuleInfo::license
- * \brief License of the IPA module
- *
- * This license is used to determine whether to force isolation of the IPA in
- * a separate process. If the license is "Proprietary", then the IPA will
- * be isolated. If the license is open-source, then the IPA will be allowed to
- * run without isolation if the user enables it. The license should be an
- * SPDX license string. The following licenses are currently available to
- * allow the IPA to run unisolated:
- *
- * - GPL-2.0-only
- * - GPL-2.0-or-later
- * - GPL-3.0-only
- * - GPL-3.0-or-later
- * - LGPL-2.1-only
- * - LGPL-2.1-or-later
- * - LGPL-3.0-only
- * - LGPL-3.0-or-later
- *
- * Any other license will cause the IPA to be run isolated.
+ * The name may be used to build file system paths to IPA-specific resources.
+ * It shall only contain printable characters, and may not contain '*', '?' or
+ * '\'. For IPA modules included in libcamera, it shall match the directory of
+ * the IPA module in the source tree.
*
* \todo Allow user to choose to isolate open source IPAs
*/
@@ -262,7 +251,7 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
* The IPA module shared object file must be of the same endianness and
* bitness as libcamera.
*
- * The caller shall call the isValid() method after constructing an
+ * The caller shall call the isValid() function after constructing an
* IPAModule instance to verify the validity of the IPAModule.
*/
IPAModule::IPAModule(const std::string &libPath)
@@ -283,55 +272,67 @@ IPAModule::~IPAModule()
int IPAModule::loadIPAModuleInfo()
{
- int fd = open(libPath_.c_str(), O_RDONLY);
- if (fd < 0) {
- int ret = -errno;
+ File file{ libPath_ };
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
LOG(IPAModule, Error) << "Failed to open IPA library: "
- << strerror(-ret);
- return ret;
+ << strerror(-file.error());
+ return file.error();
}
- void *data = nullptr;
- size_t dataSize;
- void *map;
- size_t soSize;
- struct stat st;
- int ret = fstat(fd, &st);
- if (ret < 0)
- goto close;
- soSize = st.st_size;
- map = mmap(NULL, soSize, PROT_READ, MAP_PRIVATE, fd, 0);
- if (map == MAP_FAILED) {
- ret = -errno;
- goto close;
+ Span<const uint8_t> data = file.map();
+ int ret = elfVerifyIdent(data);
+ if (ret) {
+ LOG(IPAModule, Error) << "IPA module is not an ELF file";
+ return ret;
}
- ret = elfVerifyIdent(map, soSize);
- if (ret)
- goto unmap;
-
- std::tie(data, dataSize) = elfLoadSymbol(map, soSize, "ipaModuleInfo");
-
- if (data && dataSize == sizeof(info_))
- memcpy(&info_, data, dataSize);
+ Span<const uint8_t> info = elfLoadSymbol(data, "ipaModuleInfo");
+ if (info.size() < sizeof(info_)) {
+ LOG(IPAModule, Error) << "IPA module has no valid info";
+ return -EINVAL;
+ }
- if (!data)
- goto unmap;
+ memcpy(&info_, info.data(), sizeof(info_));
if (info_.moduleAPIVersion != IPA_MODULE_API_VERSION) {
LOG(IPAModule, Error) << "IPA module API version mismatch";
- ret = -EINVAL;
+ return -EINVAL;
}
-unmap:
- munmap(map, soSize);
-close:
- if (ret || !data)
+ /*
+ * Validate the IPA module name.
+ *
+ * \todo Consider module naming restrictions to avoid escaping from a
+ * base directory. Forbidding ".." may be enough, but this may be best
+ * implemented in a different layer.
+ */
+ std::string ipaName = info_.name;
+ auto iter = std::find_if_not(ipaName.begin(), ipaName.end(),
+ [](unsigned char c) -> bool {
+ return isprint(c) && c != '?' &&
+ c != '*' && c != '\\';
+ });
+ if (iter != ipaName.end()) {
LOG(IPAModule, Error)
- << "Error loading IPA module info for " << libPath_;
+ << "Invalid IPA module name '" << ipaName << "'";
+ return -EINVAL;
+ }
- close(fd);
- return ret;
+ /* Load the signature. Failures are not fatal. */
+ File sign{ libPath_ + ".sign" };
+ if (!sign.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(IPAModule, Debug)
+ << "IPA module " << libPath_ << " is not signed";
+ return 0;
+ }
+
+ data = sign.map(0, -1, File::MapFlag::Private);
+ signature_.resize(data.size());
+ memcpy(signature_.data(), data.data(), data.size());
+
+ LOG(IPAModule, Debug) << "IPA module " << libPath_ << " is signed";
+
+ return 0;
}
/**
@@ -363,6 +364,21 @@ const struct IPAModuleInfo &IPAModule::info() const
}
/**
+ * \brief Retrieve the IPA module signature
+ *
+ * The IPA module signature is stored alongside the IPA module in a file with a
+ * '.sign' suffix, and is loaded when the IPAModule instance is created. This
+ * function returns the signature without verifying it. If the signature is
+ * missing, the returned vector will be empty.
+ *
+ * \return The IPA module signature
+ */
+const std::vector<uint8_t> IPAModule::signature() const
+{
+ return signature_;
+}
+
+/**
* \brief Retrieve the IPA module path
*
* The IPA module path is the file name and path of the IPA module shared
@@ -378,13 +394,13 @@ const std::string &IPAModule::path() const
/**
* \brief Load the IPA implementation factory from the shared object
*
- * The IPA module shared object implements an ipa_context object to be used
- * by pipeline handlers. This method loads the factory function from the
- * shared object. Later, createContext() can be called to instantiate the
- * ipa_context.
+ * The IPA module shared object implements an IPAInterface object to be used
+ * by pipeline handlers. This function loads the factory function from the
+ * shared object. Later, createInterface() can be called to instantiate the
+ * IPAInterface.
*
- * This method only needs to be called successfully once, after which
- * createContext() can be called as many times as ipa_context instances are
+ * This function only needs to be called successfully once, after which
+ * createInterface() can be called as many times as IPAInterface instances are
* needed.
*
* Calling this function on an invalid module (as returned by isValid()) is
@@ -426,20 +442,18 @@ bool IPAModule::load()
}
/**
- * \brief Instantiate an IPA context
+ * \brief Instantiate an IPA interface
*
- * After loading the IPA module with load(), this method creates an instance of
- * the IPA module context. Ownership of the context is passed to the caller, and
- * the context shall be destroyed by calling the \ref ipa_context_ops::destroy
- * "ipa_context::ops::destroy()" function.
+ * After loading the IPA module with load(), this function creates an instance
+ * of the IPA module interface.
*
* Calling this function on a module that has not yet been loaded, or an
* invalid module (as returned by load() and isValid(), respectively) is
* an error.
*
- * \return The IPA context on success, or nullptr on error
+ * \return The IPA interface on success, or nullptr on error
*/
-struct ipa_context *IPAModule::createContext()
+IPAInterface *IPAModule::createInterface()
{
if (!valid_ || !loaded_)
return nullptr;
@@ -448,12 +462,12 @@ struct ipa_context *IPAModule::createContext()
}
/**
- * \brief Verify if the IPA module maches a given pipeline handler
+ * \brief Verify if the IPA module matches a given pipeline handler
* \param[in] pipe Pipeline handler to match with
* \param[in] minVersion Minimum acceptable version of IPA module
* \param[in] maxVersion Maximum acceptable version of IPA module
*
- * This method checks if this IPA module matches the \a pipe pipeline handler,
+ * This function checks if this IPA module matches the \a pipe pipeline handler,
* and the input version range.
*
* \return True if the pipeline handler matches the IPA module, or false otherwise
@@ -466,29 +480,9 @@ bool IPAModule::match(PipelineHandler *pipe,
!strcmp(info_.pipelineName, pipe->name());
}
-/**
- * \brief Verify if the IPA module is open source
- *
- * \sa IPAModuleInfo::license
- */
-bool IPAModule::isOpenSource() const
+std::string IPAModule::logPrefix() const
{
- static const char *osLicenses[] = {
- "GPL-2.0-only",
- "GPL-2.0-or-later",
- "GPL-3.0-only",
- "GPL-3.0-or-later",
- "LGPL-2.1-only",
- "LGPL-2.1-or-later",
- "LGPL-3.0-only",
- "LGPL-3.0-or-later",
- };
-
- for (unsigned int i = 0; i < ARRAY_SIZE(osLicenses); i++)
- if (!strcmp(osLicenses[i], info_.license))
- return true;
-
- return false;
+ return utils::basename(libPath_.c_str());
}
} /* namespace libcamera */
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 5fd88a4b..85004737 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,18 +2,19 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
-#include "ipa_proxy.h"
+#include "libcamera/internal/ipa_proxy.h"
-#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include <unistd.h>
-#include "log.h"
-#include "utils.h"
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <iostream>
+#include "libcamera/internal/ipa_module.h"
/**
* \file ipa_proxy.h
@@ -29,19 +30,27 @@ LOG_DEFINE_CATEGORY(IPAProxy)
* \brief IPA Proxy
*
 * Isolates an IPA into a separate process.
- *
- * Every subclass of proxy shall be registered with libcamera using
- * the REGISTER_IPA_PROXY() macro.
+ */
+
+/**
+ * \enum IPAProxy::ProxyState
+ * \brief Identifies the available operational states of the proxy
+ *
+ * \var IPAProxy::ProxyStopped
+ * \brief The proxy is not active and only synchronous operations are permitted
+ * \var IPAProxy::ProxyStopping
+ * \brief No new tasks can be submitted to the proxy; however, existing events
+ * can still be completed
+ * \var IPAProxy::ProxyRunning
+ * \brief The Proxy is active and asynchronous tasks may be queued
*/
/**
* \brief Construct an IPAProxy instance
- *
- * IPAProxy instances shall be constructed through the IPAProxyFactory::create()
- * method implemented by the respective factories.
+ * \param[in] ipam The IPA module
*/
-IPAProxy::IPAProxy()
- : valid_(false)
+IPAProxy::IPAProxy(IPAModule *ipam)
+ : valid_(false), state_(ProxyStopped), ipam_(ipam)
{
}
@@ -60,12 +69,108 @@ IPAProxy::~IPAProxy()
*/
/**
+ * \brief Retrieve the absolute path to an IPA configuration file
+ * \param[in] name The configuration file name
+ * \param[in] fallbackName The name of a fallback configuration file
+ *
+ * This function locates the configuration file for an IPA and returns its
+ * absolute path. It searches the following directories, in order:
+ *
+ * - All directories specified in the colon-separated LIBCAMERA_IPA_CONFIG_PATH
+ *   environment variable; or
+ * - If libcamera is not installed, the src/ipa/ directory within the source
+ *   tree; otherwise
+ * - The system sysconf (etc/libcamera/ipa) and the data (share/libcamera/ipa/)
+ * directories.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * Within each of those directories, the function looks for a subdirectory
+ * named after the IPA module name, as reported in IPAModuleInfo::name, and for
+ * a file named \a name within that directory. The \a name is IPA-specific.
+ *
+ * If the file named \a name is not found and \a fallbackName is non-empty then
+ * the whole search is repeated for \a fallbackName.
+ *
+ * \return The full path to the IPA configuration file, or an empty string if
+ * no configuration file can be found
+ */
+std::string IPAProxy::configurationFile(const std::string &name,
+ const std::string &fallbackName) const
+{
+ struct stat statbuf;
+ int ret;
+
+ /*
+ * The IPA module name can be used as-is to build directory names as it
+ * has been validated when loading the module.
+ */
+ std::string ipaName = ipam_->info().name;
+
+ /* Check the environment variable first. */
+ const char *confPaths = utils::secure_getenv("LIBCAMERA_IPA_CONFIG_PATH");
+ if (confPaths) {
+ for (const auto &dir : utils::split(confPaths, ":")) {
+ if (dir.empty())
+ continue;
+
+ std::string confPath = dir + "/" + ipaName + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+ }
+ }
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding IPA module.
+ */
+ std::string ipaConfDir = root + "src/ipa/" + ipaName + "/data";
+
+ LOG(IPAProxy, Info)
+ << "libcamera is not installed. Loading IPA configuration from '"
+ << ipaConfDir << "'";
+
+ std::string confPath = ipaConfDir + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ } else {
+ /* Else look in the system locations. */
+ for (const auto &dir : utils::split(IPA_CONFIG_DIR, ":")) {
+ std::string confPath = dir + "/" + ipaName + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+ }
+ }
+
+ if (fallbackName.empty()) {
+ LOG(IPAProxy, Error)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName << "'";
+ return std::string();
+ }
+
+ LOG(IPAProxy, Warning)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName
+ << "', falling back to '" << fallbackName << "'";
+ return configurationFile(fallbackName);
+}
+
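+/*
+ * Usage sketch (illustrative only, with hypothetical file names): an IPA
+ * would typically resolve a sensor-specific tuning file with a generic
+ * fallback, e.g.
+ *
+ *   std::string conf = proxy->configurationFile("imx219.yaml",
+ *                                               "uncalibrated.yaml");
+ *   if (conf.empty())
+ *           return -ENOENT;
+ */
+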
+/**
* \brief Find a valid full path for a proxy worker for a given executable name
* \param[in] file File name of proxy worker executable
*
* A proxy worker's executable could be found in either the global installation
* directory, or in the paths specified by the environment variable
- * LIBCAMERA_IPA_PROXY_PATH. This method checks the global install directory
+ * LIBCAMERA_IPA_PROXY_PATH. This function checks the global install directory
* first, then LIBCAMERA_IPA_PROXY_PATH in order, and returns the full path to
* the proxy worker executable that is specified by file. The proxy worker
* executable shall have exec permission.
@@ -102,7 +207,7 @@ std::string IPAProxy::resolvePath(const std::string &file) const
std::string ipaProxyDir = root + "src/libcamera/proxy/worker";
LOG(IPAProxy, Info)
- << "libcamera is not installed. Loading proxy workers from'"
+ << "libcamera is not installed. Loading proxy workers from '"
<< ipaProxyDir << "'";
std::string proxyPath = ipaProxyDir + proxyFile;
@@ -134,88 +239,14 @@ std::string IPAProxy::resolvePath(const std::string &file) const
*/
/**
- * \class IPAProxyFactory
- * \brief Registration of IPAProxy classes and creation of instances
- *
- * To facilitate discovery and instantiation of IPAProxy classes, the
- * IPAProxyFactory class maintains a registry of IPAProxy classes. Each
- * IPAProxy subclass shall register itself using the REGISTER_IPA_PROXY()
- * macro, which will create a corresponding instance of a IPAProxyFactory
- * subclass and register it with the static list of factories.
- */
-
-/**
- * \brief Construct a IPAProxy factory
- * \param[in] name Name of the IPAProxy class
- *
- * Creating an instance of the factory registers is with the global list of
- * factories, accessible through the factories() function.
- *
- * The factory \a name is used for debugging and IPAProxy matching purposes
- * and shall be unique.
- */
-IPAProxyFactory::IPAProxyFactory(const char *name)
- : name_(name)
-{
- registerType(this);
-}
-
-/**
- * \fn IPAProxyFactory::create()
- * \brief Create an instance of the IPAProxy corresponding to the factory
- * \param[in] ipam The IPA module
- *
- * This virtual function is implemented by the REGISTER_IPA_PROXY() macro.
- * It creates a IPAProxy instance that isolates an IPA interface designated
- * by the IPA module \a ipam.
- *
- * \return A pointer to a newly constructed instance of the IPAProxy subclass
- * corresponding to the factory
- */
-
-/**
- * \fn IPAProxyFactory::name()
- * \brief Retrieve the factory name
- * \return The factory name
- */
-
-/**
- * \brief Add a IPAProxy class to the registry
- * \param[in] factory Factory to use to construct the IPAProxy
+ * \var IPAProxy::state_
+ * \brief Current state of the IPAProxy
*
- * The caller is responsible to guarantee the uniqueness of the IPAProxy name.
- */
-void IPAProxyFactory::registerType(IPAProxyFactory *factory)
-{
- std::vector<IPAProxyFactory *> &factories = IPAProxyFactory::factories();
-
- factories.push_back(factory);
-
- LOG(IPAProxy, Debug)
- << "Registered proxy \"" << factory->name() << "\"";
-}
-
-/**
- * \brief Retrieve the list of all IPAProxy factories
- *
- * The static factories map is defined inside the function to ensure it gets
- * initialized on first use, without any dependency on link order.
- *
- * \return The list of pipeline handler factories
- */
-std::vector<IPAProxyFactory *> &IPAProxyFactory::factories()
-{
- static std::vector<IPAProxyFactory *> factories;
- return factories;
-}
-
-/**
- * \def REGISTER_IPA_PROXY
- * \brief Register a IPAProxy with the IPAProxy factory
- * \param[in] proxy Class name of IPAProxy derived class to register
+ * The IPAProxy can be Running, Stopped, or Stopping.
*
- * Register a proxy subclass with the factory and make it available to
- * isolate IPA modules.
+ * This state ensures that asynchronous functions are only called while the
+ * proxy is running, preventing new tasks from being submitted while still
+ * enabling events to complete when the IPAProxy is stopping.
*/
} /* namespace libcamera */
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
new file mode 100644
index 00000000..5d8c92c2
--- /dev/null
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * IPA module signing public key
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include "libcamera/internal/ipa_manager.h"
+
+namespace libcamera {
+
+#if HAVE_IPA_PUBKEY
+const uint8_t IPAManager::publicKeyData_[] = {
+ ${ipa_key}
+};
+
+const PubKey IPAManager::pubKey_{ { IPAManager::publicKeyData_ } };
+#endif
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
new file mode 100644
index 00000000..548299d0
--- /dev/null
+++ b/src/libcamera/ipc_pipe.cpp
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm IPC module for IPA proxies
+ */
+
+#include "libcamera/internal/ipc_pipe.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file ipc_pipe.h
+ * \brief IPC mechanism for IPA isolation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPCPipe)
+
+/**
+ * \struct IPCMessage::Header
+ * \brief Container for an IPCMessage header
+ *
+ * Holds a cmd code for the IPC message, and a cookie.
+ */
+
+/**
+ * \var IPCMessage::Header::cmd
+ * \brief Type of IPCMessage
+ *
+ * Typically used to carry a command code for an RPC.
+ */
+
+/**
+ * \var IPCMessage::Header::cookie
+ * \brief Cookie to identify the message and a corresponding reply
+ *
+ * Populated and used by IPCPipe implementations for matching calls with
+ * replies.
+ */
+
+/**
+ * \class IPCMessage
+ * \brief IPC message to be passed through IPC message pipe
+ */
+
+/**
+ * \brief Construct an empty IPCMessage instance
+ */
+IPCMessage::IPCMessage()
+ : header_(Header{ 0, 0 })
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance with a given command code
+ * \param[in] cmd The command code
+ */
+IPCMessage::IPCMessage(uint32_t cmd)
+ : header_(Header{ cmd, 0 })
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance with a given header
+ * \param[in] header The header that the constructed IPCMessage will contain
+ */
+IPCMessage::IPCMessage(const Header &header)
+ : header_(header)
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance from an IPC payload
+ * \param[in] payload The IPCUnixSocket payload to construct from
+ *
+ * This essentially converts an IPCUnixSocket payload into an IPCMessage.
+ * The header is extracted from the payload into the IPCMessage's header field.
+ *
+ * If the IPCUnixSocket payload contains any valid file descriptors, they
+ * will all be invalidated.
+ */
+IPCMessage::IPCMessage(IPCUnixSocket::Payload &payload)
+{
+ memcpy(&header_, payload.data.data(), sizeof(header_));
+ data_ = std::vector<uint8_t>(payload.data.begin() + sizeof(header_),
+ payload.data.end());
+ for (int32_t &fd : payload.fds)
+ fds_.push_back(SharedFD(std::move(fd)));
+}
+
+/**
+ * \brief Create an IPCUnixSocket payload from the IPCMessage
+ *
+ * This essentially converts the IPCMessage into an IPCUnixSocket payload.
+ *
+ * \todo Resolve the layering violation (add other converters later?)
+ */
+IPCUnixSocket::Payload IPCMessage::payload() const
+{
+ IPCUnixSocket::Payload payload;
+
+ payload.data.resize(sizeof(Header) + data_.size());
+ payload.fds.reserve(fds_.size());
+
+ memcpy(payload.data.data(), &header_, sizeof(Header));
+
+ if (data_.size() > 0) {
+ /* \todo Make this work without copy */
+ memcpy(payload.data.data() + sizeof(Header),
+ data_.data(), data_.size());
+ }
+
+ for (const SharedFD &fd : fds_)
+ payload.fds.push_back(fd.get());
+
+ return payload;
+}
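
A short sketch of the round trip between IPCMessage and payload, assuming a caller that fills data() directly; the command code, the bytes and workerFd are illustrative:

#include <unistd.h>

#include "libcamera/internal/ipc_pipe.h"

using namespace libcamera;

void buildAndParse(int workerFd)
{
        IPCMessage msg(42);
        msg.data() = { 0xde, 0xad, 0xbe, 0xef };
        msg.fds().push_back(SharedFD(dup(workerFd)));

        IPCUnixSocket::Payload payload = msg.payload();

        /* The header (cmd and cookie) travels in front of the data bytes. */
        IPCMessage reconstructed(payload);
}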
+
+/**
+ * \fn IPCMessage::header()
+ * \brief Returns a reference to the header
+ */
+
+/**
+ * \fn IPCMessage::data()
+ * \brief Returns a reference to the byte vector containing data
+ */
+
+/**
+ * \fn IPCMessage::fds()
+ * \brief Returns a reference to the vector containing file descriptors
+ */
+
+/**
+ * \fn IPCMessage::header() const
+ * \brief Returns a const reference to the header
+ */
+
+/**
+ * \fn IPCMessage::data() const
+ * \brief Returns a const reference to the byte vector containing data
+ */
+
+/**
+ * \fn IPCMessage::fds() const
+ * \brief Returns a const reference to the vector containing file descriptors
+ */
+
+/**
+ * \class IPCPipe
+ * \brief IPC message pipe for IPA isolation
+ *
+ * Virtual class to model an IPC message pipe for use by IPA proxies for IPA
+ * isolation. sendSync() and sendAsync() must be implemented, and the recv
+ * signal must be emitted whenever new data is available.
+ */
+
+/**
+ * \brief Construct an IPCPipe instance
+ */
+IPCPipe::IPCPipe()
+ : connected_(false)
+{
+}
+
+IPCPipe::~IPCPipe()
+{
+}
+
+/**
+ * \fn IPCPipe::isConnected()
+ * \brief Check if the IPCPipe instance is connected
+ *
+ * An IPCPipe instance is connected if IPC is successfully set up.
+ *
+ * \return True if the IPCPipe is connected, false otherwise
+ */
+
+/**
+ * \fn IPCPipe::sendSync()
+ * \brief Send a message over IPC synchronously
+ * \param[in] in Data to send
+ * \param[in] out IPCMessage instance in which to receive data, if applicable
+ *
+ * This function will not return until a response is received. The event loop
+ * will still continue to execute, however.
+ *
+ * \return Zero on success, negative error code otherwise
+ *
+ * \todo Determine if the event loop should limit the types of messages it
+ * processes, to avoid reentrancy in the caller, and carefully document what
+ * the caller needs to implement to make this safe.
+ */
+
+/**
+ * \fn IPCPipe::sendAsync()
+ * \brief Send a message over IPC asynchronously
+ * \param[in] data Data to send
+ *
+ * This function will return immediately after sending the message.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \var IPCPipe::recv
+ * \brief Signal to be emitted when a message is received over IPC
+ *
+ * When a message is received over IPC, this signal shall be emitted. Users must
+ * connect to this signal to receive messages.
+ */
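
A hedged sketch of the calling convention from the proxy side; handleEvent and the command value are hypothetical names, not part of this API:

/* Slot for unsolicited messages emitted through recv. */
void handleEvent(const IPCMessage &msg);

int initWorker(IPCPipe *pipe)
{
        /* Unsolicited messages from the worker arrive through recv. */
        pipe->recv.connect(&handleEvent);

        IPCMessage request(1);          /* 1 stands in for a real command code */
        IPCMessage reply;

        int ret = pipe->sendSync(request, &reply);
        if (ret < 0)
                return ret;

        /* reply.data() and reply.fds() now hold the response. */
        return 0;
}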
+
+/**
+ * \var IPCPipe::connected_
+ * \brief Flag to indicate if the IPCPipe instance is connected
+ *
+ * An IPCPipe instance is connected if IPC is successfully set up.
+ *
+ * This flag can be read via IPCPipe::isConnected().
+ *
+ * Implementations of the IPCPipe class should set this flag upon successful
+ * connection.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
new file mode 100644
index 00000000..668ec73b
--- /dev/null
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm IPC module using unix socket
+ */
+
+#include "libcamera/internal/ipc_pipe_unixsocket.h"
+
+#include <vector>
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/timer.h>
+
+#include "libcamera/internal/ipc_pipe.h"
+#include "libcamera/internal/ipc_unixsocket.h"
+#include "libcamera/internal/process.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPCPipe)
+
+IPCPipeUnixSocket::IPCPipeUnixSocket(const char *ipaModulePath,
+ const char *ipaProxyWorkerPath)
+ : IPCPipe()
+{
+ std::vector<int> fds;
+ std::vector<std::string> args;
+ args.push_back(ipaModulePath);
+
+ socket_ = std::make_unique<IPCUnixSocket>();
+ UniqueFD fd = socket_->create();
+ if (!fd.isValid()) {
+ LOG(IPCPipe, Error) << "Failed to create socket";
+ return;
+ }
+ socket_->readyRead.connect(this, &IPCPipeUnixSocket::readyRead);
+ args.push_back(std::to_string(fd.get()));
+ fds.push_back(fd.get());
+
+ proc_ = std::make_unique<Process>();
+ int ret = proc_->start(ipaProxyWorkerPath, args, fds);
+ if (ret) {
+ LOG(IPCPipe, Error)
+ << "Failed to start proxy worker process";
+ return;
+ }
+
+ connected_ = true;
+}
+
+IPCPipeUnixSocket::~IPCPipeUnixSocket()
+{
+}
+
+int IPCPipeUnixSocket::sendSync(const IPCMessage &in, IPCMessage *out)
+{
+ IPCUnixSocket::Payload response;
+
+ int ret = call(in.payload(), &response, in.header().cookie);
+ if (ret) {
+ LOG(IPCPipe, Error) << "Failed to call sync";
+ return ret;
+ }
+
+ if (out)
+ *out = IPCMessage(response);
+
+ return 0;
+}
+
+int IPCPipeUnixSocket::sendAsync(const IPCMessage &data)
+{
+ int ret = socket_->send(data.payload());
+ if (ret) {
+ LOG(IPCPipe, Error) << "Failed to call async";
+ return ret;
+ }
+
+ return 0;
+}
+
+void IPCPipeUnixSocket::readyRead()
+{
+ IPCUnixSocket::Payload payload;
+ int ret = socket_->receive(&payload);
+ if (ret) {
+ LOG(IPCPipe, Error) << "Receive message failed: " << ret;
+ return;
+ }
+
+ /* \todo Use span to avoid the double copy when callData is found. */
+ if (payload.data.size() < sizeof(IPCMessage::Header)) {
+ LOG(IPCPipe, Error) << "Not enough data received";
+ return;
+ }
+
+ IPCMessage ipcMessage(payload);
+
+ auto callData = callData_.find(ipcMessage.header().cookie);
+ if (callData != callData_.end()) {
+ *callData->second.response = std::move(payload);
+ callData->second.done = true;
+ return;
+ }
+
+ /* Received unexpected data, this means it's a call from the IPA. */
+ recv.emit(ipcMessage);
+}
+
+int IPCPipeUnixSocket::call(const IPCUnixSocket::Payload &message,
+ IPCUnixSocket::Payload *response, uint32_t cookie)
+{
+ Timer timeout;
+ int ret;
+
+ const auto result = callData_.insert({ cookie, { response, false } });
+ const auto &iter = result.first;
+
+ ret = socket_->send(message);
+ if (ret) {
+ callData_.erase(iter);
+ return ret;
+ }
+
+ /* \todo Make this less dangerous, see IPCPipe::sendSync() */
+ timeout.start(2000ms);
+ while (!iter->second.done) {
+ if (!timeout.isRunning()) {
+ LOG(IPCPipe, Error) << "Call timeout!";
+ callData_.erase(iter);
+ return -ETIMEDOUT;
+ }
+
+ Thread::current()->eventDispatcher()->processEvents();
+ }
+
+ callData_.erase(iter);
+
+ return 0;
+}
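
The synchronous wait in call() follows a general pump-the-event-loop pattern, shown below in isolation as a sketch; 'done' stands for an illustrative flag that a signal handler would set, and the headers are the same ones included at the top of this file:

bool waitFor(const bool &done, std::chrono::milliseconds limit)
{
        Timer timeout;
        timeout.start(limit);

        while (!done) {
                /* The timer stops when it expires: stopped means timed out. */
                if (!timeout.isRunning())
                        return false;

                /* Re-enter the event loop so socket notifiers can fire. */
                Thread::current()->eventDispatcher()->processEvents();
        }

        return true;
}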
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 6e5cab89..002053e3 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,17 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
-#include "ipc_unixsocket.h"
+#include "libcamera/internal/ipc_unixsocket.h"
+#include <array>
#include <poll.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
+#include <vector>
-#include "log.h"
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
/**
* \file ipc_unixsocket.h
@@ -57,7 +60,7 @@ LOG_DEFINE_CATEGORY(IPCUnixSocket)
*
* Establishment of an IPC channel is asymmetrical. The side that initiates
* communication first instantiates a local side socket and creates the channel
- * with create(). The method returns a file descriptor for the remote side of
+ * with create(). The function returns a file descriptor for the remote side of
* the channel, which is passed to the remote process through an out-of-band
* communication method. The remote side then instantiates a socket, and binds
* it to the other side by passing the file descriptor to bind(). At that point
@@ -67,7 +70,7 @@ LOG_DEFINE_CATEGORY(IPCUnixSocket)
*/
IPCUnixSocket::IPCUnixSocket()
- : fd_(-1), headerReceived_(false), notifier_(nullptr)
+ : headerReceived_(false), notifier_(nullptr)
{
}
@@ -79,15 +82,15 @@ IPCUnixSocket::~IPCUnixSocket()
/**
* \brief Create a new IPC channel
*
- * This method creates a new IPC channel. The socket instance is bound to the
- * local side of the channel, and the method returns a file descriptor bound to
- * the remote side. The caller is responsible for passing the file descriptor to
- * the remote process, where it can be used with IPCUnixSocket::bind() to bind
- * the remote side socket.
+ * This function creates a new IPC channel. The socket instance is bound to the
+ * local side of the channel, and the function returns a file descriptor bound
+ * to the remote side. The caller is responsible for passing the file descriptor
+ * to the remote process, where it can be used with IPCUnixSocket::bind() to
+ * bind the remote side socket.
*
- * \return A file descriptor on success, negative error code on failure
+ * \return A file descriptor, valid on success or invalid on failure
*/
-int IPCUnixSocket::create()
+UniqueFD IPCUnixSocket::create()
{
int sockets[2];
int ret;
@@ -97,33 +100,37 @@ int IPCUnixSocket::create()
ret = -errno;
LOG(IPCUnixSocket, Error)
<< "Failed to create socket pair: " << strerror(-ret);
- return ret;
+ return {};
}
- ret = bind(sockets[0]);
- if (ret)
- return ret;
+ std::array<UniqueFD, 2> socketFds{
+ UniqueFD(sockets[0]),
+ UniqueFD(sockets[1]),
+ };
- return sockets[1];
+ if (bind(std::move(socketFds[0])) < 0)
+ return {};
+
+ return std::move(socketFds[1]);
}
/**
* \brief Bind to an existing IPC channel
* \param[in] fd File descriptor
*
- * This method binds the socket instance to an existing IPC channel identified
+ * This function binds the socket instance to an existing IPC channel identified
* by the file descriptor \a fd. The file descriptor is obtained from the
- * IPCUnixSocket::create() method.
+ * IPCUnixSocket::create() function.
*
* \return 0 on success or a negative error code otherwise
*/
-int IPCUnixSocket::bind(int fd)
+int IPCUnixSocket::bind(UniqueFD fd)
{
if (isBound())
return -EINVAL;
- fd_ = fd;
- notifier_ = new EventNotifier(fd_, EventNotifier::Read);
+ fd_ = std::move(fd);
+ notifier_ = new EventNotifier(fd_.get(), EventNotifier::Read);
notifier_->activated.connect(this, &IPCUnixSocket::dataNotifier);
return 0;
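
Putting create() and bind() together, a sketch of channel establishment. The names inheritedFd and onReadyRead are hypothetical, the fd number travels out of band (for instance in the worker's argv, as IPCPipeUnixSocket does), and in real code both sockets would outlive these functions:

/* Hypothetical slot, matching the parameter-less readyRead signal. */
void onReadyRead();

void initiatorSide()
{
        IPCUnixSocket local;

        UniqueFD remote = local.create();
        if (!remote.isValid())
                return;

        /*
         * Hand remote.get() to the remote process out of band, for example
         * as an inherited fd number on the worker's command line.
         */
}

void remoteSide(int inheritedFd)
{
        IPCUnixSocket worker;

        worker.bind(UniqueFD(inheritedFd));
        worker.readyRead.connect(&onReadyRead);
}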
@@ -142,9 +149,7 @@ void IPCUnixSocket::close()
delete notifier_;
notifier_ = nullptr;
- ::close(fd_);
-
- fd_ = -1;
+ fd_.reset();
headerReceived_ = false;
}
@@ -154,14 +159,14 @@ void IPCUnixSocket::close()
*/
bool IPCUnixSocket::isBound() const
{
- return fd_ != -1;
+ return fd_.isValid();
}
/**
* \brief Send a message payload
* \param[in] payload Message payload to send
*
- * This method queues the message payload for transmission to the other end of
+ * This function queues the message payload for transmission to the other end of
* the IPC channel. It returns immediately, before the message is delivered to
* the remote side.
*
@@ -181,7 +186,7 @@ int IPCUnixSocket::send(const Payload &payload)
if (!hdr.data && !hdr.fds)
return -EINVAL;
- ret = ::send(fd_, &hdr, sizeof(hdr), 0);
+ ret = ::send(fd_.get(), &hdr, sizeof(hdr), 0);
if (ret < 0) {
ret = -errno;
LOG(IPCUnixSocket, Error)
@@ -196,7 +201,7 @@ int IPCUnixSocket::send(const Payload &payload)
* \brief Receive a message payload
* \param[out] payload Payload where to write the received message
*
- * This method receives the message payload from the IPC channel and writes it
+ * This function receives the message payload from the IPC channel and writes it
* to the \a payload. If no message payload is available, it returns
* immediately with -EAGAIN. The \ref readyRead signal shall be used to receive
* notification of message availability.
@@ -243,10 +248,9 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
iov[0].iov_base = const_cast<void *>(buffer);
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -259,9 +263,10 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
msg.msg_control = cmsg;
msg.msg_controllen = cmsg->cmsg_len;
msg.msg_flags = 0;
- memcpy(CMSG_DATA(cmsg), fds, num * sizeof(uint32_t));
+ if (fds)
+ memcpy(CMSG_DATA(cmsg), fds, num * sizeof(uint32_t));
- if (sendmsg(fd_, &msg, 0) < 0) {
+ if (sendmsg(fd_.get(), &msg, 0) < 0) {
int ret = -errno;
LOG(IPCUnixSocket, Error)
<< "Failed to sendmsg: " << strerror(-ret);
@@ -278,10 +283,9 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
iov[0].iov_base = buffer;
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -295,7 +299,7 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
msg.msg_controllen = cmsg->cmsg_len;
msg.msg_flags = 0;
- if (recvmsg(fd_, &msg, 0) < 0) {
+ if (recvmsg(fd_.get(), &msg, 0) < 0) {
int ret = -errno;
if (ret != -EAGAIN)
LOG(IPCUnixSocket, Error)
@@ -303,18 +307,19 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
return ret;
}
- memcpy(fds, CMSG_DATA(cmsg), num * sizeof(uint32_t));
+ if (fds)
+ memcpy(fds, CMSG_DATA(cmsg), num * sizeof(uint32_t));
return 0;
}
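
For reference, the same SCM_RIGHTS technique reduced to its minimum: a standalone POSIX sketch, not part of this class, that sends a single file descriptor with one byte of data over a connected AF_UNIX socket:

#include <errno.h>
#include <string.h>
#include <sys/socket.h>

int sendOneFd(int sock, int fd)
{
        char data = 0;
        struct iovec iov = { &data, sizeof(data) };

        /* Control buffer sized for exactly one fd. */
        char buf[CMSG_SPACE(sizeof(int))];
        memset(buf, 0, sizeof(buf));

        struct msghdr msg = {};
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = buf;
        msg.msg_controllen = sizeof(buf);

        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -errno : 0;
}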
-void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
+void IPCUnixSocket::dataNotifier()
{
int ret;
if (!headerReceived_) {
/* Receive the header. */
- ret = ::recv(fd_, &header_, sizeof(header_), 0);
+ ret = ::recv(fd_.get(), &header_, sizeof(header_), 0);
if (ret < 0) {
ret = -errno;
LOG(IPCUnixSocket, Error)
@@ -328,9 +333,9 @@ void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
/*
* If the payload has arrived, disable the notifier and emit the
* readyRead signal. The notifier will be reenabled by the receive()
- * method.
+ * function.
*/
- struct pollfd fds = { fd_, POLLIN, 0 };
+ struct pollfd fds = { fd_.get(), POLLIN, 0 };
ret = poll(&fds, 1, 0);
if (ret < 0)
return;
@@ -339,7 +344,7 @@ void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
return;
notifier_->setEnabled(false);
- readyRead.emit(this);
+ readyRead.emit();
}
} /* namespace libcamera */
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
new file mode 100644
index 00000000..f54bbf21
--- /dev/null
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Mapped Framebuffer support
+ */
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <algorithm>
+#include <errno.h>
+#include <map>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file mapped_framebuffer.h
+ * \brief Frame buffer memory mapping support
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Buffer)
+
+/**
+ * \class MappedBuffer
+ * \brief Provide an interface to support managing memory mapped buffers
+ *
+ * The MappedBuffer interface provides access to a set of MappedPlanes which
+ * are available for access by the CPU.
+ *
+ * This class is not meant to be constructed directly, but instead derived
+ * classes should be used to implement the correct mapping of a source buffer.
+ *
+ * This allows treating CPU accessible memory through a generic interface
+ * regardless of whether it originates from a libcamera FrameBuffer or other
+ * source.
+ */
+
+/**
+ * \typedef MappedBuffer::Plane
+ * \brief A mapped region of memory accessible to the CPU
+ *
+ * The MappedBuffer::Plane uses the Span interface to describe the mapped memory
+ * region.
+ */
+
+/**
+ * \brief Construct an empty MappedBuffer
+ */
+MappedBuffer::MappedBuffer()
+ : error_(0)
+{
+}
+
+/**
+ * \brief Move constructor, construct the MappedBuffer with the contents of \a
+ * other using move semantics
+ * \param[in] other The other MappedBuffer
+ *
+ * Moving a MappedBuffer moves the mappings contained in \a other to the new
+ * MappedBuffer and invalidates \a other.
+ *
+ * No mappings are unmapped or destroyed in this process.
+ */
+MappedBuffer::MappedBuffer(MappedBuffer &&other)
+{
+ *this = std::move(other);
+}
+
+/**
+ * \brief Move assignment operator, replace the mappings with those of \a other
+ * \param[in] other The other MappedBuffer
+ *
+ * Moving a MappedBuffer moves the mappings contained in \a other to the new
+ * MappedBuffer and invalidates \a other.
+ *
+ * No mappings are unmapped or destroyed in this process.
+ */
+MappedBuffer &MappedBuffer::operator=(MappedBuffer &&other)
+{
+ error_ = other.error_;
+ planes_ = std::move(other.planes_);
+ maps_ = std::move(other.maps_);
+ other.error_ = -ENOENT;
+
+ return *this;
+}
+
+MappedBuffer::~MappedBuffer()
+{
+ for (Plane &map : maps_)
+ munmap(map.data(), map.size());
+}
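
A tiny sketch of the ownership transfer, using the MappedFrameBuffer subclass documented below; 'buffer' is a hypothetical FrameBuffer pointer:

        MappedFrameBuffer a(buffer, MappedFrameBuffer::MapFlag::Read);
        MappedFrameBuffer b = std::move(a);

        /* b now owns the mappings and unmaps them on destruction; a is invalid. */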
+
+/**
+ * \fn MappedBuffer::isValid()
+ * \brief Check if the MappedBuffer instance is valid
+ * \return True if the MappedBuffer has valid mappings, false otherwise
+ */
+
+/**
+ * \fn MappedBuffer::error()
+ * \brief Retrieve the map error status
+ *
+ * This function retrieves the error status from the MappedBuffer.
+ * The error status is a negative number as defined by errno.h. If
+ * no error occurred, this function returns 0.
+ *
+ * \return The map error code
+ */
+
+/**
+ * \fn MappedBuffer::planes()
+ * \brief Retrieve the mapped planes
+ *
+ * This function retrieves the successfully mapped planes stored as a vector
+ * of Span<uint8_t> to provide access to the mapped memory.
+ *
+ * \return A vector of the mapped planes
+ */
+
+/**
+ * \var MappedBuffer::error_
+ * \brief Stores the error value if present
+ *
+ * MappedBuffer derived classes shall set this to a negative value as defined
+ * by errno.h if an error occurred during the mapping process.
+ */
+
+/**
+ * \var MappedBuffer::planes_
+ * \brief Stores the internal mapped planes
+ *
+ * MappedBuffer derived classes shall store the mappings they create in this
+ * vector, which holds spans pointing to the start of each mapped plane.
+ */
+
+/**
+ * \var MappedBuffer::maps_
+ * \brief Stores the mapped buffer
+ *
+ * MappedBuffer derived classes shall store the mappings they create in this
+ * vector, which is iterated over during destruction to unmap any memory
+ * mappings that completed successfully.
+ */
+
+/**
+ * \class MappedFrameBuffer
+ * \brief Map a FrameBuffer using the MappedBuffer interface
+ */
+
+/**
+ * \enum MappedFrameBuffer::MapFlag
+ * \brief Specify the mapping mode for the FrameBuffer
+ * \var MappedFrameBuffer::Read
+ * \brief Create a read-only mapping
+ * \var MappedFrameBuffer::Write
+ * \brief Create a write-only mapping
+ * \var MappedFrameBuffer::ReadWrite
+ * \brief Create a mapping that can be both read and written
+ */
+
+/**
+ * \typedef MappedFrameBuffer::MapFlags
+ * \brief A bitwise combination of MappedFrameBuffer::MapFlag values
+ */
+
+/**
+ * \brief Map all planes of a FrameBuffer
+ * \param[in] buffer FrameBuffer to be mapped
+ * \param[in] flags Protection flags to apply to map
+ *
+ * Construct an object to map a frame buffer for CPU access. The mapping can
+ * be made Read only, Write only, or Read and Write by setting the MapFlag
+ * flags accordingly.
+ */
+MappedFrameBuffer::MappedFrameBuffer(const FrameBuffer *buffer, MapFlags flags)
+{
+ ASSERT(!buffer->planes().empty());
+ planes_.reserve(buffer->planes().size());
+
+ int mmapFlags = 0;
+
+ if (flags & MapFlag::Read)
+ mmapFlags |= PROT_READ;
+
+ if (flags & MapFlag::Write)
+ mmapFlags |= PROT_WRITE;
+
+ struct MappedBufferInfo {
+ uint8_t *address = nullptr;
+ size_t mapLength = 0;
+ size_t dmabufLength = 0;
+ };
+ std::map<int, MappedBufferInfo> mappedBuffers;
+
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ if (mappedBuffers.find(fd) == mappedBuffers.end()) {
+ const size_t length = lseek(fd, 0, SEEK_END);
+ mappedBuffers[fd] = MappedBufferInfo{ nullptr, 0, length };
+ }
+
+ const size_t length = mappedBuffers[fd].dmabufLength;
+
+ if (plane.offset > length ||
+ plane.offset + plane.length > length) {
+ LOG(Buffer, Fatal) << "plane is out of buffer: "
+ << "buffer length=" << length
+ << ", plane offset=" << plane.offset
+ << ", plane length=" << plane.length;
+ return;
+ }
+ size_t &mapLength = mappedBuffers[fd].mapLength;
+ mapLength = std::max(mapLength,
+ static_cast<size_t>(plane.offset + plane.length));
+ }
+
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ auto &info = mappedBuffers[fd];
+ if (!info.address) {
+ void *address = mmap(nullptr, info.mapLength, mmapFlags,
+ MAP_SHARED, fd, 0);
+ if (address == MAP_FAILED) {
+ error_ = -errno;
+ LOG(Buffer, Error) << "Failed to mmap plane: "
+ << strerror(-error_);
+ return;
+ }
+
+ info.address = static_cast<uint8_t *>(address);
+ maps_.emplace_back(info.address, info.mapLength);
+ }
+
+ planes_.emplace_back(info.address + plane.offset, plane.length);
+ }
+}
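
A hedged usage sketch, inside a hypothetical function that receives a completed FrameBuffer:

void inspect(const FrameBuffer *buffer)
{
        MappedFrameBuffer mapped(buffer, MappedFrameBuffer::MapFlag::Read);
        if (!mapped.isValid()) {
                LOG(Buffer, Error) << "Failed to map buffer: " << mapped.error();
                return;
        }

        /* One CPU-visible span per plane, with offsets already applied. */
        Span<uint8_t> plane0 = mapped.planes()[0];
        uint8_t first = plane0[0];
        (void)first;
}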
+
+} /* namespace libcamera */
diff --git a/src/libcamera/matrix.cpp b/src/libcamera/matrix.cpp
new file mode 100644
index 00000000..4d95a19b
--- /dev/null
+++ b/src/libcamera/matrix.cpp
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Matrix and related operations
+ */
+
+#include "libcamera/internal/matrix.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file matrix.h
+ * \brief Matrix class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Matrix)
+
+/**
+ * \class Matrix
+ * \brief Matrix class
+ * \tparam T Type of numerical values to be stored in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ */
+
+/**
+ * \fn Matrix::Matrix()
+ * \brief Construct a zero matrix
+ */
+
+/**
+ * \fn Matrix::Matrix(const std::array<T, Rows * Cols> &data)
+ * \brief Construct a matrix from supplied data
+ * \param[in] data Data from which to construct a matrix
+ *
+ * \a data is a one-dimensional vector and will be turned into a matrix in
+ * row-major order. The size of \a data must be equal to the product of the
+ * number of rows and columns of the matrix (Rows x Cols).
+ */
+
+/**
+ * \fn Matrix::identity()
+ * \brief Construct an identity matrix
+ */
+
+/**
+ * \fn Matrix::toString()
+ * \brief Assemble and return a string describing the matrix
+ * \return A string describing the matrix
+ */
+
+/**
+ * \fn Span<const T, Cols> Matrix::operator[](size_t i) const
+ * \brief Index to a row in the matrix
+ * \param[in] i Index of row to retrieve
+ *
+ * This operator[] returns a Span, which can then be indexed into again with
+ * another operator[], allowing a convenient m[i][j] to access elements of the
+ * matrix. Note that the lifetime of the Span returned by this first-level
+ * operator[] is bound to that of the Matrix itself, so it is not recommended
+ * to save the Span that is the result of this operator[].
+ *
+ * \return Row \a i from the matrix, as a Span
+ */
+
+/**
+ * \fn Matrix::operator[](size_t i)
+ * \copydoc Matrix::operator[](size_t i) const
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> &Matrix::operator*=(U d)
+ * \brief Multiply the matrix by a scalar in-place
+ * \tparam U Type of the numerical scalar value
+ * \param d The scalar multiplier
+ * \return Product of this matrix and scalar \a d
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
+ * \brief Multiply the matrix by a scalar
+ * \tparam T Type of the numerical scalar value
+ * \tparam U Type of numerical values in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ * \param d The scalar multiplier
+ * \param m The matrix
+ * \return Product of scalar \a d and matrix \a m
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
+ * \copydoc operator*(T d, const Matrix<U, Rows, Cols> &m)
+ */
+
+/**
+ * \fn Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
+ * \brief Matrix multiplication
+ * \tparam T Type of numerical values in the matrices
+ * \tparam R1 Number of rows in the first matrix
+ * \tparam C1 Number of columns in the first matrix
+ * \tparam R2 Number of rows in the second matrix
+ * \tparam C2 Number of columns in the second matrix
+ * \param m1 Multiplicand matrix
+ * \param m2 Multiplier matrix
+ * \return Matrix product of matrices \a m1 and \a m2
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
+ * \brief Matrix addition
+ * \tparam T Type of numerical values in the matrices
+ * \tparam Rows Number of rows in the matrices
+ * \tparam Cols Number of columns in the matrices
+ * \param m1 Summand matrix
+ * \param m2 Summand matrix
+ * \return Matrix sum of matrices \a m1 and \a m2
+ */
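
A usage sketch based on the operators documented above; the values are illustrative:

        Matrix<float, 3, 3> ccm({ 1.0f, 0.0f, 0.0f,
                                  0.0f, 1.0f, 0.0f,
                                  0.0f, 0.0f, 1.0f });

        Matrix<float, 3, 3> scaled = 2.0f * ccm;        /* scalar product */
        Matrix<float, 3, 3> sum = ccm + scaled;         /* element-wise sum */
        Matrix<float, 3, 3> prod = ccm * scaled;        /* matrix product */
        float m01 = prod[0][1];                         /* row Span, then column */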
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values. Its size shall be equal
+ * to the product of the number of rows and columns of the matrix (Rows x
+ * Cols). The values shall be stored in row-major order.
+ */
+bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Matrix, Error)
+ << "Wrong number of values in matrix: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 0d6b5efd..d71dad74 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
-#include "media_device.h"
+#include "libcamera/internal/media_device.h"
#include <errno.h>
#include <fcntl.h>
@@ -18,7 +18,7 @@
#include <linux/media.h>
-#include "log.h"
+#include <libcamera/base/log.h>
/**
* \file media_device.h
@@ -44,13 +44,13 @@ LOG_DEFINE_CATEGORY(MediaDevice)
* MediaEntity, MediaPad and MediaLink are created to model the media graph,
* and stored in a map indexed by object id.
*
- * The graph is valid once successfully populated, as reported by the valid()
+ * The graph is valid once successfully populated, as reported by the isValid()
* function. It can be queried to list all entities(), or entities can be
* looked up by name with getEntityByName(). The graph can be traversed from
* entity to entity through pads and links as exposed by the corresponding
* classes.
*
- * Media device can be claimed for exclusive use with acquire(), released with
+ * Media devices can be claimed for exclusive use with acquire(), released with
* release() and tested with busy(). This mechanism is aimed at pipeline
* managers to claim media devices they support during enumeration.
*/
@@ -63,15 +63,13 @@ LOG_DEFINE_CATEGORY(MediaDevice)
* populate() before the media graph can be queried.
*/
MediaDevice::MediaDevice(const std::string &deviceNode)
- : deviceNode_(deviceNode), fd_(-1), valid_(false), acquired_(false),
- lockOwner_(false)
+ : deviceNode_(deviceNode), valid_(false), acquired_(false)
{
}
MediaDevice::~MediaDevice()
{
- if (fd_ != -1)
- ::close(fd_);
+ fd_.reset();
clear();
}
@@ -134,7 +132,7 @@ void MediaDevice::release()
* they provide at all times, while still allowing an instance to lock a
* resource while it prepares to actively use a camera from the resource.
*
- * This method shall not be called from a pipeline handler implementation
+ * This function shall not be called from a pipeline handler implementation
* directly, as the base PipelineHandler implementation handles this on
* behalf of the specified implementation.
*
@@ -143,25 +141,19 @@ void MediaDevice::release()
*/
bool MediaDevice::lock()
{
- if (fd_ == -1)
+ if (!fd_.isValid())
return false;
- /* Do not allow nested locking in the same libcamera instance. */
- if (lockOwner_)
+ if (lockf(fd_.get(), F_TLOCK, 0))
return false;
- if (lockf(fd_, F_TLOCK, 0))
- return false;
-
- lockOwner_ = true;
-
return true;
}
/**
* \brief Unlock the device and free it for use for libcamera instances
*
- * This method shall not be called from a pipeline handler implementation
+ * This function shall not be called from a pipeline handler implementation
* directly, as the base PipelineHandler implementation handles this on
* behalf of the specified implementation.
*
@@ -169,15 +161,10 @@ bool MediaDevice::lock()
*/
void MediaDevice::unlock()
{
- if (fd_ == -1)
- return;
-
- if (!lockOwner_)
+ if (!fd_.isValid())
return;
- lockOwner_ = false;
-
- lockf(fd_, F_ULOCK, 0);
+ lockf(fd_.get(), F_ULOCK, 0);
}
/**
@@ -220,7 +207,7 @@ int MediaDevice::populate()
return ret;
struct media_device_info info = {};
- ret = ioctl(fd_, MEDIA_IOC_DEVICE_INFO, &info);
+ ret = ioctl(fd_.get(), MEDIA_IOC_DEVICE_INFO, &info);
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
@@ -231,6 +218,7 @@ int MediaDevice::populate()
driver_ = info.driver;
model_ = info.model;
version_ = info.media_version;
+ hwRevision_ = info.hw_revision;
/*
* Keep calling G_TOPOLOGY until the version number stays stable.
@@ -242,7 +230,7 @@ int MediaDevice::populate()
topology.ptr_links = reinterpret_cast<uintptr_t>(links);
topology.ptr_pads = reinterpret_cast<uintptr_t>(pads);
- ret = ioctl(fd_, MEDIA_IOC_G_TOPOLOGY, &topology);
+ ret = ioctl(fd_.get(), MEDIA_IOC_G_TOPOLOGY, &topology);
if (ret < 0) {
ret = -errno;
LOG(MediaDevice, Error)
@@ -291,7 +279,7 @@ done:
}
/**
- * \fn MediaDevice::valid()
+ * \fn MediaDevice::isValid()
* \brief Query whether the media graph has been populated and is valid
* \return true if the media graph is valid, false otherwise
*/
@@ -315,6 +303,24 @@ done:
*/
/**
+ * \fn MediaDevice::version()
+ * \brief Retrieve the media device API version
+ *
+ * The version is formatted with the KERNEL_VERSION() macro.
+ *
+ * \return The MediaDevice API version
+ */
+
+/**
+ * \fn MediaDevice::hwRevision()
+ * \brief Retrieve the media device hardware revision
+ *
+ * The hardware revision is in a driver-specific format.
+ *
+ * \return The MediaDevice hardware revision
+ */
+
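+
Since version() is packed with KERNEL_VERSION(a, b, c), which expands to (a << 16) + (b << 8) + c, callers can unpack it as sketched below; 'media' is a hypothetical MediaDevice pointer:

        unsigned int v = media->version();
        unsigned int vMajor = (v >> 16) & 0xff;
        unsigned int vMinor = (v >> 8) & 0xff;
        unsigned int vPatch = v & 0xff;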
+/**
* \fn MediaDevice::entities()
* \brief Retrieve the list of entities in the media graph
* \return The list of MediaEntities registered in the MediaDevice
@@ -346,8 +352,9 @@ MediaEntity *MediaDevice::getEntityByName(const std::string &name) const
* entity with name \a sourceName, to the pad at index \a sinkIdx of the
* sink entity with name \a sinkName, if any.
*
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -375,8 +382,9 @@ MediaLink *MediaDevice::link(const std::string &sourceName, unsigned int sourceI
* entity \a source, to the pad at index \a sinkIdx of the sink entity \a
* sink, if any.
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -398,8 +406,10 @@ MediaLink *MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx,
* \param[in] source The source pad
* \param[in] sink The sink pad
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
*
* \return The link that connects the two pads, or nullptr if no such link
* exists
@@ -462,20 +472,19 @@ int MediaDevice::disableLinks()
*/
int MediaDevice::open()
{
- if (fd_ != -1) {
+ if (fd_.isValid()) {
LOG(MediaDevice, Error) << "MediaDevice already open";
return -EBUSY;
}
- int ret = ::open(deviceNode_.c_str(), O_RDWR);
- if (ret < 0) {
- ret = -errno;
+ fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR | O_CLOEXEC));
+ if (!fd_.isValid()) {
+ int ret = -errno;
LOG(MediaDevice, Error)
<< "Failed to open media device at "
<< deviceNode_ << ": " << strerror(-ret);
return ret;
}
- fd_ = ret;
return 0;
}
@@ -495,11 +504,7 @@ int MediaDevice::open()
*/
void MediaDevice::close()
{
- if (fd_ == -1)
- return;
-
- ::close(fd_);
- fd_ = -1;
+ fd_.reset();
}
/**
@@ -552,7 +557,7 @@ bool MediaDevice::addObject(MediaObject *object)
*
* The media device graph state is reset to invalid when the graph is cleared.
*
- * \sa valid()
+ * \sa isValid()
*/
void MediaDevice::clear()
{
@@ -633,14 +638,7 @@ bool MediaDevice::populateEntities(const struct media_v2_topology &topology)
*/
struct media_v2_interface *iface =
findInterface(topology, ent->id);
-
- MediaEntity *entity;
- if (iface)
- entity = new MediaEntity(this, ent,
- iface->devnode.major,
- iface->devnode.minor);
- else
- entity = new MediaEntity(this, ent);
+ MediaEntity *entity = new MediaEntity(this, ent, iface);
if (!addObject(entity)) {
delete entity;
@@ -689,43 +687,72 @@ bool MediaDevice::populateLinks(const struct media_v2_topology &topology)
(topology.ptr_links);
for (unsigned int i = 0; i < topology.num_links; ++i) {
- /*
- * Skip links between entities and interfaces: we only care
- * about pad-2-pad links here.
- */
if ((mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) ==
MEDIA_LNK_FL_INTERFACE_LINK)
continue;
- /* Store references to source and sink pads in the link. */
+ /* Look up the source and sink objects. */
unsigned int source_id = mediaLinks[i].source_id;
- MediaPad *source = dynamic_cast<MediaPad *>
- (object(source_id));
+ MediaObject *source = object(source_id);
if (!source) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< source_id;
return false;
}
unsigned int sink_id = mediaLinks[i].sink_id;
- MediaPad *sink = dynamic_cast<MediaPad *>
- (object(sink_id));
+ MediaObject *sink = object(sink_id);
if (!sink) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< sink_id;
return false;
}
- MediaLink *link = new MediaLink(&mediaLinks[i], source, sink);
- if (!addObject(link)) {
- delete link;
- return false;
+ switch (mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) {
+ case MEDIA_LNK_FL_DATA_LINK: {
+ MediaPad *sourcePad = dynamic_cast<MediaPad *>(source);
+ MediaPad *sinkPad = dynamic_cast<MediaPad *>(sink);
+ if (!sourcePad || !sinkPad) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not a pad";
+ return false;
+ }
+
+ MediaLink *link = new MediaLink(&mediaLinks[i],
+ sourcePad, sinkPad);
+ if (!addObject(link)) {
+ delete link;
+ return false;
+ }
+
+ link->source()->addLink(link);
+ link->sink()->addLink(link);
+
+ break;
}
- source->addLink(link);
- sink->addLink(link);
+ case MEDIA_LNK_FL_ANCILLARY_LINK: {
+ MediaEntity *primary = dynamic_cast<MediaEntity *>(source);
+ MediaEntity *ancillary = dynamic_cast<MediaEntity *>(sink);
+ if (!primary || !ancillary) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not an entity";
+ return false;
+ }
+
+ primary->addAncillaryEntity(ancillary);
+
+ break;
+ }
+
+ default:
+ LOG(MediaDevice, Warning)
+ << "Unknown media link type";
+
+ break;
+ }
}
return true;
@@ -744,7 +771,7 @@ void MediaDevice::fixupEntityFlags(struct media_v2_entity *entity)
struct media_entity_desc desc = {};
desc.id = entity->id;
- int ret = ioctl(fd_, MEDIA_IOC_ENUM_ENTITIES, &desc);
+ int ret = ioctl(fd_.get(), MEDIA_IOC_ENUM_ENTITIES, &desc);
if (ret < 0) {
ret = -errno;
LOG(MediaDevice, Debug)
@@ -787,20 +814,16 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags)
linkDesc.flags = flags;
- int ret = ioctl(fd_, MEDIA_IOC_SETUP_LINK, &linkDesc);
+ int ret = ioctl(fd_.get(), MEDIA_IOC_SETUP_LINK, &linkDesc);
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
- << "Failed to setup link: "
+ << "Failed to setup link " << *link << ": "
<< strerror(-ret);
return ret;
}
- LOG(MediaDevice, Debug)
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: " << flags;
+ LOG(MediaDevice, Debug) << *link << ": " << flags;
return 0;
}
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index ef32065c..3e3772a6 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
-#include "media_object.h"
+#include "libcamera/internal/media_object.h"
#include <errno.h>
#include <string>
@@ -15,8 +15,9 @@
#include <linux/media.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
/**
* \file media_object.h
@@ -67,6 +68,11 @@ LOG_DECLARE_CATEGORY(MediaDevice)
/**
* \fn MediaObject::device()
+ * \copydoc MediaObject::device() const
+ */
+
+/**
+ * \fn MediaObject::device() const
* \brief Retrieve the media device the media object belongs to
* \return The MediaDevice
*/
@@ -115,7 +121,8 @@ LOG_DECLARE_CATEGORY(MediaDevice)
*/
int MediaLink::setEnabled(bool enable)
{
- unsigned int flags = enable ? MEDIA_LNK_FL_ENABLED : 0;
+ unsigned int flags = (flags_ & ~MEDIA_LNK_FL_ENABLED)
+ | (enable ? MEDIA_LNK_FL_ENABLED : 0);
int ret = dev_->setupLink(this, flags);
if (ret)
@@ -140,6 +147,31 @@ MediaLink::MediaLink(const struct media_v2_link *link, MediaPad *source,
}
/**
+ * \brief Generate a string representation of the MediaLink
+ * \return A string representing the MediaLink
+ */
+std::string MediaLink::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a Link into an output stream
+ * \param[in] out The output stream
+ * \param[in] link The MediaLink
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaLink &link)
+{
+ out << *link.source() << " -> " << *link.sink();
+
+ return out;
+}
+
+/**
* \fn MediaLink::source()
* \brief Retrieve the link's source pad
* \return The source pad at the origin of the link
@@ -190,15 +222,6 @@ MediaPad::MediaPad(const struct media_v2_pad *pad, MediaEntity *entity)
{
}
-MediaPad::~MediaPad()
-{
- /*
- * Don't delete the links as we only borrow the reference owned by
- * MediaDevice.
- */
- links_.clear();
-}
-
/**
* \fn MediaPad::index()
* \brief Retrieve the pad index
@@ -238,6 +261,31 @@ void MediaPad::addLink(MediaLink *link)
}
/**
+ * \brief Generate a string representation of the MediaPad
+ * \return A string representing the MediaPad
+ */
+std::string MediaPad::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a MediaPad into an output stream
+ * \param[in] out The output stream
+ * \param[in] pad The MediaPad
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaPad &pad)
+{
+ out << "'" << pad.entity()->name() << "'[" << pad.index() << "]";
+
+ return out;
+}
+
+/**
* \class MediaEntity
* \brief The MediaEntity represents an entity in the media graph
*
@@ -250,6 +298,23 @@ void MediaPad::addLink(MediaLink *link)
*/
/**
+ * \enum MediaEntity::Type
+ * \brief The type of the interface exposed by the entity to userspace
+ *
+ * \var MediaEntity::Type::Invalid
+ * \brief Invalid or unsupported entity type
+ *
+ * \var MediaEntity::Type::MediaEntity
+ * \brief Plain media entity with no userspace interface
+ *
+ * \var MediaEntity::Type::V4L2VideoDevice
+ * \brief V4L2 video device with a V4L2 video device node
+ *
+ * \var MediaEntity::Type::V4L2Subdevice
+ * \brief V4L2 subdevice with a V4L2 subdev device node
+ */
+
+/**
* \fn MediaEntity::name()
* \brief Retrieve the entity name
* \return The entity name
@@ -276,6 +341,15 @@ void MediaPad::addLink(MediaLink *link)
*/
/**
+ * \fn MediaEntity::type()
+ * \brief Retrieve the entity's type
+ *
+ * The entity type identifies the type of interface exposed to userspace.
+ *
+ * \return The entity's type
+ */
+
+/**
* \fn MediaEntity::deviceNode()
* \brief Retrieve the entity's device node path, if any
* \return The entity's device node path, or an empty string if it is not set
@@ -358,25 +432,32 @@ int MediaEntity::setDeviceNode(const std::string &deviceNode)
* \brief Construct a MediaEntity
* \param[in] dev The media device this entity belongs to
* \param[in] entity The media entity kernel data
- * \param[in] major The major number of the entity associated interface
- * \param[in] minor The minor number of the entity associated interface
+ * \param[in] iface The entity interface data (may be null)
*/
MediaEntity::MediaEntity(MediaDevice *dev,
const struct media_v2_entity *entity,
- unsigned int major, unsigned int minor)
+ const struct media_v2_interface *iface)
: MediaObject(dev, entity->id), name_(entity->name),
function_(entity->function), flags_(entity->flags),
- major_(major), minor_(minor)
+ type_(Type::MediaEntity), major_(0), minor_(0)
{
-}
+ if (!iface)
+ return;
+
+ switch (iface->intf_type) {
+ case MEDIA_INTF_T_V4L_VIDEO:
+ type_ = Type::V4L2VideoDevice;
+ break;
+ case MEDIA_INTF_T_V4L_SUBDEV:
+ type_ = Type::V4L2Subdevice;
+ break;
+ default:
+ type_ = Type::Invalid;
+ return;
+ }
-MediaEntity::~MediaEntity()
-{
- /*
- * Don't delete the pads as we only borrow the reference owned by
- * MediaDevice.
- */
- pads_.clear();
+ major_ = iface->devnode.major;
+ minor_ = iface->devnode.minor;
}
/**
@@ -392,4 +473,19 @@ void MediaEntity::addPad(MediaPad *pad)
pads_.push_back(pad);
}
+/**
+ * \brief Add a MediaEntity to the list of ancillary entities
+ * \param[in] ancillaryEntity The instance of MediaEntity to add
+ */
+void MediaEntity::addAncillaryEntity(MediaEntity *ancillaryEntity)
+{
+ ancillaryEntities_.push_back(ancillaryEntity);
+}
+
+/**
+ * \fn MediaEntity::ancillaryEntities()
+ * \brief Retrieve all ancillary entities of the entity
+ * \return The list of the entity's ancillary entities
+ */
+
} /* namespace libcamera */
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index 87fa09cd..57fde8a8 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -1,129 +1,238 @@
-libcamera_sources = files([
- 'bound_method.cpp',
- 'buffer.cpp',
- 'byte_stream_buffer.cpp',
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_public_sources = files([
'camera.cpp',
- 'camera_controls.cpp',
'camera_manager.cpp',
- 'camera_sensor.cpp',
+ 'color_space.cpp',
'controls.cpp',
+ 'fence.cpp',
+ 'framebuffer.cpp',
+ 'framebuffer_allocator.cpp',
+ 'geometry.cpp',
+ 'orientation.cpp',
+ 'pixel_format.cpp',
+ 'request.cpp',
+ 'stream.cpp',
+ 'transform.cpp',
+])
+
+libcamera_internal_sources = files([
+ 'bayer_format.cpp',
+ 'byte_stream_buffer.cpp',
+ 'camera_controls.cpp',
+ 'camera_lens.cpp',
'control_serializer.cpp',
'control_validator.cpp',
+ 'converter.cpp',
+ 'debug_controls.cpp',
+ 'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
- 'event_dispatcher.cpp',
- 'event_dispatcher_poll.cpp',
- 'event_notifier.cpp',
- 'file_descriptor.cpp',
+ 'dma_buf_allocator.cpp',
'formats.cpp',
- 'framebuffer_allocator.cpp',
- 'geometry.cpp',
- 'ipa_context_wrapper.cpp',
'ipa_controls.cpp',
+ 'ipa_data_serializer.cpp',
'ipa_interface.cpp',
'ipa_manager.cpp',
'ipa_module.cpp',
'ipa_proxy.cpp',
+ 'ipc_pipe.cpp',
+ 'ipc_pipe_unixsocket.cpp',
'ipc_unixsocket.cpp',
- 'log.cpp',
+ 'mapped_framebuffer.cpp',
+ 'matrix.cpp',
'media_device.cpp',
'media_object.cpp',
- 'message.cpp',
- 'object.cpp',
'pipeline_handler.cpp',
- 'pixelformats.cpp',
'process.cpp',
- 'request.cpp',
- 'semaphore.cpp',
- 'signal.cpp',
- 'stream.cpp',
- 'thread.cpp',
- 'timer.cpp',
- 'utils.cpp',
- 'v4l2_controls.cpp',
+ 'pub_key.cpp',
+ 'shared_mem_object.cpp',
+ 'source_paths.cpp',
+ 'sysfs.cpp',
'v4l2_device.cpp',
+ 'v4l2_pixelformat.cpp',
'v4l2_subdevice.cpp',
'v4l2_videodevice.cpp',
+ 'yaml_parser.cpp',
])
-subdir('include')
-
-libcamera_internal_includes = include_directories('include')
-
includes = [
libcamera_includes,
- libcamera_internal_includes,
]
+libcamera_deps = []
+
+libatomic = cc.find_library('atomic', required : false)
+libthreads = dependency('threads')
+
+subdir('base')
+subdir('converter')
+subdir('ipa')
subdir('pipeline')
subdir('proxy')
+subdir('sensor')
+subdir('software_isp')
-libatomic = cc.find_library('atomic', required : false)
-libdl = cc.find_library('dl')
-libudev = dependency('libudev', required : false)
+null_dep = dependency('', required : false)
+
+# TODO: Use dependency('dl') when updating to meson 0.62.0 or newer.
+libdl = null_dep
+if not cc.has_function('dlopen')
+ libdl = cc.find_library('dl')
+endif
+libudev = dependency('libudev', required : get_option('udev'))
+libyaml = dependency('yaml-0.1', required : false)
+
+# Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first.
+libcrypto = dependency('gnutls', required : false)
+if libcrypto.found()
+ config_h.set('HAVE_GNUTLS', 1)
+else
+ libcrypto = dependency('libcrypto', required : false)
+ if libcrypto.found()
+ config_h.set('HAVE_CRYPTO', 1)
+ endif
+endif
+
+if not libcrypto.found()
+ warning('Neither gnutls nor libcrypto found, all IPA modules will be isolated')
+ summary({'IPA modules signed with': 'None (modules will run isolated)'},
+ section : 'Configuration')
+else
+ summary({'IPA modules signed with' : libcrypto.name()}, section : 'Configuration')
+endif
+
+if liblttng.found()
+ tracing_enabled = true
+ config_h.set('HAVE_TRACING', 1)
+ libcamera_internal_sources += files(['tracepoints.cpp'])
+else
+ tracing_enabled = false
+endif
if libudev.found()
config_h.set('HAVE_LIBUDEV', 1)
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'device_enumerator_udev.cpp',
])
endif
-gen_controls = files('gen-controls.py')
+# Fallback to a subproject if libyaml isn't found, as it's not packaged in AOSP.
+if not libyaml.found()
+ cmake = import('cmake')
+
+ libyaml_vars = cmake.subproject_options()
+ libyaml_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+ libyaml_vars.append_compile_args('c', '-Wno-unused-value')
+ libyaml_wrap = cmake.subproject('libyaml', options : libyaml_vars)
+ libyaml = libyaml_wrap.dependency('yaml')
+endif
control_sources = []
-foreach source : control_source_files
- input_files = files(source +'.yaml', source + '.cpp.in')
- control_sources += custom_target(source + '_cpp',
+controls_mode_files = {
+ 'controls': [
+ controls_files,
+ 'control_ids.cpp',
+ ],
+ 'properties': [
+ properties_files,
+ 'property_ids.cpp',
+ ],
+}
+
+foreach mode, inout_files : controls_mode_files
+ input_files = inout_files[0]
+ output_file = inout_files[1]
+
+ template_file = files('control_ids.cpp.in')
+ ranges_file = files('control_ranges.yaml')
+
+ control_sources += custom_target(mode + '_ids_cpp',
input : input_files,
- output : source + '.cpp',
- depend_files : gen_controls,
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'])
+ output : output_file,
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'],
+ env : py_build_env)
endforeach
-libcamera_sources += control_headers
-libcamera_sources += control_sources
+libcamera_public_sources += control_sources
-gen_version = join_paths(meson.source_root(), 'utils', 'gen-version.sh')
+gen_version = meson.project_source_root() / 'utils' / 'gen-version.sh'
-version_cpp = vcs_tag(command : [gen_version, meson.build_root()],
+# Use vcs_tag() and not configure_file() or run_command(), to ensure that the
+# version gets updated with every ninja build and not just at meson setup time.
+version_cpp = vcs_tag(command : [gen_version, meson.project_build_root(), meson.project_source_root()],
input : 'version.cpp.in',
output : 'version.cpp',
fallback : meson.project_version())
-libcamera_sources += version_cpp
+libcamera_public_sources += version_cpp
-libcamera_deps = [
+if ipa_sign_module
+ ipa_pub_key_cpp = custom_target('ipa_pub_key_cpp',
+ input : [ipa_priv_key, 'ipa_pub_key.cpp.in'],
+ output : 'ipa_pub_key.cpp',
+ command : [gen_ipa_pub_key, '@INPUT@', '@OUTPUT@'])
+
+ libcamera_internal_sources += ipa_pub_key_cpp
+endif
+
+libcamera_deps += [
libatomic,
+ libcamera_base,
+ libcamera_base_private,
+ libcrypto,
libdl,
+ liblttng,
libudev,
- dependency('threads'),
+ libyaml,
]
-libcamera_link_with = []
-
-if get_option('android')
- libcamera_sources += android_hal_sources
- includes += android_includes
- libcamera_link_with += android_camera_metadata
-endif
-
# We add '/' to the build_rpath as a 'safe' path to act as a boolean flag.
# The build_rpath is stripped at install time by meson, so we determine at
# runtime if the library is running from an installed location by checking
# for the presence or absence of the dynamic tag.
-libcamera = shared_library('camera',
- libcamera_sources,
+libcamera = shared_library('libcamera',
+ [
+ libcamera_public_headers,
+ libcamera_public_sources,
+ libcamera_ipa_headers,
+ libcamera_internal_headers,
+ libcamera_internal_sources,
+ ],
+ version : libcamera_version,
+ soversion : libcamera_soversion,
+ name_prefix : '',
install : true,
- link_with : libcamera_link_with,
include_directories : includes,
build_rpath : '/',
dependencies : libcamera_deps)
-libcamera_dep = declare_dependency(sources : [libcamera_api, libcamera_ipa_api],
- include_directories : libcamera_includes,
- link_with : libcamera)
+libcamera_public = declare_dependency(sources : [
+ libcamera_public_headers,
+ ],
+ include_directories : libcamera_includes,
+ dependencies : libcamera_base,
+ link_with : libcamera)
+
+# Internal dependency for components and plugins which can use private APIs
+libcamera_private = declare_dependency(sources : [
+ libcamera_ipa_headers,
+ ],
+ dependencies : [
+ libcamera_public,
+ libcamera_base_private,
+ ])
+
+pkg_mod = import('pkgconfig')
+pkg_mod.generate(libcamera,
+ libraries : libcamera_base_lib,
+ description : 'Complex Camera Support Library',
+ subdirs : 'libcamera')
+
+meson.override_dependency('libcamera', libcamera_public)
subdir('proxy/worker')
diff --git a/src/libcamera/object.cpp b/src/libcamera/object.cpp
deleted file mode 100644
index 99c3bf9a..00000000
--- a/src/libcamera/object.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * object.cpp - Base object
- */
-
-#include <libcamera/object.h>
-
-#include <algorithm>
-
-#include <libcamera/signal.h>
-
-#include "log.h"
-#include "message.h"
-#include "semaphore.h"
-#include "thread.h"
-#include "utils.h"
-
-/**
- * \file object.h
- * \brief Base object to support automatic signal disconnection
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Object)
-
-/**
- * \class Object
- * \brief Base object to support automatic signal disconnection
- *
- * The Object class simplifies signal/slot handling for classes implementing
- * slots. By inheriting from Object, an object is automatically disconnected
- * from all connected signals when it gets destroyed.
- *
- * Object instances are bound to the thread of their parent, or the thread in
- * which they're created when they have no parent. When a message is posted to
- * an object, its handler will run in the object's thread. This allows
- * implementing easy message passing between threads by inheriting from the
- * Object class.
- *
- * Deleting an object from a thread other than the one the object is bound to is
- * unsafe, unless the caller ensures that the object isn't processing any
- * message concurrently.
- *
- * Object slots connected to signals will also run in the context of the
- * object's thread, regardless of whether the signal is emitted in the same or
- * in another thread.
- *
- * \sa Message, Signal, Thread
- */
-
-/**
- * \brief Construct an Object instance
- * \param[in] parent The object parent
- *
- * The new Object instance is bound to the thread of its \a parent, or to the
- * current thread if the \a parent is nullptr.
- */
-Object::Object(Object *parent)
- : parent_(parent), pendingMessages_(0)
-{
- thread_ = parent ? parent->thread() : Thread::current();
-
- if (parent)
- parent->children_.push_back(this);
-}
-
-/**
- * \brief Destroy an Object instance
- *
- * Deleting an Object automatically disconnects all signals from the Object's
- * slots. All the Object's children are made orphan, but stay bound to their
- * current thread.
- */
-Object::~Object()
-{
- /*
- * Move signals to a private list to avoid concurrent iteration and
- * deletion of items from Signal::disconnect().
- */
- std::list<SignalBase *> signals(std::move(signals_));
- for (SignalBase *signal : signals)
- signal->disconnect(this);
-
- if (pendingMessages_)
- thread()->removeMessages(this);
-
- if (parent_) {
- auto it = std::find(parent_->children_.begin(),
- parent_->children_.end(), this);
- ASSERT(it != parent_->children_.end());
- parent_->children_.erase(it);
- }
-
- for (auto child : children_)
- child->parent_ = nullptr;
-}
-
-/**
- * \brief Post a message to the object's thread
- * \param[in] msg The message
- *
- * This method posts the message \a msg to the message queue of the object's
- * thread, to be delivered to the object through the message() method in the
- * context of its thread. Message ownership is passed to the thread, and the
- * message will be deleted after being delivered.
- *
- * Messages are delivered through the thread's event loop. If the thread is not
- * running its event loop the message will not be delivered until the event
- * loop gets started.
- *
- * \context This function is \threadsafe.
- */
-void Object::postMessage(std::unique_ptr<Message> msg)
-{
- thread()->postMessage(std::move(msg), this);
-}
-
-/**
- * \brief Message handler for the object
- * \param[in] msg The message
- *
- * This virtual method receives messages for the object. It is called in the
- * context of the object's thread, and can be overridden to process custom
- * messages. The parent Object::message() method shall be called for any
- * message not handled by the override method.
- *
- * The message \a msg is valid only for the duration of the call, no reference
- * to it shall be kept after this method returns.
- */
-void Object::message(Message *msg)
-{
- switch (msg->type()) {
- case Message::InvokeMessage: {
- InvokeMessage *iMsg = static_cast<InvokeMessage *>(msg);
- Semaphore *semaphore = iMsg->semaphore();
- iMsg->invoke();
-
- if (semaphore)
- semaphore->release();
-
- break;
- }
-
- default:
- break;
- }
-}
-
-/**
- * \fn R Object::invokeMethod()
- * \brief Invoke a method asynchronously on an Object instance
- * \param[in] func The object method to invoke
- * \param[in] type Connection type for method invocation
- * \param[in] args The method arguments
- *
- * This method invokes the member method \a func with arguments \a args, based
- * on the connection \a type. Depending on the type, the method will be called
- * synchronously in the same thread or asynchronously in the object's thread.
- *
- * Arguments \a args passed by value or reference are copied, while pointers
- * are passed untouched. The caller shall ensure that any pointer argument
- * remains valid until the method is invoked.
- *
- * \context This function is \threadsafe.
- *
- * \return For connection types ConnectionTypeDirect and
- * ConnectionTypeBlocking, return the return value of the invoked method. For
- * connection type ConnectionTypeQueued, return a default-constructed R value.
- */
-
-/**
- * \fn Object::thread()
- * \brief Retrieve the thread the object is bound to
- * \context This function is \threadsafe.
- * \return The thread the object is bound to
- */
-
-/**
- * \brief Move the object and all its children to a different thread
- * \param[in] thread The target thread
- *
- * This method moves the object and all its children from the current thread to
- * the new \a thread.
- *
- * Before the object is moved, a Message::ThreadMoveMessage message is sent to
- * it. The message() method can be reimplemented in derived classes to be notified
- * of the upcoming thread move and perform any required processing.
- *
- * Moving an object that has a parent is not allowed, and causes undefined
- * behaviour.
- *
- * \context This function is thread-bound.
- */
-void Object::moveToThread(Thread *thread)
-{
- ASSERT(Thread::current() == thread_);
-
- if (thread_ == thread)
- return;
-
- if (parent_) {
- LOG(Object, Error)
- << "Moving object to thread with a parent is not permitted";
- return;
- }
-
- notifyThreadMove();
-
- thread->moveObject(this);
-}
-
-void Object::notifyThreadMove()
-{
- Message msg(Message::ThreadMoveMessage);
- message(&msg);
-
- for (auto child : children_)
- child->notifyThreadMove();
-}
-
-/**
- * \fn Object::parent()
- * \brief Retrieve the object's parent
- * \return The object's parent
- */
-
-void Object::connect(SignalBase *signal)
-{
- signals_.push_back(signal);
-}
-
-void Object::disconnect(SignalBase *signal)
-{
- for (auto iter = signals_.begin(); iter != signals_.end(); ) {
- if (*iter == signal)
- iter = signals_.erase(iter);
- else
- iter++;
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
new file mode 100644
index 00000000..7d7d21ae
--- /dev/null
+++ b/src/libcamera/orientation.cpp
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#include <libcamera/orientation.h>
+
+#include <array>
+
+/**
+ * \file orientation.h
+ * \brief Image orientation definition
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Orientation
+ * \brief The image orientation in a memory buffer
+ *
+ * The Orientation enumeration describes the orientation of the images
+ * produced by the camera pipeline as they get received by the application
+ * inside memory buffers.
+ *
+ * The image orientation expressed using the Orientation enumeration can then
+ * be inferred by applying to a naturally oriented image a rotation by a
+ * multiple of 90 degrees in the clockwise direction, followed by an optional
+ * horizontal mirroring.
+ *
+ * The enumeration numerical values follow the ones defined by the EXIF
+ * Specification version 2.32, Tag 274 "Orientation", while the names of the
+ * enumerated values report the rotation and mirroring operations performed.
+ *
+ * For example, Orientation::Rotate90Mirror describes the orientation obtained
+ * by rotating the image 90 degrees clockwise first and then applying a
+ * horizontal mirroring.
+ *
+ * \var Orientation::Rotate0
+ * \image html rotation/rotate0.svg
+ * \var Orientation::Rotate0Mirror
+ * \image html rotation/rotate0Mirror.svg
+ * \var Orientation::Rotate180
+ * \image html rotation/rotate180.svg
+ * \var Orientation::Rotate180Mirror
+ * \image html rotation/rotate180Mirror.svg
+ * \var Orientation::Rotate90Mirror
+ * \image html rotation/rotate90Mirror.svg
+ * \var Orientation::Rotate270
+ * \image html rotation/rotate270.svg
+ * \var Orientation::Rotate270Mirror
+ * \image html rotation/rotate270Mirror.svg
+ * \var Orientation::Rotate90
+ * \image html rotation/rotate90.svg
+ */
+
+/**
+ * \brief Return the orientation representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The orientation corresponding to the rotation if \a success was set
+ * to `true`, otherwise the `Rotate0` orientation
+ */
+Orientation orientationFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Orientation::Rotate0;
+ case 90:
+ return Orientation::Rotate90;
+ case 180:
+ return Orientation::Rotate180;
+ case 270:
+ return Orientation::Rotate270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Orientation::Rotate0;
+}
+
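+/*
+ * A usage sketch (illustration only; assumes orientation.h is included):
+ *
+ *     bool ok;
+ *     Orientation o = orientationFromRotation(-90, &ok);
+ *     // -90 normalises to 270: ok == true, o == Orientation::Rotate270
+ *
+ *     orientationFromRotation(45, &ok);
+ *     // 45 is not a multiple of 90: ok == false, Rotate0 is returned
+ */
+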
+/**
+ * \brief Prints human-friendly names for Orientation items
+ * \param[in] out The output stream
+ * \param[in] orientation The Orientation item
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
+{
+ constexpr std::array<const char *, 9> orientationNames = {
+ "", /* Orientation starts counting from 1. */
+ "Rotate0",
+ "Rotate0Mirror",
+ "Rotate180",
+ "Rotate180Mirror",
+ "Rotate90Mirror",
+ "Rotate270",
+ "Rotate270Mirror",
+ "Rotate90",
+ };
+
+ out << orientationNames[static_cast<unsigned int>(orientation)];
+ return out;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..4e66b336
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1116 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ if (!sensor_)
+ return -ENODEV;
+
+ int ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor adjust it to
+ * the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+ * map and the sensor can produce the compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
+
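+/*
+ * A worked sketch (illustration only) with a hypothetical sensor that
+ * exposes MEDIA_BUS_FMT_SRGGB10_1X10 as its only Bayer code:
+ *
+ *     PixelFormat fmt = formats::SRGGB8;
+ *     unsigned int code = getRawMediaBusFormat(&fmt);
+ *
+ * The 8-bit request cannot be satisfied, so fmt is adjusted to
+ * formats::SRGGB10, the largest bit-depth the sensor provides, and code is
+ * set to MEDIA_BUS_FMT_SRGGB10_1X10.
+ */
+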
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixelformat.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format the ISI cannot
+ * generate any processed pixel format as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+ * media bus formats.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
+
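+/*
+ * A worked sketch (illustration only) with a hypothetical sensor producing
+ * both MEDIA_BUS_FMT_UYVY8_1X16 and MEDIA_BUS_FMT_RGB565_1X16:
+ *
+ *     unsigned int code = getYuvMediaBusFormat(formats::RGB565);
+ *
+ * This returns MEDIA_BUS_FMT_RGB565_1X16, as codes with the same RGB
+ * colour encoding as the requested pixel format are preferred over the
+ * YUV ones the sensor also supports.
+ */
+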
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
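+/*
+ * For instance (illustration only), a stream configured for formats::NV12
+ * looks up formatsMap_.at(formats::NV12) and obtains
+ * MEDIA_BUS_FMT_YUV8_1X24: the ISI source pad is set to that bus code and
+ * the capture video node performs the conversion to the NV12 memory
+ * format.
+ */
+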
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+ * format. If the sensor can only produce RAW Bayer we can only fail
+ * here as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+ /* If the stream is RAW or not supported default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the streams size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane ? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
+ * channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+ * successive channels together. Define a policy for channels allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+ * \todo The sensor format selection policy could be changed to
+ * prefer operating the sensor at full resolution to prioritize
+ * image quality in exchange for a usually slower frame rate.
+ * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+ * We can't use CameraSensor::getFormat() as it might return a
+ * format larger than our strict width limit, as that function
+ * prioritizes formats with the same aspect ratio over formats with a
+ * smaller difference in size.
+ *
+ * Manually walk all the sensor supported sizes searching for
+ * the smallest larger format without considering the aspect ratio
+ * as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
+
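+/*
+ * A worked sketch (illustration only) of the selection policy for a
+ * hypothetical two-stream configuration on i.MX8MP:
+ *
+ *     config_[0]: 1280x720 YUYV -> selects the media bus code
+ *     config_[1]: 1920x1080 NV12 -> largest stream, selects the size
+ *
+ * With two streams the input width is capped to 2048, and the sensor is
+ * programmed with the smallest supported size that is not smaller than
+ * 1920x1080 and respects the width limit.
+ */
+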
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for the YUV/RGB use case.
+ *
+ * Verify if the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+/*
+ * Generate a StreamConfiguration for the RAW Bayer use case. Verify if the
+ * sensor can produce the requested RAW Bayer format and, if it cannot,
+ * adjust it to the one with the largest bit-depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat will be adjusted, if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - Viewfinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fallback to use a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
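+/*
+ * From the application side, the role-based defaults above are exercised
+ * along these lines (a sketch assuming an already acquired camera):
+ *
+ *     std::unique_ptr<CameraConfiguration> config =
+ *             camera->generateConfiguration({ StreamRole::Viewfinder,
+ *                                             StreamRole::Raw });
+ *
+ * config->at(0) then defaults to 1080p YUYV and config->at(1) to a
+ * full-resolution Bayer format, provided the sensor can produce one.
+ */
+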
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..b369b031
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
new file mode 100644
index 00000000..aa544d7b
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 CIO2
+ */
+
+#include "cio2.h"
+
+#include <cmath>
+#include <limits>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+namespace {
+
+const std::map<uint32_t, PixelFormat> mbusCodesToPixelFormat = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10_IPU3 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10_IPU3 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10_IPU3 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10_IPU3 },
+};
+
+} /* namespace */
+
+CIO2Device::CIO2Device()
+{
+}
+
+/**
+ * \brief Retrieve the list of supported PixelFormats
+ *
+ * Retrieve the list of supported pixel formats by matching the sensor produced
+ * media bus codes with the formats supported by the CIO2 unit.
+ *
+ * \return The list of supported PixelFormat
+ */
+std::vector<PixelFormat> CIO2Device::formats() const
+{
+ if (!sensor_)
+ return {};
+
+ std::vector<PixelFormat> formats;
+ for (unsigned int code : sensor_->mbusCodes()) {
+ auto it = mbusCodesToPixelFormat.find(code);
+ if (it != mbusCodesToPixelFormat.end())
+ formats.push_back(it->second);
+ }
+
+ return formats;
+}
+
+/**
+ * \brief Retrieve the list of supported size ranges
+ * \param[in] format The pixel format
+ *
+ * Retrieve the list of supported sizes for a particular \a format by matching
+ * the sensor-produced media bus codes with the formats supported by the CIO2
+ * unit.
+ *
+ * \return A list of supported sizes for the \a format or an empty list
+ * otherwise
+ */
+std::vector<SizeRange> CIO2Device::sizes(const PixelFormat &format) const
+{
+ int mbusCode = -1;
+
+ if (!sensor_)
+ return {};
+
+ std::vector<SizeRange> sizes;
+ for (const auto &iter : mbusCodesToPixelFormat) {
+ if (iter.second != format)
+ continue;
+
+ mbusCode = iter.first;
+ break;
+ }
+
+ if (mbusCode == -1)
+ return {};
+
+ for (const Size &sz : sensor_->sizes(mbusCode))
+ sizes.emplace_back(sz);
+
+ return sizes;
+}
+
+/**
+ * \brief Initialize components of the CIO2 device with \a index
+ * \param[in] media The CIO2 media device
+ * \param[in] index The CIO2 device index
+ *
+ * Create and open the video device and subdevices in the CIO2 instance at \a
+ * index, if a supported image sensor is connected to the CSI-2 receiver of
+ * this CIO2 instance. Enable the media links connecting the CIO2 components
+ * to prepare for capture operations and cache the sensor maximum size.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -ENODEV No supported image sensor is connected to this CIO2 instance
+ */
+int CIO2Device::init(const MediaDevice *media, unsigned int index)
+{
+ int ret;
+
+ /*
+ * Verify that a sensor subdevice is connected to this CIO2 instance
+ * and enable the media link between the two.
+ */
+ std::string csi2Name = "ipu3-csi2 " + std::to_string(index);
+ MediaEntity *csi2Entity = media->getEntityByName(csi2Name);
+ const std::vector<MediaPad *> &pads = csi2Entity->pads();
+ if (pads.empty())
+ return -ENODEV;
+
+ /* IPU3 CSI-2 receivers have a single sink pad at index 0. */
+ MediaPad *sink = pads[0];
+ const std::vector<MediaLink *> &links = sink->links();
+ if (links.empty())
+ return -ENODEV;
+
+ MediaLink *link = links[0];
+ MediaEntity *sensorEntity = link->source()->entity();
+ sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!sensor_)
+ return -ENODEV;
+
+ ret = link->setEnabled(true);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure the sensor produces at least one format compatible with
+ * the CIO2 requirements.
+ *
+ * utils::set_overlap requires the ranges to be sorted, keep the
+ * cio2Codes vector sorted in ascending order.
+ */
+ std::vector<unsigned int> cio2Codes = utils::map_keys(mbusCodesToPixelFormat);
+ const std::vector<unsigned int> &sensorCodes = sensor_->mbusCodes();
+ if (!utils::set_overlap(sensorCodes.begin(), sensorCodes.end(),
+ cio2Codes.begin(), cio2Codes.end())) {
+ LOG(IPU3, Error)
+ << "Sensor " << sensor_->entity()->name()
+ << " has not format compatible with the IPU3";
+ return -EINVAL;
+ }
+
+ /*
+ * \todo Define when to open and close video device nodes, as they
+ * might impact on power consumption.
+ */
+
+ csi2_ = std::make_unique<V4L2Subdevice>(csi2Entity);
+ ret = csi2_->open();
+ if (ret)
+ return ret;
+
+ std::string cio2Name = "ipu3-cio2 " + std::to_string(index);
+ output_ = V4L2VideoDevice::fromEntityName(media, cio2Name);
+ return output_->open();
+}
+
+/**
+ * \brief Configure the CIO2 unit
+ * \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
+ * \param[out] outputFormat The CIO2 unit output image format
+ * \return 0 on success or a negative error code otherwise
+ */
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
+{
+ V4L2SubdeviceFormat sensorFormat;
+ int ret;
+
+ /*
+ * Apply the selected format to the sensor, the CSI-2 receiver and
+ * the CIO2 output device.
+ */
+ std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
+ sensorFormat = getSensorFormat(mbusCodes, size);
+ ret = sensor_->setFormat(&sensorFormat, transform);
+ if (ret)
+ return ret;
+
+ ret = csi2_->setFormat(0, &sensorFormat);
+ if (ret)
+ return ret;
+
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
+ if (itInfo == mbusCodesToPixelFormat.end())
+ return -EINVAL;
+
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
+ outputFormat->size = sensorFormat.size;
+ outputFormat->planesCount = 1;
+
+ ret = output_->setFormat(outputFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "CIO2 output format " << *outputFormat;
+
+ return 0;
+}
+
+StreamConfiguration CIO2Device::generateConfiguration(Size size) const
+{
+ StreamConfiguration cfg;
+
+ /* If no size is requested, use the sensor resolution. */
+ if (size.isNull())
+ size = sensor_->resolution();
+
+ /* Query the sensor static information for closest match. */
+ std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
+ V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
+ if (!sensorFormat.code) {
+ LOG(IPU3, Error) << "Sensor does not support mbus code";
+ return {};
+ }
+
+ cfg.size = sensorFormat.size;
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
+ cfg.bufferCount = kBufferCount;
+
+ return cfg;
+}
+
+/**
+ * \brief Retrieve the best sensor format for a desired output
+ * \param[in] mbusCodes The list of acceptable media bus codes
+ * \param[in] size The desired size
+ *
+ * Media bus codes are selected from \a mbusCodes, which lists all acceptable
+ * codes in decreasing order of preference. Media bus codes supported by the
+ * sensor but not listed in \a mbusCodes are ignored. If none of the desired
+ * codes is supported, it returns an error.
+ *
+ * \a size indicates the desired size at the output of the sensor. This method
+ * selects the best media bus code and size supported by the sensor according
+ * to the following criteria.
+ *
+ * - The desired \a size shall fit in the sensor output size to avoid the need
+ * to up-scale.
+ * - The aspect ratio of the sensor output size shall be as close as possible
+ *   to that of the sensor's native resolution, to preserve the field of view.
+ * - The sensor output size shall be as small as possible to lower the required
+ * bandwidth.
+ * - The desired \a size shall be supported by one of the media bus code listed
+ * in \a mbusCodes.
+ *
+ * When multiple media bus codes can produce the same size, the code at the
+ * lowest position in \a mbusCodes is selected.
+ *
+ * The returned sensor output format is guaranteed to be acceptable by the
+ * setFormat() method without any modification.
+ *
+ * \return The best sensor output format matching the desired media bus codes
+ * and size on success, or an empty format otherwise.
+ */
+V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = std::numeric_limits<unsigned int>::max();
+ const Size &resolution = sensor_->resolution();
+ float desiredRatio = static_cast<float>(resolution.width) /
+ resolution.height;
+ float bestRatio = std::numeric_limits<float>::max();
+ Size bestSize;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto sizes = sensor_->sizes(code);
+ if (!sizes.size())
+ continue;
+
+ for (const Size &sz : sizes) {
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ /*
+ * Ratios can differ by a small fractional amount, which
+ * can wildly affect the selection of the sensor output
+ * size. We are interested in selecting the closest size
+ * with respect to the desired output size, so comparing
+ * ratios with a single decimal digit of precision is
+ * enough.
+ */
+ ratio = static_cast<unsigned int>(ratio * 10) / 10.0;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (bestSize.isNull()) {
+ LOG(IPU3, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{};
+ format.code = bestCode;
+ format.size = bestSize;
+
+ return format;
+}
+
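+/*
+ * A worked sketch (illustration only): for a hypothetical 2592x1944 sensor
+ * (desiredRatio ~1.33) that also offers 1920x1080 and 1280x720, a request
+ * for 1280x720 evaluates, after truncating ratios to one decimal digit:
+ *
+ *     1280x720  -> ratio 1.7, ratioDiff ~0.37
+ *     1920x1080 -> ratio 1.7, ratioDiff ~0.37
+ *     2592x1944 -> ratio 1.3, ratioDiff ~0.03
+ *
+ * 2592x1944 is selected: matching the sensor's native aspect ratio takes
+ * precedence over minimizing the output area.
+ */
+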
+int CIO2Device::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ return output_->exportBuffers(count, buffers);
+}
+
+int CIO2Device::start()
+{
+ int ret = output_->exportBuffers(kBufferCount, &buffers_);
+ if (ret < 0)
+ return ret;
+
+ ret = output_->importBuffers(kBufferCount);
+ if (ret)
+ LOG(IPU3, Error) << "Failed to import CIO2 buffers";
+
+ for (std::unique_ptr<FrameBuffer> &buffer : buffers_)
+ availableBuffers_.push(buffer.get());
+
+ ret = output_->streamOn();
+ if (ret) {
+ freeBuffers();
+ return ret;
+ }
+
+ ret = csi2_->setFrameStartEnabled(true);
+ if (ret) {
+ stop();
+ return ret;
+ }
+
+ return 0;
+}
+
+int CIO2Device::stop()
+{
+ int ret;
+
+ csi2_->setFrameStartEnabled(false);
+
+ ret = output_->streamOff();
+
+ freeBuffers();
+
+ return ret;
+}
+
+FrameBuffer *CIO2Device::queueBuffer(Request *request, FrameBuffer *rawBuffer)
+{
+ FrameBuffer *buffer = rawBuffer;
+
+ /* If no buffer is provided in the request, use an internal one. */
+ if (!buffer) {
+ if (availableBuffers_.empty()) {
+ LOG(IPU3, Debug) << "CIO2 buffer underrun";
+ return nullptr;
+ }
+
+ buffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ buffer->_d()->setRequest(request);
+ }
+
+ int ret = output_->queueBuffer(buffer);
+ if (ret)
+ return nullptr;
+
+ return buffer;
+}
+
+void CIO2Device::tryReturnBuffer(FrameBuffer *buffer)
+{
+ /*
+ * \todo Once more pipelines deal with buffers that may be allocated
+ * internally or externally this pattern might become a common need. At
+ * that point this check should be moved to something clever in
+ * FrameBuffer.
+ */
+ for (const std::unique_ptr<FrameBuffer> &buf : buffers_) {
+ if (buf.get() == buffer) {
+ availableBuffers_.push(buffer);
+ break;
+ }
+ }
+
+ bufferAvailable.emit();
+}
+
+void CIO2Device::freeBuffers()
+{
+ availableBuffers_ = {};
+ buffers_.clear();
+
+ if (output_->releaseBuffers())
+ LOG(IPU3, Error) << "Failed to release CIO2 buffers";
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
new file mode 100644
index 00000000..963c2f6b
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 CIO2
+ */
+
+#pragma once
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class CameraSensor;
+class FrameBuffer;
+class MediaDevice;
+class PixelFormat;
+class Request;
+class Size;
+class SizeRange;
+struct StreamConfiguration;
+enum class Transform;
+
+class CIO2Device
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ CIO2Device();
+
+ std::vector<PixelFormat> formats() const;
+ std::vector<SizeRange> sizes(const PixelFormat &format) const;
+
+ int init(const MediaDevice *media, unsigned int index);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
+
+ StreamConfiguration generateConfiguration(Size size) const;
+
+ int exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ V4L2SubdeviceFormat getSensorFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size) const;
+
+ int start();
+ int stop();
+
+ CameraSensor *sensor() { return sensor_.get(); }
+ const CameraSensor *sensor() const { return sensor_.get(); }
+
+ FrameBuffer *queueBuffer(Request *request, FrameBuffer *rawBuffer);
+ void tryReturnBuffer(FrameBuffer *buffer);
+ Signal<FrameBuffer *> &bufferReady() { return output_->bufferReady; }
+ Signal<uint32_t> &frameStart() { return csi2_->frameStart; }
+
+ Signal<> bufferAvailable;
+
+private:
+ void freeBuffers();
+
+ void cio2BufferReady(FrameBuffer *buffer);
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csi2_;
+ std::unique_ptr<V4L2VideoDevice> output_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> buffers_;
+ std::queue<FrameBuffer *> availableBuffers_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
new file mode 100644
index 00000000..bc0526a7
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Intel IPU3 Frames helper
+ */
+
+#include "frames.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+IPU3Frames::IPU3Frames()
+{
+}
+
+void IPU3Frames::init(const std::vector<std::unique_ptr<FrameBuffer>> &paramBuffers,
+ const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers)
+{
+ for (const std::unique_ptr<FrameBuffer> &buffer : paramBuffers)
+ availableParamBuffers_.push(buffer.get());
+
+ for (const std::unique_ptr<FrameBuffer> &buffer : statBuffers)
+ availableStatBuffers_.push(buffer.get());
+
+ frameInfo_.clear();
+}
+
+void IPU3Frames::clear()
+{
+ availableParamBuffers_ = {};
+ availableStatBuffers_ = {};
+}
+
+IPU3Frames::Info *IPU3Frames::create(Request *request)
+{
+ unsigned int id = request->sequence();
+
+ if (availableParamBuffers_.empty()) {
+ LOG(IPU3, Debug) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (availableStatBuffers_.empty()) {
+ LOG(IPU3, Debug) << "Statistics buffer underrun";
+ return nullptr;
+ }
+
+ FrameBuffer *paramBuffer = availableParamBuffers_.front();
+ FrameBuffer *statBuffer = availableStatBuffers_.front();
+
+ paramBuffer->_d()->setRequest(request);
+ statBuffer->_d()->setRequest(request);
+
+ availableParamBuffers_.pop();
+ availableStatBuffers_.pop();
+
+ /* \todo Remove the dynamic allocation of Info */
+ std::unique_ptr<Info> info = std::make_unique<Info>();
+
+ info->id = id;
+ info->request = request;
+ info->rawBuffer = nullptr;
+ info->paramBuffer = paramBuffer;
+ info->statBuffer = statBuffer;
+ info->paramDequeued = false;
+ info->metadataProcessed = false;
+
+ frameInfo_[id] = std::move(info);
+
+ return frameInfo_[id].get();
+}
+
+void IPU3Frames::remove(IPU3Frames::Info *info)
+{
+	/* Return the param and stat buffers for reuse. */
+ availableParamBuffers_.push(info->paramBuffer);
+ availableStatBuffers_.push(info->statBuffer);
+
+ /* Delete the extended frame information. */
+ frameInfo_.erase(info->id);
+}
+
+bool IPU3Frames::tryComplete(IPU3Frames::Info *info)
+{
+ Request *request = info->request;
+
+ if (request->hasPendingBuffers())
+ return false;
+
+ if (!info->metadataProcessed)
+ return false;
+
+ if (!info->paramDequeued)
+ return false;
+
+ remove(info);
+
+ bufferAvailable.emit();
+
+ return true;
+}
+
+IPU3Frames::Info *IPU3Frames::find(unsigned int id)
+{
+ const auto &itInfo = frameInfo_.find(id);
+
+ if (itInfo != frameInfo_.end())
+ return itInfo->second.get();
+
+ LOG(IPU3, Fatal) << "Can't find tracking information for frame " << id;
+
+ return nullptr;
+}
+
+IPU3Frames::Info *IPU3Frames::find(FrameBuffer *buffer)
+{
+ for (auto const &itInfo : frameInfo_) {
+ Info *info = itInfo.second.get();
+
+ for (auto const itBuffers : info->request->buffers())
+ if (itBuffers.second == buffer)
+ return info;
+
+ if (info->rawBuffer == buffer || info->paramBuffer == buffer ||
+ info->statBuffer == buffer)
+ return info;
+ }
+
+ LOG(IPU3, Fatal) << "Can't find tracking information from buffer";
+
+ return nullptr;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
new file mode 100644
index 00000000..a347b66f
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Intel IPU3 Frames helper
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class FrameBuffer;
+class IPAProxy;
+class PipelineHandler;
+class Request;
+class V4L2VideoDevice;
+struct IPABuffer;
+
+class IPU3Frames
+{
+public:
+ struct Info {
+ unsigned int id;
+ Request *request;
+
+ FrameBuffer *rawBuffer;
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ ControlList effectiveSensorControls;
+
+ bool paramDequeued;
+ bool metadataProcessed;
+ };
+
+ IPU3Frames();
+
+ void init(const std::vector<std::unique_ptr<FrameBuffer>> &paramBuffers,
+ const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers);
+ void clear();
+
+ Info *create(Request *request);
+ void remove(Info *info);
+ bool tryComplete(Info *info);
+
+ Info *find(unsigned int id);
+ Info *find(FrameBuffer *buffer);
+
+ Signal<> bufferAvailable;
+
+private:
+ std::queue<FrameBuffer *> availableParamBuffers_;
+ std::queue<FrameBuffer *> availableStatBuffers_;
+
+ std::map<unsigned int, std::unique_ptr<Info>> frameInfo_;
+};
+
+} /* namespace libcamera */
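
The Info life cycle that IPU3Frames manages is driven from the pipeline handler. A hedged usage sketch (frames, request and the completion events are assumed to be in scope; not part of the patch):

    /* At queue time: reserve one param and one stat buffer. */
    IPU3Frames::Info *info = frames.create(request);
    if (!info) {
        /* Buffer underrun: hold the request until bufferAvailable fires. */
    }

    /* As each completion event arrives, record it and retry completion. */
    info->paramDequeued = true;      /* the param buffer left the ImgU */
    info->metadataProcessed = true;  /* the IPA delivered request metadata */

    /* Succeeds only once all request buffers are back and both flags are
     * set; on success remove() recycles the param and stat buffers. */
    frames.tryComplete(info);
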
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
new file mode 100644
index 00000000..7be78091
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -0,0 +1,767 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 ImgU
+ */
+
+#include "imgu.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+namespace {
+
+/*
+ * The procedure to calculate the ImgU pipe configuration has been ported
+ * from the pipe_config.py python script, available at:
+ * https://github.com/intel/intel-ipu3-pipecfg
+ * at revision: 243d13446e44 ("Fix some bug for some resolutions")
+ */
+
+/* BDS scaling factors: min=1, max=2.5, step=1/32 */
+const std::vector<float> bdsScalingFactors = {
+ 1, 1.03125, 1.0625, 1.09375, 1.125, 1.15625, 1.1875, 1.21875, 1.25,
+ 1.28125, 1.3125, 1.34375, 1.375, 1.40625, 1.4375, 1.46875, 1.5, 1.53125,
+ 1.5625, 1.59375, 1.625, 1.65625, 1.6875, 1.71875, 1.75, 1.78125, 1.8125,
+ 1.84375, 1.875, 1.90625, 1.9375, 1.96875, 2, 2.03125, 2.0625, 2.09375,
+ 2.125, 2.15625, 2.1875, 2.21875, 2.25, 2.28125, 2.3125, 2.34375, 2.375,
+ 2.40625, 2.4375, 2.46875, 2.5
+};
+
+/* GDC scaling factors: min=1, max=16, step=1/4 */
+const std::vector<float> gdcScalingFactors = {
+ 1, 1.25, 1.5, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25,
+ 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6, 6.25, 6.5, 6.75, 7, 7.25, 7.5, 7.75,
+ 8, 8.25, 8.5, 8.75, 9, 9.25, 9.5, 9.75, 10, 10.25, 10.5, 10.75, 11,
+ 11.25, 11.5, 11.75, 12, 12.25, 12.5, 12.75, 13, 13.25, 13.5, 13.75, 14,
+ 14.25, 14.5, 14.75, 15, 15.25, 15.5, 15.75, 16,
+};
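
Both tables are plain arithmetic progressions and could equally be generated at startup. A sketch using a hypothetical helper; note that steps of 1/32 and 1/4 are exactly representable in binary floating point, so the accumulation below is exact:

    std::vector<float> makeScalingFactors(float min, float max, float step)
    {
        std::vector<float> factors;
        for (float sf = min; sf <= max; sf += step)
            factors.push_back(sf);
        return factors;
    }

    /* makeScalingFactors(1.0f, 2.5f, 1.0f / 32) reproduces bdsScalingFactors,
     * makeScalingFactors(1.0f, 16.0f, 1.0f / 4) reproduces gdcScalingFactors. */
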
+
+std::vector<ImgUDevice::PipeConfig> pipeConfigs;
+
+struct FOV {
+ float w;
+ float h;
+
+ bool isLarger(const FOV &other)
+ {
+ if (w > other.w)
+ return true;
+ if (w == other.w && h > other.h)
+ return true;
+ return false;
+ }
+};
+
+/* Approximate a scaling factor sf to the closest one available in a range. */
+float findScaleFactor(float sf, const std::vector<float> &range,
+ bool roundDown = false)
+{
+ if (sf <= range[0])
+ return range[0];
+ if (sf >= range[range.size() - 1])
+ return range[range.size() - 1];
+
+ float bestDiff = std::numeric_limits<float>::max();
+ unsigned int index = 0;
+ for (unsigned int i = 0; i < range.size(); ++i) {
+ float diff = utils::abs_diff(sf, range[i]);
+ if (diff < bestDiff) {
+ bestDiff = diff;
+ index = i;
+ }
+ }
+
+ if (roundDown && index > 0 && sf < range[index])
+ index--;
+
+ return range[index];
+}
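
For example, a requested factor of 1.30 falls between the bdsScalingFactors entries 1.28125 and 1.3125:

    findScaleFactor(1.30f, bdsScalingFactors);       /* -> 1.3125, the closest entry */
    findScaleFactor(1.30f, bdsScalingFactors, true); /* -> 1.28125, rounded down */
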
+
+bool isSameRatio(const Size &in, const Size &out)
+{
+ float inRatio = static_cast<float>(in.width) / in.height;
+ float outRatio = static_cast<float>(out.width) / out.height;
+
+ if (utils::abs_diff(inRatio, outRatio) > 0.1)
+ return false;
+
+ return true;
+}
+
+void calculateBDSHeight(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc,
+ unsigned int bdsWidth, float bdsSF)
+{
+ unsigned int minIFHeight = iif.height - ImgUDevice::kIFMaxCropHeight;
+ unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
+ unsigned int ifHeight;
+ float bdsHeight;
+
+ if (!isSameRatio(pipe->input, gdc)) {
+ unsigned int foundIfHeight = 0;
+ float estIFHeight = (iif.width * gdc.height) /
+ static_cast<float>(gdc.width);
+ estIFHeight = std::clamp<float>(estIFHeight, minIFHeight, iif.height);
+
+ ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
+ ifHeight / bdsSF >= minBDSHeight) {
+
+ float height = ifHeight / bdsSF;
+ if (std::fmod(height, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(height);
+
+ if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ foundIfHeight = ifHeight;
+ bdsHeight = height;
+ break;
+ }
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
+ ifHeight / bdsSF >= minBDSHeight) {
+
+ float height = ifHeight / bdsSF;
+ if (std::fmod(height, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(height);
+
+ if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ foundIfHeight = ifHeight;
+ bdsHeight = height;
+ break;
+ }
+ }
+
+ ifHeight += ImgUDevice::kIFAlignHeight;
+ }
+
+ if (foundIfHeight) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+
+ pipeConfigs.push_back({ bdsSF, { iif.width, foundIfHeight },
+ { bdsWidth, bdsIntHeight }, gdc });
+ return;
+ }
+ } else {
+ ifHeight = utils::alignUp(iif.height, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight / bdsSF >= minBDSHeight) {
+
+ bdsHeight = ifHeight / bdsSF;
+ if (std::fmod(ifHeight, 1.0) == 0 && std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+
+ if (!(ifHeight % ImgUDevice::kIFAlignHeight) &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ pipeConfigs.push_back({ bdsSF, { iif.width, ifHeight },
+ { bdsWidth, bdsIntHeight }, gdc });
+ }
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+ }
+}
+
+void calculateBDS(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc, float bdsSF)
+{
+ unsigned int minBDSWidth = gdc.width + ImgUDevice::kFilterWidth * 2;
+ unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
+
+ float sf = bdsSF;
+ while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
+ float bdsWidth = static_cast<float>(iif.width) / sf;
+ float bdsHeight = static_cast<float>(iif.height) / sf;
+
+ if (std::fmod(bdsWidth, 1.0) == 0 &&
+ std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+ if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
+ calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
+ }
+
+ sf += ImgUDevice::kBDSSfStep;
+ }
+
+ sf = bdsSF;
+ while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
+ float bdsWidth = static_cast<float>(iif.width) / sf;
+ float bdsHeight = static_cast<float>(iif.height) / sf;
+
+ if (std::fmod(bdsWidth, 1.0) == 0 &&
+ std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+ if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
+ calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
+ }
+
+ sf -= ImgUDevice::kBDSSfStep;
+ }
+}
+
+Size calculateGDC(ImgUDevice::Pipe *pipe)
+{
+ const Size &in = pipe->input;
+ const Size &main = pipe->main;
+ const Size &vf = pipe->viewfinder;
+ Size gdc;
+
+ if (!vf.isNull()) {
+ gdc.width = main.width;
+
+ float ratio = (main.width * vf.height) / static_cast<float>(vf.width);
+ gdc.height = std::max(static_cast<float>(main.height), ratio);
+
+ return gdc;
+ }
+
+ if (!isSameRatio(in, main)) {
+ gdc = main;
+ return gdc;
+ }
+
+ float totalSF = static_cast<float>(in.width) / main.width;
+ float bdsSF = totalSF > 2 ? 2 : 1;
+ float yuvSF = totalSF / bdsSF;
+ float sf = findScaleFactor(yuvSF, gdcScalingFactors);
+
+ gdc.width = main.width * sf;
+ gdc.height = main.height * sf;
+
+ return gdc;
+}
+
+FOV calcFOV(const Size &in, const ImgUDevice::PipeConfig &pipe)
+{
+ FOV fov{};
+
+ float inW = static_cast<float>(in.width);
+ float inH = static_cast<float>(in.height);
+ float ifCropW = static_cast<float>(in.width - pipe.iif.width);
+ float ifCropH = static_cast<float>(in.height - pipe.iif.height);
+ float gdcCropW = static_cast<float>(pipe.bds.width - pipe.gdc.width) * pipe.bds_sf;
+ float gdcCropH = static_cast<float>(pipe.bds.height - pipe.gdc.height) * pipe.bds_sf;
+
+ fov.w = (inW - (ifCropW + gdcCropW)) / inW;
+ fov.h = (inH - (ifCropH + gdcCropH)) / inH;
+
+ return fov;
+}
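
In other words, fov.w and fov.h are the fractions of the input frame that survive the IF crop and the GDC crop, the latter converted back to input pixels through bds_sf. A worked example with invented numbers:

    in = 2592x1944, iif = 2560x1920, bds = 1706x1280, gdc = 1600x1200, bds_sf = 1.5

    ifCropW  = 2592 - 2560 = 32
    gdcCropW = (1706 - 1600) * 1.5 = 159
    fov.w    = (2592 - (32 + 159)) / 2592 ≈ 0.926
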
+
+} /* namespace */
+
+/**
+ * \struct PipeConfig
+ * \brief The ImgU pipe configuration parameters
+ *
+ * The ImgU image pipeline is composed of several hardware blocks that crop
+ * and scale the input image to obtain the desired output sizes. The
+ * scaling/cropping operations of those components are configured through the
+ * V4L2 selection API and the V4L2 subdev API applied to the ImgU media entity.
+ *
+ * The configurable components in the pipeline are:
+ * - IF: image feeder
+ * - BDS: bayer downscaler
+ * - GDC: geometric distortion correction
+ *
+ * The IF crop rectangle is controlled by the V4L2_SEL_TGT_CROP selection target
+ * applied to the ImgU media entity sink pad number 0. The BDS scaler is
+ * controlled by the V4L2_SEL_TGT_COMPOSE target on the same pad, while the GDC
+ * output size is configured with the VIDIOC_SUBDEV_S_FMT IOCTL, again on pad
+ * number 0.
+ *
+ * The PipeConfig structure collects the sizes of each of those components
+ * plus the BDS scaling factor used to calculate the field of view
+ * of the final images.
+ */
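
Expressed as code, the mapping above boils down to three operations on sink pad 0; this is a condensed sketch of what configure() below performs:

    Rectangle ifRect{ 0, 0, pipeConfig.iif };
    imgu_->setSelection(0, V4L2_SEL_TGT_CROP, &ifRect);      /* IF crop */

    Rectangle bdsRect{ 0, 0, pipeConfig.bds };
    imgu_->setSelection(0, V4L2_SEL_TGT_COMPOSE, &bdsRect);  /* BDS scaler */

    V4L2SubdeviceFormat gdcFormat = {};
    gdcFormat.code = MEDIA_BUS_FMT_FIXED;
    gdcFormat.size = pipeConfig.gdc;
    imgu_->setFormat(0, &gdcFormat);                         /* GDC output size */
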
+
+/**
+ * \struct Pipe
+ * \brief Describe the requested ImgU configuration
+ *
+ * The ImgU unit processes images through several components, which have
+ * to be properly configured by inspecting the input image size and the
+ * desired output sizes. This structure collects the ImgU input configuration
+ * and the requested main output and viewfinder configurations.
+ *
+ * \var Pipe::input
+ * \brief The input image size
+ *
+ * \var Pipe::main
+ * \brief The requested main output size
+ *
+ * \var Pipe::viewfinder
+ * \brief The requested viewfinder output size
+ */
+
+/**
+ * \brief Initialize components of the ImgU instance
+ * \param[in] media The ImgU instance media device
+ * \param[in] index The ImgU instance index
+ *
+ * Create and open the V4L2 devices and subdevices of the ImgU instance
+ * with \a index.
+ *
+ * In case of errors the created V4L2VideoDevice and V4L2Subdevice instances
+ * are destroyed at pipeline handler delete time.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::init(MediaDevice *media, unsigned int index)
+{
+ int ret;
+
+ name_ = "ipu3-imgu " + std::to_string(index);
+ media_ = media;
+
+	/*
+	 * The presence of the media entities in the media device has been
+	 * verified by the match() function: no need to check the validity of
+	 * the newly created video devices and subdevices here.
+	 */
+ imgu_ = V4L2Subdevice::fromEntityName(media, name_);
+ ret = imgu_->open();
+ if (ret)
+ return ret;
+
+ input_ = V4L2VideoDevice::fromEntityName(media, name_ + " input");
+ ret = input_->open();
+ if (ret)
+ return ret;
+
+ output_ = V4L2VideoDevice::fromEntityName(media, name_ + " output");
+ ret = output_->open();
+ if (ret)
+ return ret;
+
+ viewfinder_ = V4L2VideoDevice::fromEntityName(media, name_ + " viewfinder");
+ ret = viewfinder_->open();
+ if (ret)
+ return ret;
+
+ param_ = V4L2VideoDevice::fromEntityName(media, name_ + " parameters");
+ ret = param_->open();
+ if (ret)
+ return ret;
+
+ stat_ = V4L2VideoDevice::fromEntityName(media, name_ + " 3a stat");
+ ret = stat_->open();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * \brief Calculate the ImgU pipe configuration parameters
+ * \param[in] pipe The requested ImgU configuration
+ * \return An ImgUDevice::PipeConfig instance on success, an empty configuration
+ * otherwise
+ */
+ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
+{
+ pipeConfigs.clear();
+
+ LOG(IPU3, Debug) << "Calculating pipe configuration for: ";
+ LOG(IPU3, Debug) << "input: " << pipe->input;
+ LOG(IPU3, Debug) << "main: " << pipe->main;
+ LOG(IPU3, Debug) << "vf: " << pipe->viewfinder;
+
+ const Size &in = pipe->input;
+
+ /*
+ * \todo Filter out all resolutions < IF_CROP_MAX.
+ * See https://bugs.libcamera.org/show_bug.cgi?id=32
+ */
+ if (in.width < ImgUDevice::kIFMaxCropWidth || in.height < ImgUDevice::kIFMaxCropHeight) {
+ LOG(IPU3, Error) << "Input resolution " << in << " not supported";
+ return {};
+ }
+
+ Size gdc = calculateGDC(pipe);
+
+ float bdsSF = static_cast<float>(in.width) / gdc.width;
+ float sf = findScaleFactor(bdsSF, bdsScalingFactors, true);
+
+ /* Populate the configurations vector by scaling width and height. */
+ unsigned int ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
+ unsigned int ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
+ unsigned int minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
+ unsigned int minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
+ while (ifWidth >= minIfWidth) {
+ while (ifHeight >= minIfHeight) {
+ Size iif{ ifWidth, ifHeight };
+ calculateBDS(pipe, iif, gdc, sf);
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ ifWidth -= ImgUDevice::kIFAlignWidth;
+ }
+
+ /* Repeat search by scaling width first. */
+ ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
+ ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
+ minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
+ minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
+ while (ifHeight >= minIfHeight) {
+ /*
+ * \todo This procedure is probably broken:
+ * https://github.com/intel/intel-ipu3-pipecfg/issues/2
+ */
+ while (ifWidth >= minIfWidth) {
+ Size iif{ ifWidth, ifHeight };
+ calculateBDS(pipe, iif, gdc, sf);
+ ifWidth -= ImgUDevice::kIFAlignWidth;
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ if (pipeConfigs.size() == 0) {
+ LOG(IPU3, Error) << "Failed to calculate pipe configuration";
+ return {};
+ }
+
+ FOV bestFov = calcFOV(pipe->input, pipeConfigs[0]);
+ unsigned int bestIndex = 0;
+ unsigned int p = 0;
+ for (auto pipeConfig : pipeConfigs) {
+ FOV fov = calcFOV(pipe->input, pipeConfig);
+ if (fov.isLarger(bestFov)) {
+ bestFov = fov;
+ bestIndex = p;
+ }
+
+ ++p;
+ }
+
+ LOG(IPU3, Debug) << "Computed pipe configuration: ";
+ LOG(IPU3, Debug) << "IF: " << pipeConfigs[bestIndex].iif;
+ LOG(IPU3, Debug) << "BDS: " << pipeConfigs[bestIndex].bds;
+ LOG(IPU3, Debug) << "GDC: " << pipeConfigs[bestIndex].gdc;
+
+ return pipeConfigs[bestIndex];
+}
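
A typical invocation, with hypothetical sizes, looks as follows; the returned candidate is the one with the largest field of view:

    ImgUDevice::Pipe pipe{};
    pipe.input = { 2592, 1944 };      /* CIO2 output size */
    pipe.main = { 1920, 1080 };       /* main output */
    pipe.viewfinder = { 1280, 720 };  /* viewfinder output */

    ImgUDevice::PipeConfig cfg = imgu->calculatePipeConfig(&pipe);
    if (cfg.isNull()) {
        /* No IF/BDS/GDC combination supports these resolutions. */
    }
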
+
+/**
+ * \brief Configure the ImgU pipeline
+ * \param[in] pipeConfig The ImgU pipe configuration parameters
+ * \param[in] inputFormat The format to be applied to ImgU input
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat)
+{
+ /* Configure the ImgU input video device with the requested sizes. */
+ int ret = input_->setFormat(inputFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "ImgU input format = " << *inputFormat;
+
+ /*
+ * \todo The IPU3 driver implementation shall be changed to use the
+ * input sizes as 'ImgU Input' subdevice sizes, and use the desired
+ * GDC output sizes to configure the crop/compose rectangles.
+ *
+ * The current IPU3 driver implementation uses GDC sizes as the
+ * 'ImgU Input' subdevice sizes, and the input video device sizes
+ * to configure the crop/compose rectangles, contradicting the
+ * V4L2 specification.
+ */
+ Rectangle iif{ 0, 0, pipeConfig.iif };
+ ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_CROP, &iif);
+ if (ret)
+ return ret;
+ LOG(IPU3, Debug) << "ImgU IF rectangle = " << iif;
+
+ Rectangle bds{ 0, 0, pipeConfig.bds };
+ ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_COMPOSE, &bds);
+ if (ret)
+ return ret;
+ LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
+
+ V4L2SubdeviceFormat gdcFormat = {};
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.size = pipeConfig.gdc;
+
+ ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "ImgU GDC format = " << gdcFormat;
+
+ StreamConfiguration paramCfg = {};
+ paramCfg.size = inputFormat->size;
+ V4L2DeviceFormat paramFormat;
+ ret = configureVideoDevice(param_.get(), PAD_PARAM, paramCfg, &paramFormat);
+ if (ret)
+ return ret;
+
+ StreamConfiguration statCfg = {};
+ statCfg.size = inputFormat->size;
+ V4L2DeviceFormat statFormat;
+ ret = configureVideoDevice(stat_.get(), PAD_STAT, statCfg, &statFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * \brief Configure a video device on the ImgU
+ * \param[in] dev The video device to configure
+ * \param[in] pad The pad of the ImgU subdevice
+ * \param[in] cfg The requested configuration
+ * \param[out] outputFormat The format set on the video device
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
+ const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+{
+ V4L2SubdeviceFormat imguFormat = {};
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.size = cfg.size;
+
+ int ret = imgu_->setFormat(pad, &imguFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * No need to apply format to the param or stat video devices as the
+ * driver ignores the operation.
+ */
+ if (dev == param_.get() || dev == stat_.get())
+ return 0;
+
+ *outputFormat = {};
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
+ outputFormat->size = cfg.size;
+ outputFormat->planesCount = 2;
+
+ ret = dev->setFormat(outputFormat);
+ if (ret)
+ return ret;
+
+ const char *name = dev == output_.get() ? "output" : "viewfinder";
+ LOG(IPU3, Debug) << "ImgU " << name << " format = "
+ << *outputFormat;
+
+ return 0;
+}
+
+/**
+ * \brief Allocate buffers for all the ImgU video devices
+ */
+int ImgUDevice::allocateBuffers(unsigned int bufferCount)
+{
+ /* Share buffers between CIO2 output and ImgU input. */
+ int ret = input_->importBuffers(bufferCount);
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to import ImgU input buffers";
+ return ret;
+ }
+
+ ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
+ goto error;
+ }
+
+ ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
+ goto error;
+ }
+
+ /*
+ * Import buffers for all outputs, regardless of whether the
+ * corresponding stream is active or inactive, as the driver needs
+ * buffers to be requested on the V4L2 devices in order to operate.
+ */
+ ret = output_->importBuffers(bufferCount);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to import ImgU output buffers";
+ goto error;
+ }
+
+ ret = viewfinder_->importBuffers(bufferCount);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
+ goto error;
+ }
+
+ return 0;
+
+error:
+ freeBuffers();
+
+ return ret;
+}
+
+/**
+ * \brief Release buffers for all the ImgU video devices
+ */
+void ImgUDevice::freeBuffers()
+{
+ int ret;
+
+ paramBuffers_.clear();
+ statBuffers_.clear();
+
+ ret = output_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU output buffers";
+
+ ret = param_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU param buffers";
+
+ ret = stat_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU stat buffers";
+
+ ret = viewfinder_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU viewfinder buffers";
+
+ ret = input_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU input buffers";
+}
+
+int ImgUDevice::start()
+{
+ int ret;
+
+ /* Start the ImgU video devices. */
+ ret = output_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU output";
+ return ret;
+ }
+
+ ret = viewfinder_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU viewfinder";
+ return ret;
+ }
+
+ ret = param_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU param";
+ return ret;
+ }
+
+ ret = stat_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU stat";
+ return ret;
+ }
+
+ ret = input_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU input";
+ return ret;
+ }
+
+ return 0;
+}
+
+int ImgUDevice::stop()
+{
+ int ret;
+
+ ret = output_->streamOff();
+ ret |= viewfinder_->streamOff();
+ ret |= param_->streamOff();
+ ret |= stat_->streamOff();
+ ret |= input_->streamOff();
+
+ return ret;
+}
+
+/**
+ * \brief Enable or disable a single link on the ImgU instance
+ *
+ * This function assumes the media device associated with the ImgU instance
+ * is open.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::linkSetup(const std::string &source, unsigned int sourcePad,
+ const std::string &sink, unsigned int sinkPad,
+ bool enable)
+{
+ MediaLink *link = media_->link(source, sourcePad, sink, sinkPad);
+ if (!link) {
+ LOG(IPU3, Error)
+ << "Failed to get link: '" << source << "':"
+ << sourcePad << " -> '" << sink << "':" << sinkPad;
+ return -ENODEV;
+ }
+
+ return link->setEnabled(enable);
+}
+
+/**
+ * \brief Enable or disable all media links in the ImgU instance to prepare
+ * for capture operations
+ *
+ * \todo This function will probably be removed or changed once links are
+ * enabled or disabled selectively.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::enableLinks(bool enable)
+{
+ std::string viewfinderName = name_ + " viewfinder";
+ std::string paramName = name_ + " parameters";
+ std::string outputName = name_ + " output";
+ std::string statName = name_ + " 3a stat";
+ std::string inputName = name_ + " input";
+ int ret;
+
+ ret = linkSetup(inputName, 0, name_, PAD_INPUT, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(name_, PAD_OUTPUT, outputName, 0, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(name_, PAD_VF, viewfinderName, 0, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(paramName, 0, name_, PAD_PARAM, enable);
+ if (ret)
+ return ret;
+
+ return linkSetup(name_, PAD_STAT, statName, 0, enable);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
new file mode 100644
index 00000000..fa508316
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 ImgU
+ */
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+class MediaDevice;
+class Size;
+struct StreamConfiguration;
+
+class ImgUDevice
+{
+public:
+ static constexpr unsigned int kFilterWidth = 4;
+ static constexpr unsigned int kFilterHeight = 4;
+
+ static constexpr unsigned int kIFAlignWidth = 2;
+ static constexpr unsigned int kIFAlignHeight = 4;
+
+ static constexpr unsigned int kIFMaxCropWidth = 40;
+ static constexpr unsigned int kIFMaxCropHeight = 540;
+
+ static constexpr unsigned int kBDSAlignWidth = 2;
+ static constexpr unsigned int kBDSAlignHeight = 4;
+
+ static constexpr float kBDSSfMax = 2.5;
+ static constexpr float kBDSSfMin = 1.0;
+ static constexpr float kBDSSfStep = 0.03125;
+
+ static constexpr Size kOutputMinSize = { 2, 2 };
+ static constexpr Size kOutputMaxSize = { 4480, 34004 };
+ static constexpr unsigned int kOutputAlignWidth = 64;
+ static constexpr unsigned int kOutputAlignHeight = 4;
+ static constexpr unsigned int kOutputMarginWidth = 64;
+ static constexpr unsigned int kOutputMarginHeight = 32;
+
+ struct PipeConfig {
+ float bds_sf;
+ Size iif;
+ Size bds;
+ Size gdc;
+
+ bool isNull() const
+ {
+ return iif.isNull() || bds.isNull() || gdc.isNull();
+ }
+ };
+
+ struct Pipe {
+ Size input;
+ Size main;
+ Size viewfinder;
+ };
+
+ int init(MediaDevice *media, unsigned int index);
+
+ PipeConfig calculatePipeConfig(Pipe *pipe);
+
+ int configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat);
+
+ int configureOutput(const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+ {
+ return configureVideoDevice(output_.get(), PAD_OUTPUT, cfg,
+ outputFormat);
+ }
+
+ int configureViewfinder(const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+ {
+ return configureVideoDevice(viewfinder_.get(), PAD_VF, cfg,
+ outputFormat);
+ }
+
+ int allocateBuffers(unsigned int bufferCount);
+ void freeBuffers();
+
+ int start();
+ int stop();
+
+ int enableLinks(bool enable);
+
+ std::unique_ptr<V4L2Subdevice> imgu_;
+ std::unique_ptr<V4L2VideoDevice> input_;
+ std::unique_ptr<V4L2VideoDevice> param_;
+ std::unique_ptr<V4L2VideoDevice> output_;
+ std::unique_ptr<V4L2VideoDevice> viewfinder_;
+ std::unique_ptr<V4L2VideoDevice> stat_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
+ std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
+
+private:
+ static constexpr unsigned int PAD_INPUT = 0;
+ static constexpr unsigned int PAD_PARAM = 1;
+ static constexpr unsigned int PAD_OUTPUT = 2;
+ static constexpr unsigned int PAD_VF = 3;
+ static constexpr unsigned int PAD_STAT = 4;
+
+ int linkSetup(const std::string &source, unsigned int sourcePad,
+ const std::string &sink, unsigned int sinkPad,
+ bool enable);
+
+ int configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
+ const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat);
+
+ std::string name_;
+ MediaDevice *media_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index 1b44460e..e31e3879 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,198 +2,131 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
-#include <iomanip>
#include <memory>
+#include <queue>
#include <vector>
-#include <linux/media-bus-format.h>
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+#include <libcamera/ipa/ipu3_ipa_proxy.h>
-namespace libcamera {
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
-LOG_DEFINE_CATEGORY(IPU3)
+#include "cio2.h"
+#include "frames.h"
+#include "imgu.h"
-class IPU3CameraData;
-
-class ImgUDevice
-{
-public:
- static constexpr unsigned int PAD_INPUT = 0;
- static constexpr unsigned int PAD_OUTPUT = 2;
- static constexpr unsigned int PAD_VF = 3;
- static constexpr unsigned int PAD_STAT = 4;
-
- /* ImgU output descriptor: group data specific to an ImgU output. */
- struct ImgUOutput {
- V4L2VideoDevice *dev;
- unsigned int pad;
- std::string name;
- std::vector<std::unique_ptr<FrameBuffer>> buffers;
- };
-
- ImgUDevice()
- : imgu_(nullptr), input_(nullptr)
- {
- output_.dev = nullptr;
- viewfinder_.dev = nullptr;
- stat_.dev = nullptr;
- }
+namespace libcamera {
- ~ImgUDevice()
- {
- delete imgu_;
- delete input_;
- delete output_.dev;
- delete viewfinder_.dev;
- delete stat_.dev;
- }
+LOG_DEFINE_CATEGORY(IPU3)
- int init(MediaDevice *media, unsigned int index);
- int configureInput(const Size &size,
- V4L2DeviceFormat *inputFormat);
- int configureOutput(ImgUOutput *output,
- const StreamConfiguration &cfg);
-
- int allocateBuffers(IPU3CameraData *data, unsigned int bufferCount);
- void freeBuffers(IPU3CameraData *data);
-
- int start();
- int stop();
-
- int linkSetup(const std::string &source, unsigned int sourcePad,
- const std::string &sink, unsigned int sinkPad,
- bool enable);
- int enableLinks(bool enable);
-
- unsigned int index_;
- std::string name_;
- MediaDevice *media_;
-
- V4L2Subdevice *imgu_;
- V4L2VideoDevice *input_;
- ImgUOutput output_;
- ImgUOutput viewfinder_;
- ImgUOutput stat_;
- /* \todo Add param video device for 3A tuning */
+static const ControlInfoMap::Map IPU3Controls = {
+ { &controls::draft::PipelineDepth, ControlInfo(2, 3) },
};
-class CIO2Device
+class IPU3CameraData : public Camera::Private
{
public:
- static constexpr unsigned int CIO2_BUFFER_COUNT = 4;
-
- CIO2Device()
- : output_(nullptr), csi2_(nullptr), sensor_(nullptr)
+ IPU3CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe)
{
}
- ~CIO2Device()
- {
- delete output_;
- delete csi2_;
- delete sensor_;
- }
+ int loadIPA();
- int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size,
- V4L2DeviceFormat *outputFormat);
+ void imguOutputBufferReady(FrameBuffer *buffer);
+ void cio2BufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void queuePendingRequests();
+ void cancelPendingRequests();
+ void frameStart(uint32_t sequence);
- int allocateBuffers();
- void freeBuffers();
+ CIO2Device cio2_;
+ ImgUDevice *imgu_;
- int start();
- int stop();
+ Stream outStream_;
+ Stream vfStream_;
+ Stream rawStream_;
- static V4L2PixelFormat mediaBusToFormat(unsigned int code);
+ Rectangle cropRegion_;
- V4L2VideoDevice *output_;
- V4L2Subdevice *csi2_;
- CameraSensor *sensor_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ IPU3Frames frameInfos_;
-private:
- std::vector<std::unique_ptr<FrameBuffer>> buffers_;
-};
+ std::unique_ptr<ipa::ipu3::IPAProxyIPU3> ipa_;
-class IPU3Stream : public Stream
-{
-public:
- IPU3Stream()
- : active_(false), device_(nullptr)
- {
- }
+ /* Requests for which no buffer has been queued to the CIO2 device yet. */
+ std::queue<Request *> pendingRequests_;
+ /* Requests queued to the CIO2 device but not yet processed by the ImgU. */
+ std::queue<Request *> processingRequests_;
- bool active_;
- std::string name_;
- ImgUDevice::ImgUOutput *device_;
-};
+ ControlInfoMap ipaControls_;
-class IPU3CameraData : public CameraData
-{
-public:
- IPU3CameraData(PipelineHandler *pipe)
- : CameraData(pipe)
- {
- }
-
- void imguOutputBufferReady(FrameBuffer *buffer);
- void imguInputBufferReady(FrameBuffer *buffer);
- void cio2BufferReady(FrameBuffer *buffer);
-
- CIO2Device cio2_;
- ImgUDevice *imgu_;
-
- IPU3Stream outStream_;
- IPU3Stream vfStream_;
+private:
+ void metadataReady(unsigned int id, const ControlList &metadata);
+ void paramsComputed(unsigned int id);
+ void setSensorControls(unsigned int id, const ControlList &sensorControls,
+ const ControlList &lensControls);
};
class IPU3CameraConfiguration : public CameraConfiguration
{
public:
- IPU3CameraConfiguration(Camera *camera, IPU3CameraData *data);
+ static constexpr unsigned int kBufferCount = 4;
+ static constexpr unsigned int kMaxStreams = 3;
- Status validate() override;
+ IPU3CameraConfiguration(IPU3CameraData *data);
- const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
- const std::vector<const IPU3Stream *> &streams() { return streams_; }
+ Status validate() override;
-private:
- static constexpr unsigned int IPU3_BUFFER_COUNT = 4;
+ const StreamConfiguration &cio2Format() const { return cio2Configuration_; }
+ const ImgUDevice::PipeConfig imguConfig() const { return pipeConfig_; }
- void adjustStream(StreamConfiguration &cfg, bool scale);
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+private:
/*
* The IPU3CameraData instance is guaranteed to be valid as long as the
* corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
- std::shared_ptr<Camera> camera_;
const IPU3CameraData *data_;
- V4L2SubdeviceFormat sensorFormat_;
- std::vector<const IPU3Stream *> streams_;
+ StreamConfiguration cio2Configuration_;
+ ImgUDevice::PipeConfig pipeConfig_;
};
class PipelineHandlerIPU3 : public PipelineHandler
{
public:
static constexpr unsigned int V4L2_CID_IPU3_PIPE_MODE = 0x009819c1;
+ static constexpr Size kViewfinderSize{ 1280, 720 };
enum IPU3PipeModes {
IPU3PipeModeVideo = 0,
@@ -202,27 +135,28 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
- IPU3CameraData *cameraData(const Camera *camera)
+ IPU3CameraData *cameraData(Camera *camera)
{
- return static_cast<IPU3CameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<IPU3CameraData *>(camera->_d());
}
+ int initControls(IPU3CameraData *data);
+ int updateControls(IPU3CameraData *data);
int registerCameras();
int allocateBuffers(Camera *camera);
@@ -232,148 +166,220 @@ private:
ImgUDevice imgu1_;
MediaDevice *cio2MediaDev_;
MediaDevice *imguMediaDev_;
+
+ std::vector<IPABuffer> ipaBuffers_;
};
-IPU3CameraConfiguration::IPU3CameraConfiguration(Camera *camera,
- IPU3CameraData *data)
+IPU3CameraConfiguration::IPU3CameraConfiguration(IPU3CameraData *data)
: CameraConfiguration()
{
- camera_ = camera->shared_from_this();
data_ = data;
}
-void IPU3CameraConfiguration::adjustStream(StreamConfiguration &cfg, bool scale)
-{
- /* The only pixel format the driver supports is NV12. */
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
-
- if (scale) {
- /*
- * Provide a suitable default that matches the sensor aspect
- * ratio.
- */
- if (!cfg.size.width || !cfg.size.height) {
- cfg.size.width = 1280;
- cfg.size.height = 1280 * sensorFormat_.size.height
- / sensorFormat_.size.width;
- }
-
- /*
- * \todo: Clamp the size to the hardware bounds when we will
- * figure them out.
- *
- * \todo: Handle the scaler (BDS) restrictions. The BDS can
- * only scale with the same factor in both directions, and the
- * scaling factor is limited to a multiple of 1/32. At the
- * moment the ImgU driver hides these constraints by applying
- * additional cropping, this should be fixed on the driver
- * side, and cropping should be exposed to us.
- */
- } else {
- /*
- * \todo: Properly support cropping when the ImgU driver
- * interface will be cleaned up.
- */
- cfg.size = sensorFormat_.size;
- }
-
- /*
- * Clamp the size to match the ImgU alignment constraints. The width
- * shall be a multiple of 8 pixels and the height a multiple of 4
- * pixels.
- */
- if (cfg.size.width % 8 || cfg.size.height % 4) {
- cfg.size.width &= ~7;
- cfg.size.height &= ~3;
- }
-
- cfg.bufferCount = IPU3_BUFFER_COUNT;
-}
-
CameraConfiguration::Status IPU3CameraConfiguration::validate()
{
- const CameraSensor *sensor = data_->cio2_.sensor_;
Status status = Valid;
if (config_.empty())
return Invalid;
+ /*
+ * Validate the requested transform against the sensor capabilities and
+	 * rotation, and store the final combined transform that configure()
+	 * will need to apply to the sensor, to avoid working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
/* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
status = Adjusted;
}
/*
- * Select the sensor format by collecting the maximum width and height
- * and picking the closest larger match, as the IPU3 can downscale
- * only. If no resolution is requested for any stream, or if no sensor
- * resolution is large enough, pick the largest one.
+ * Validate the requested stream configuration and select the sensor
+ * format by collecting the maximum RAW stream width and height and
+ * picking the closest larger match.
+ *
+	 * If no RAW stream is requested, use the size of the largest YUV stream,
+	 * plus margin pixels for the IF and BDS rectangles to downscale.
+ *
+ * \todo Clarify the IF and BDS margins requirements.
*/
- Size size = {};
+ unsigned int rawCount = 0;
+ unsigned int yuvCount = 0;
+ Size rawRequirement;
+ Size maxYuvSize;
+ Size rawSize;
for (const StreamConfiguration &cfg : config_) {
- if (cfg.size.width > size.width)
- size.width = cfg.size.width;
- if (cfg.size.height > size.height)
- size.height = cfg.size.height;
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ rawCount++;
+ rawSize = std::max(rawSize, cfg.size);
+ } else {
+ yuvCount++;
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
+ }
}
- if (!size.width || !size.height)
- size = sensor->resolution();
-
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 },
- size);
- if (!sensorFormat_.size.width || !sensorFormat_.size.height)
- sensorFormat_.size = sensor->resolution();
+ if (rawCount > 1 || yuvCount > 2) {
+ LOG(IPU3, Debug) << "Camera configuration not supported";
+ return Invalid;
+ } else if (rawCount && !yuvCount) {
+ /*
+		 * Disallow raw-only camera configurations. Currently, the ImgU
+		 * does not get configured for raw-only streams, and configure()
+		 * returns early in that case. To support raw-only streams, the
+		 * IPA needs to be configured, since it sets up the sensor
+		 * controls for the capture.
+ *
+ * \todo Configure the ImgU with internal buffers which will enable
+ * the IPA to get configured for the raw-only camera configuration.
+ */
+ LOG(IPU3, Debug)
+ << "Camera configuration cannot support raw-only streams";
+ return Invalid;
+ }
/*
- * Verify and update all configuration entries, and assign a stream to
- * each of them. The viewfinder stream can scale, while the output
- * stream can crop only, so select the output stream when the requested
- * resolution is equal to the sensor resolution, and the viewfinder
- * stream otherwise.
+ * Generate raw configuration from CIO2.
+ *
+ * The output YUV streams will be limited in size to the maximum frame
+ * size requested for the RAW stream, if present.
+ *
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
+ * https://bugs.libcamera.org/show_bug.cgi?id=32
*/
- std::set<const IPU3Stream *> availableStreams = {
- &data_->outStream_,
- &data_->vfStream_,
- };
+ if (rawSize.isNull())
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
+
+ cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
+ if (!cio2Configuration_.pixelFormat.isValid())
+ return Invalid;
+
+ LOG(IPU3, Debug) << "CIO2 configuration: " << cio2Configuration_.toString();
- streams_.clear();
- streams_.reserve(config_.size());
+ ImgUDevice::Pipe pipe{};
+ pipe.input = cio2Configuration_.size;
+ /*
+ * Adjust the configurations if needed and assign streams while
+ * iterating them.
+ */
+ bool mainOutputAvailable = true;
for (unsigned int i = 0; i < config_.size(); ++i) {
- StreamConfiguration &cfg = config_[i];
- const PixelFormat pixelFormat = cfg.pixelFormat;
- const Size size = cfg.size;
- const IPU3Stream *stream;
+ const PixelFormatInfo &info = PixelFormatInfo::info(config_[i].pixelFormat);
+ const StreamConfiguration originalCfg = config_[i];
+ StreamConfiguration *cfg = &config_[i];
+
+ LOG(IPU3, Debug) << "Validating stream: " << config_[i].toString();
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Initialize the RAW stream with the CIO2 configuration. */
+ cfg->size = cio2Configuration_.size;
+ cfg->pixelFormat = cio2Configuration_.pixelFormat;
+ cfg->bufferCount = cio2Configuration_.bufferCount;
+ cfg->stride = info.stride(cfg->size.width, 0, 64);
+ cfg->frameSize = info.frameSize(cfg->size, 64);
+ cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
+
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the raw stream";
+ } else {
+ /* Assign and configure the main and viewfinder outputs. */
+
+ /*
+ * Clamp the size to match the ImgU size limits and the
+ * margins from the CIO2 output frame size.
+ *
+			 * The ImgU outputs need to be strictly smaller than
+ * the CIO2 output frame and rounded down to 64 pixels
+ * in width and 32 pixels in height. This assumption
+ * comes from inspecting the pipe configuration script
+ * results and the available suggested configurations in
+ * the ChromeOS BSP .xml camera tuning files and shall
+ * be validated.
+ *
+			 * \todo Clarify what hardware constraints, if any,
+			 * require these alignments. They might depend on the
+			 * BDS scaling factor of 1/32, as the main output lacks
+			 * the YUV scaler that the viewfinder output has.
+ */
+ unsigned int limit;
+ limit = utils::alignDown(cio2Configuration_.size.width - 1,
+ ImgUDevice::kOutputMarginWidth);
+ cfg->size.width = std::clamp(cfg->size.width,
+ ImgUDevice::kOutputMinSize.width,
+ limit);
+
+ limit = utils::alignDown(cio2Configuration_.size.height - 1,
+ ImgUDevice::kOutputMarginHeight);
+ cfg->size.height = std::clamp(cfg->size.height,
+ ImgUDevice::kOutputMinSize.height,
+ limit);
+
+ cfg->size.alignDownTo(ImgUDevice::kOutputAlignWidth,
+ ImgUDevice::kOutputAlignHeight);
+
+ cfg->pixelFormat = formats::NV12;
+ cfg->bufferCount = kBufferCount;
+ cfg->stride = info.stride(cfg->size.width, 0, 1);
+ cfg->frameSize = info.frameSize(cfg->size, 1);
- if (cfg.size == sensorFormat_.size)
- stream = &data_->outStream_;
- else
- stream = &data_->vfStream_;
+ /*
+ * Use the main output stream in case only one stream is
+ * requested or if the current configuration is the one
+ * with the maximum YUV output size.
+ */
+ if (mainOutputAvailable &&
+ (originalCfg.size == maxYuvSize || yuvCount == 1)) {
+ cfg->setStream(const_cast<Stream *>(&data_->outStream_));
+ mainOutputAvailable = false;
- if (availableStreams.find(stream) == availableStreams.end())
- stream = *availableStreams.begin();
+ pipe.main = cfg->size;
+ if (yuvCount == 1)
+ pipe.viewfinder = pipe.main;
- LOG(IPU3, Debug)
- << "Assigned '" << stream->name_ << "' to stream " << i;
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the main output";
+ } else {
+ cfg->setStream(const_cast<Stream *>(&data_->vfStream_));
+ pipe.viewfinder = cfg->size;
- bool scale = stream == &data_->vfStream_;
- adjustStream(config_[i], scale);
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the viewfinder output";
+ }
+ }
- if (cfg.pixelFormat != pixelFormat || cfg.size != size) {
+ if (cfg->pixelFormat != originalCfg.pixelFormat ||
+ cfg->size != originalCfg.size) {
LOG(IPU3, Debug)
<< "Stream " << i << " configuration adjusted to "
- << cfg.toString();
+ << cfg->toString();
status = Adjusted;
}
+ }
- streams_.push_back(stream);
- availableStreams.erase(stream);
+ /* Only compute the ImgU configuration if a YUV stream has been requested. */
+ if (yuvCount) {
+ pipeConfig_ = data_->imgu_->calculatePipeConfig(&pipe);
+ if (pipeConfig_.isNull()) {
+ LOG(IPU3, Error) << "Failed to calculate pipe configuration: "
+ << "unsupported resolutions.";
+ return Invalid;
+ }
}
return status;
@@ -384,82 +390,70 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config;
- std::set<IPU3Stream *> streams = {
- &data->outStream_,
- &data->vfStream_,
- };
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
- config = new IPU3CameraConfiguration(camera, data);
+ if (roles.empty())
+ return config;
+ Size sensorResolution = data->cio2_.sensor()->resolution();
for (const StreamRole role : roles) {
- StreamConfiguration cfg = {};
- IPU3Stream *stream = nullptr;
-
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ Size size;
switch (role) {
case StreamRole::StillCapture:
/*
- * Pick the output stream by default as the Viewfinder
- * and VideoRecording roles are not allowed on
- * the output stream.
- */
- if (streams.find(&data->outStream_) != streams.end()) {
- stream = &data->outStream_;
- } else if (streams.find(&data->vfStream_) != streams.end()) {
- stream = &data->vfStream_;
- } else {
- LOG(IPU3, Error)
- << "No stream available for requested role "
- << role;
- break;
- }
-
- /*
- * FIXME: Soraka: the maximum resolution reported by
- * both sensors (2592x1944 for ov5670 and 4224x3136 for
- * ov13858) are returned as default configurations but
- * they're not correctly processed by the ImgU.
- * Resolutions up tp 2560x1920 have been validated.
+			 * Use as the default full-frame configuration a size
+			 * strictly smaller than the sensor resolution (limited
+			 * to the ImgU maximum output size) and aligned down to
+			 * the required frame margins.
*
- * \todo Clarify ImgU alignment requirements.
+ * \todo Clarify the alignment constraints as explained
+ * in validate()
*/
- cfg.size = { 2560, 1920 };
+ size = sensorResolution.boundedTo(ImgUDevice::kOutputMaxSize)
+ .shrunkBy({ 1, 1 })
+ .alignedDownTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight);
+ pixelFormat = formats::NV12;
+ bufferCount = IPU3CameraConfiguration::kBufferCount;
+ streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
- case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- /*
- * We can't use the 'output' stream for viewfinder or
- * video capture roles.
- *
- * \todo This is an artificial limitation until we
- * figure out the exact capabilities of the hardware.
- */
- if (streams.find(&data->vfStream_) == streams.end()) {
- LOG(IPU3, Error)
- << "No stream available for requested role "
- << role;
- break;
- }
+ case StreamRole::Raw: {
+ StreamConfiguration cio2Config =
+ data->cio2_.generateConfiguration(sensorResolution);
+ pixelFormat = cio2Config.pixelFormat;
+ size = cio2Config.size;
+ bufferCount = cio2Config.bufferCount;
+
+ for (const PixelFormat &format : data->cio2_.formats())
+ streamFormats[format] = data->cio2_.sizes(format);
- stream = &data->vfStream_;
+ break;
+ }
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
/*
- * Align the default viewfinder size to the maximum
- * available sensor resolution and to the IPU3
- * alignment constraints.
+			 * Default viewfinder and video recording to 1280x720,
+ * capped to the maximum sensor resolution and aligned
+ * to the ImgU output constraints.
*/
- const Size &res = data->cio2_.sensor_->resolution();
- unsigned int width = std::min(1280U, res.width);
- unsigned int height = std::min(720U, res.height);
- cfg.size = { width & ~7, height & ~3 };
+ size = sensorResolution.boundedTo(kViewfinderSize)
+ .alignedDownTo(ImgUDevice::kOutputAlignWidth,
+ ImgUDevice::kOutputAlignHeight);
+ pixelFormat = formats::NV12;
+ bufferCount = IPU3CameraConfiguration::kBufferCount;
+ streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
}
@@ -467,20 +461,19 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- break;
- }
-
- if (!stream) {
- delete config;
return nullptr;
}
- streams.erase(stream);
-
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = bufferCount;
config->addConfiguration(cfg);
}
- config->validate();
+ if (config->validate() == CameraConfiguration::Invalid)
+ return {};
return config;
}
@@ -490,10 +483,11 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
IPU3CameraConfiguration *config =
static_cast<IPU3CameraConfiguration *>(c);
IPU3CameraData *data = cameraData(camera);
- IPU3Stream *outStream = &data->outStream_;
- IPU3Stream *vfStream = &data->vfStream_;
+ Stream *outStream = &data->outStream_;
+ Stream *vfStream = &data->vfStream_;
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
+ V4L2DeviceFormat outputFormat;
int ret;
/*
@@ -528,8 +522,11 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
+ * \todo Don't configure the ImgU at all if we only have a single
+ * stream which is for raw capture, in which case no buffers will
+ * ever be queued to the ImgU.
*/
ret = data->imgu_->enableLinks(true);
if (ret)
@@ -539,36 +536,53 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
* Pass the requested stream size to the CIO2 unit and get back the
* adjusted format to be propagated to the ImgU output devices.
*/
- const Size &sensorSize = config->sensorFormat().size;
- V4L2DeviceFormat cio2Format = {};
- ret = cio2->configure(sensorSize, &cio2Format);
+ const Size &sensorSize = config->cio2Format().size;
+ V4L2DeviceFormat cio2Format;
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
- ret = imgu->configureInput(sensorSize, &cio2Format);
+ IPACameraSensorInfo sensorInfo;
+ cio2->sensor()->sensorInfo(&sensorInfo);
+ data->cropRegion_ = sensorInfo.analogCrop;
+
+ /*
+ * If the ImgU gets configured, its driver seems to expect that
+ * buffers will be queued to its outputs, as otherwise the next
+ * capture session that uses the ImgU fails when queueing
+ * buffers to its input.
+ *
+ * If no ImgU configuration has been computed, it means only a RAW
+ * stream has been requested: return here to skip the ImgU configuration
+ * part.
+ */
+ ImgUDevice::PipeConfig imguConfig = config->imguConfig();
+ if (imguConfig.isNull())
+ return 0;
+
+ ret = imgu->configure(imguConfig, &cio2Format);
if (ret)
return ret;
/* Apply the format to the configured streams output devices. */
- outStream->active_ = false;
- vfStream->active_ = false;
+ StreamConfiguration *mainCfg = nullptr;
+ StreamConfiguration *vfCfg = nullptr;
for (unsigned int i = 0; i < config->size(); ++i) {
- /*
- * Use a const_cast<> here instead of storing a mutable stream
- * pointer in the configuration to let the compiler catch
- * unwanted modifications of camera data in the configuration
- * validate() implementation.
- */
- IPU3Stream *stream = const_cast<IPU3Stream *>(config->streams()[i]);
StreamConfiguration &cfg = (*config)[i];
-
- stream->active_ = true;
- cfg.setStream(stream);
-
- ret = imgu->configureOutput(stream->device_, cfg);
- if (ret)
- return ret;
+ Stream *stream = cfg.stream();
+
+ if (stream == outStream) {
+ mainCfg = &cfg;
+ ret = imgu->configureOutput(cfg, &outputFormat);
+ if (ret)
+ return ret;
+ } else if (stream == vfStream) {
+ vfCfg = &cfg;
+ ret = imgu->configureViewfinder(cfg, &outputFormat);
+ if (ret)
+ return ret;
+ }
}
/*
@@ -576,51 +590,64 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
* the configuration of the active one for that purpose (there should
* be at least one active stream in the configuration request).
*/
- if (!outStream->active_) {
- ret = imgu->configureOutput(outStream->device_, config->at(0));
+ if (!vfCfg) {
+ ret = imgu->configureViewfinder(*mainCfg, &outputFormat);
if (ret)
return ret;
}
- if (!vfStream->active_) {
- ret = imgu->configureOutput(vfStream->device_, config->at(0));
- if (ret)
- return ret;
- }
-
- /*
- * Apply the largest available format to the stat node.
- * \todo Revise this when we'll actually use the stat node.
- */
- StreamConfiguration statCfg = {};
- statCfg.size = cio2Format.size;
-
- ret = imgu->configureOutput(&imgu->stat_, statCfg);
- if (ret)
- return ret;
-
/* Apply the "pipe_mode" control to the ImgU subdevice. */
ControlList ctrls(imgu->imgu_->controls());
+ /*
+ * Set the ImgU pipe mode to 'Video' unconditionally to have statistics
+ * generated.
+ *
+ * \todo Figure out what the 'Still Capture' mode is meant for, and use
+ * it accordingly.
+ */
ctrls.set(V4L2_CID_IPU3_PIPE_MODE,
- static_cast<int32_t>(vfStream->active_ ? IPU3PipeModeVideo :
- IPU3PipeModeStillCapture));
+ static_cast<int32_t>(IPU3PipeModeVideo));
ret = imgu->imgu_->setControls(&ctrls);
if (ret) {
LOG(IPU3, Error) << "Unable to set pipe_mode control";
return ret;
}
- return 0;
+ ipa::ipu3::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->cio2_.sensor()->controls();
+
+ CameraLens *lens = data->cio2_.sensor()->focusLens();
+ if (lens)
+ configInfo.lensControls = lens->controls();
+
+ configInfo.sensorInfo = sensorInfo;
+ configInfo.bdsOutputSize = config->imguConfig().bds;
+ configInfo.iif = config->imguConfig().iif;
+
+ ret = data->ipa_->configure(configInfo, &data->ipaControls_);
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to configure IPA: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ return updateControls(data);
}
int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- IPU3Stream *ipu3stream = static_cast<IPU3Stream *>(stream);
- V4L2VideoDevice *video = ipu3stream->device_->dev;
+ IPU3CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- return video->exportBuffers(count, buffers);
+ if (stream == &data->outStream_)
+ return data->imgu_->output_->exportBuffers(count, buffers);
+ else if (stream == &data->vfStream_)
+ return data->imgu_->viewfinder_->exportBuffers(count, buffers);
+ else if (stream == &data->rawStream_)
+ return data->cio2_.exportBuffers(count, buffers);
+
+ return -EINVAL;
}
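/*
 * Buffer export is now routed per stream (ImgU output, ImgU viewfinder, or
 * CIO2 for raw). A minimal sketch of the application-side path that ends up
 * in exportFrameBuffers(), assuming camera is a std::shared_ptr<Camera> and
 * using the public FrameBufferAllocator API:
 *
 *     FrameBufferAllocator allocator(camera);
 *     for (StreamConfiguration &cfg : *config) {
 *         int ret = allocator.allocate(cfg.stream());
 *         if (ret < 0)
 *             return ret;
 *         // allocator.buffers(cfg.stream()) now holds the exported buffers
 *     }
 */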
/**
@@ -634,23 +661,39 @@ int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
unsigned int bufferCount;
int ret;
- ret = cio2->allocateBuffers();
+ bufferCount = std::max({
+ data->outStream_.configuration().bufferCount,
+ data->vfStream_.configuration().bufferCount,
+ data->rawStream_.configuration().bufferCount,
+ });
+
+ ret = imgu->allocateBuffers(bufferCount);
if (ret < 0)
return ret;
- bufferCount = ret;
+ /* Map buffers to the IPA. */
+ unsigned int ipaBufferId = 1;
- ret = imgu->allocateBuffers(data, bufferCount);
- if (ret < 0) {
- cio2->freeBuffers();
- return ret;
+ for (const std::unique_ptr<FrameBuffer> &buffer : imgu->paramBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
}
+ for (const std::unique_ptr<FrameBuffer> &buffer : imgu->statBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
+ }
+
+ data->ipa_->mapBuffers(ipaBuffers_);
+
+ data->frameInfos_.init(imgu->paramBuffers_, imgu->statBuffers_);
+ data->frameInfos_.bufferAvailable.connect(
+ data, &IPU3CameraData::queuePendingRequests);
+
return 0;
}
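/*
 * The buffer cookie doubles as the IPA-side buffer id. A hedged sketch of
 * the inverse lookup; findParamBuffer() is a hypothetical helper, not part
 * of this patch:
 *
 *     FrameBuffer *findParamBuffer(ImgUDevice *imgu, uint64_t id)
 *     {
 *         for (const std::unique_ptr<FrameBuffer> &buffer : imgu->paramBuffers_)
 *             if (buffer->cookie() == id)
 *                 return buffer.get();
 *         return nullptr;
 *     }
 */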
@@ -658,24 +701,44 @@ int PipelineHandlerIPU3::freeBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- data->cio2_.freeBuffers();
- data->imgu_->freeBuffers(data);
+ data->frameInfos_.clear();
+
+ std::vector<unsigned int> ids;
+ for (IPABuffer &ipabuf : ipaBuffers_)
+ ids.push_back(ipabuf.id);
+
+ data->ipa_->unmapBuffers(ids);
+ ipaBuffers_.clear();
+
+ data->imgu_->freeBuffers();
return 0;
}
-int PipelineHandlerIPU3::start(Camera *camera)
+int PipelineHandlerIPU3::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
IPU3CameraData *data = cameraData(camera);
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
int ret;
+ /* Disable test pattern mode on the sensor, if any. */
+ ret = cio2->sensor()->setTestPatternMode(
+ controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return ret;
+
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ ret = data->ipa_->start();
+ if (ret)
+ goto error;
+
+ data->delayedCtrls_->reset();
+
/*
* Start the ImgU video devices, buffers will be queued to the
* ImgU output and viewfinder when requests will be queued.
@@ -685,49 +748,99 @@ int PipelineHandlerIPU3::start(Camera *camera)
goto error;
ret = imgu->start();
- if (ret) {
- imgu->stop();
- cio2->stop();
+ if (ret)
goto error;
- }
return 0;
error:
+ imgu->stop();
+ cio2->stop();
+ data->ipa_->stop();
freeBuffers(camera);
- LOG(IPU3, Error) << "Failed to start camera " << camera->name();
+ LOG(IPU3, Error) << "Failed to start camera " << camera->id();
return ret;
}
-void PipelineHandlerIPU3::stop(Camera *camera)
+void PipelineHandlerIPU3::stopDevice(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- int ret;
+ int ret = 0;
+
+ data->cancelPendingRequests();
+
+ data->ipa_->stop();
- ret = data->cio2_.stop();
ret |= data->imgu_->stop();
+ ret |= data->cio2_.stop();
if (ret)
- LOG(IPU3, Warning) << "Failed to stop camera "
- << camera->name();
+ LOG(IPU3, Warning) << "Failed to stop camera " << camera->id();
freeBuffers(camera);
}
-int PipelineHandlerIPU3::queueRequestDevice(Camera *camera, Request *request)
+void IPU3CameraData::cancelPendingRequests()
{
- int error = 0;
+ processingRequests_ = {};
- for (auto it : request->buffers()) {
- IPU3Stream *stream = static_cast<IPU3Stream *>(it.first);
- FrameBuffer *buffer = it.second;
+ while (!pendingRequests_.empty()) {
+ Request *request = pendingRequests_.front();
- int ret = stream->device_->dev->queueBuffer(buffer);
- if (ret < 0)
- error = ret;
+ for (auto it : request->buffers()) {
+ FrameBuffer *buffer = it.second;
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+
+ pipe()->completeRequest(request);
+ pendingRequests_.pop();
}
+}
+
+void IPU3CameraData::queuePendingRequests()
+{
+ while (!pendingRequests_.empty()) {
+ Request *request = pendingRequests_.front();
+
+ IPU3Frames::Info *info = frameInfos_.create(request);
+ if (!info)
+ break;
+
+ /*
+ * Queue a buffer on the CIO2, using the raw stream buffer
+ * provided in the request, if any, or a CIO2 internal buffer
+ * otherwise.
+ */
+ FrameBuffer *reqRawBuffer = request->findBuffer(&rawStream_);
+ FrameBuffer *rawBuffer = cio2_.queueBuffer(request, reqRawBuffer);
+ /*
+	 * \todo If queueBuffer fails to queue a buffer to the device,
+	 * report the request as failed by cancelling the request and
+ * calling PipelineHandler::completeRequest().
+ */
+ if (!rawBuffer) {
+ frameInfos_.remove(info);
+ break;
+ }
+
+ info->rawBuffer = rawBuffer;
+
+ ipa_->queueRequest(info->id, request->controls());
- return error;
+ pendingRequests_.pop();
+ processingRequests_.push(request);
+ }
+}
+
+int PipelineHandlerIPU3::queueRequestDevice(Camera *camera, Request *request)
+{
+ IPU3CameraData *data = cameraData(camera);
+
+ data->pendingRequests_.push(request);
+ data->queuePendingRequests();
+
+ return 0;
}
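/*
 * Requests now accumulate in pendingRequests_ and are dequeued as CIO2
 * buffers become available. A minimal application-side sketch of the path
 * into queueRequestDevice(), assuming the public libcamera API:
 *
 *     std::unique_ptr<Request> request = camera->createRequest();
 *     request->addBuffer(stream, buffer);
 *     camera->queueRequest(request.get());
 */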
bool PipelineHandlerIPU3::match(DeviceEnumerator *enumerator)
@@ -783,6 +896,136 @@ bool PipelineHandlerIPU3::match(DeviceEnumerator *enumerator)
}
/**
+ * \brief Initialize the camera controls
+ * \param[in] data The camera data
+ *
+ * Initialize the camera controls by calculating the controls the pipeline
+ * is responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be initialized by the IPA init()
+ * function at camera creation time. Always call this function after IPA init().
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerIPU3::initControls(IPU3CameraData *data)
+{
+ /*
+ * \todo The controls initialized here depend on sensor configuration
+ * and their limits should be updated once the configuration gets
+ * changed.
+ *
+ * Initialize the sensor using its resolution and compute the control
+ * limits.
+ */
+ CameraSensor *sensor = data->cio2_.sensor();
+ V4L2SubdeviceFormat sensorFormat = {};
+ sensorFormat.size = sensor->resolution();
+ int ret = sensor->setFormat(&sensorFormat);
+ if (ret)
+ return ret;
+
+ return updateControls(data);
+}
+
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls the pipeline
+ * is responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to ensure the IPA
+ * controls list has been properly refreshed.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerIPU3::updateControls(IPU3CameraData *data)
+{
+ CameraSensor *sensor = data->cio2_.sensor();
+ IPACameraSensorInfo sensorInfo{};
+
+ int ret = sensor->sensorInfo(&sensorInfo);
+ if (ret)
+ return ret;
+
+ ControlInfoMap::Map controls = IPU3Controls;
+ const std::vector<controls::draft::TestPatternModeEnum>
+ &testPatternModes = sensor->testPatternModes();
+ if (!testPatternModes.empty()) {
+ std::vector<ControlValue> values;
+ values.reserve(testPatternModes.size());
+
+ for (auto pattern : testPatternModes)
+ values.emplace_back(pattern);
+
+ controls[&controls::draft::TestPatternMode] = ControlInfo(values);
+ }
+
+ /*
+ * Compute the scaler crop limits.
+ *
+	 * Initialize the control using the 'Viewfinder' configuration (1280x720)
+ * as the pipeline output resolution and the full sensor size as input
+ * frame (see the todo note in the validate() function about the usage
+ * of the sensor's full frame as ImgU input).
+ */
+
+ /*
+ * The maximum scaler crop rectangle is the analogue crop used to
+ * produce the maximum frame size.
+ */
+ const Rectangle &analogueCrop = sensorInfo.analogCrop;
+ Rectangle maxCrop = analogueCrop;
+
+ /*
+ * As the ImgU cannot up-scale, the minimum selection rectangle has to
+ * be as large as the pipeline output size. Use the default viewfinder
+ * configuration as the desired output size and calculate the minimum
+ * rectangle required to satisfy the ImgU processing margins, unless the
+ * sensor resolution is smaller.
+ *
+ * \todo This implementation is based on the same assumptions about the
+	 * ImgU pipeline configuration described in the viewfinder and main
+ * output sizes calculation in the validate() function.
+ */
+
+	/* The largest size strictly smaller than the sensor resolution, aligned down to the margins. */
+ Size minSize = sensor->resolution().shrunkBy({ 1, 1 })
+ .alignedDownTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight);
+
+ /*
+ * Either the smallest margin-aligned size larger than the viewfinder
+ * size or the adjusted sensor resolution.
+ */
+ minSize = kViewfinderSize.grownBy({ 1, 1 })
+ .alignedUpTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight)
+ .boundedTo(minSize);
+
+ /*
+	 * Re-scale to the sensor's native coordinates. Report (0,0) as the
+	 * top-left corner, as we allow applications to freely pan the crop area.
+ */
+ Rectangle minCrop = Rectangle(minSize).scaledBy(analogueCrop.size(),
+ sensorInfo.outputSize);
+
+ controls[&controls::ScalerCrop] = ControlInfo(minCrop, maxCrop, maxCrop);
+
+ /* Add the IPA registered controls to list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
+ controls::controls);
+
+ return 0;
+}
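/*
 * A worked example of the crop limit computation above, assuming for
 * illustration a 4056x3040 sensor and output margins of 64x32 (the margin
 * values are assumptions, not taken from this patch), with the 1280x720
 * kViewfinderSize:
 *
 *     (4055, 3039).alignedDownTo(64, 32) -> 4032x3008
 *     (1281, 721).alignedUpTo(64, 32)    -> 1344x736
 *     1344x736.boundedTo(4032x3008)      -> 1344x736 = minSize
 *
 * With an analogue crop equal to the sensor output size, minCrop is then
 * the 1344x736 rectangle anchored at (0, 0).
 */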
+
+/**
* \brief Initialise ImgU and CIO2 devices associated with cameras
*
* Initialise the two ImgU instances and create cameras with an associated
@@ -816,6 +1059,7 @@ int PipelineHandlerIPU3::registerCameras()
std::set<Stream *> streams = {
&data->outStream_,
&data->vfStream_,
+ &data->rawStream_,
};
CIO2Device *cio2 = &data->cio2_;
@@ -823,8 +1067,35 @@ int PipelineHandlerIPU3::registerCameras()
if (ret)
continue;
+ ret = data->loadIPA();
+ if (ret)
+ continue;
+
/* Initialize the camera properties. */
- data->properties_ = cio2->sensor_->properties();
+ data->properties_ = cio2->sensor()->properties();
+
+ ret = initControls(data.get());
+ if (ret)
+ continue;
+
+ const CameraSensorProperties::SensorDelays &delays = cio2->sensor()->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(cio2->sensor()->device(),
+ params);
+ data->cio2_.frameStart().connect(data.get(),
+ &IPU3CameraData::frameStart);
+
+ /* Convert the sensor rotation to a transformation */
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
+ LOG(IPU3, Warning) << "Rotation control not exposed by "
+ << cio2->sensor()->id()
+ << ". Assume rotation 0";
/**
* \todo Dynamically assign ImgU and output devices to each
@@ -833,10 +1104,6 @@ int PipelineHandlerIPU3::registerCameras()
* second.
*/
data->imgu_ = numCameras ? &imgu1_ : &imgu0_;
- data->outStream_.device_ = &data->imgu_->output_;
- data->outStream_.name_ = "output";
- data->vfStream_.device_ = &data->imgu_->viewfinder_;
- data->vfStream_.name_ = "viewfinder";
/*
* Connect video devices' 'bufferReady' signals to their
@@ -846,27 +1113,31 @@ int PipelineHandlerIPU3::registerCameras()
* associated ImgU input where they get processed and
* returned through the ImgU main and secondary outputs.
*/
- data->cio2_.output_->bufferReady.connect(data.get(),
- &IPU3CameraData::cio2BufferReady);
- data->imgu_->input_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguInputBufferReady);
- data->imgu_->output_.dev->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
- data->imgu_->viewfinder_.dev->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ data->cio2_.bufferReady().connect(data.get(),
+ &IPU3CameraData::cio2BufferReady);
+ data->cio2_.bufferAvailable.connect(
+ data.get(), &IPU3CameraData::queuePendingRequests);
+ data->imgu_->input_->bufferReady.connect(&data->cio2_,
+ &CIO2Device::tryReturnBuffer);
+ data->imgu_->output_->bufferReady.connect(data.get(),
+ &IPU3CameraData::imguOutputBufferReady);
+ data->imgu_->viewfinder_->bufferReady.connect(data.get(),
+ &IPU3CameraData::imguOutputBufferReady);
+ data->imgu_->param_->bufferReady.connect(data.get(),
+ &IPU3CameraData::paramBufferReady);
+ data->imgu_->stat_->bufferReady.connect(data.get(),
+ &IPU3CameraData::statBufferReady);
/* Create and register the Camera instance. */
- std::string cameraName = cio2->sensor_->entity()->name() + " "
- + std::to_string(id);
- std::shared_ptr<Camera> camera = Camera::create(this,
- cameraName,
- streams);
+ const std::string &cameraId = cio2->sensor()->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), cameraId, streams);
- registerCamera(std::move(camera), std::move(data));
+ registerCamera(std::move(camera));
LOG(IPU3, Info)
<< "Registered Camera[" << numCameras << "] \""
- << cameraName << "\""
+ << cameraId << "\""
<< " connected to CSI-2 receiver " << id;
numCameras++;
@@ -875,594 +1146,276 @@ int PipelineHandlerIPU3::registerCameras()
return numCameras ? 0 : -ENODEV;
}
-/* -----------------------------------------------------------------------------
- * Buffer Ready slots
- */
-
-/**
- * \brief Handle buffers completion at the ImgU input
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the ImgU input are immediately queued back to the
- * CIO2 unit to continue frame capture.
- */
-void IPU3CameraData::imguInputBufferReady(FrameBuffer *buffer)
-{
- /* \todo Handle buffer failures when state is set to BufferError. */
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- return;
-
- cio2_.output_->queueBuffer(buffer);
-}
-
-/**
- * \brief Handle buffers completion at the ImgU output
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the ImgU output are directed to the application.
- */
-void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
+int IPU3CameraData::loadIPA()
{
- Request *request = buffer->request();
-
- if (!pipe_->completeBuffer(camera_, request, buffer))
- /* Request not completed yet, return here. */
- return;
+ ipa_ = IPAManager::createIPA<ipa::ipu3::IPAProxyIPU3>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
- /* Mark the request as complete. */
- pipe_->completeRequest(camera_, request);
-}
-
-/**
- * \brief Handle buffers completion at the CIO2 output
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the CIO2 are immediately queued to the ImgU unit
- * for further processing.
- */
-void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
-{
- /* \todo Handle buffer failures when state is set to BufferError. */
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- return;
-
- imgu_->input_->queueBuffer(buffer);
-}
-
-/* -----------------------------------------------------------------------------
- * ImgU Device
- */
-
-/**
- * \brief Initialize components of the ImgU instance
- * \param[in] mediaDevice The ImgU instance media device
- * \param[in] index The ImgU instance index
- *
- * Create and open the V4L2 devices and subdevices of the ImgU instance
- * with \a index.
- *
- * In case of errors the created V4L2VideoDevice and V4L2Subdevice instances
- * are destroyed at pipeline handler delete time.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::init(MediaDevice *media, unsigned int index)
-{
- int ret;
-
- index_ = index;
- name_ = "ipu3-imgu " + std::to_string(index_);
- media_ = media;
+ ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &IPU3CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady);
/*
- * The media entities presence in the media device has been verified
- * by the match() function: no need to check for newly created
- * video devices and subdevice validity here.
+ * Pass the sensor info to the IPA to initialize controls.
+ *
+ * \todo Find a way to initialize IPA controls without basing their
+ * limits on a particular sensor mode. We currently pass sensor
+ * information corresponding to the largest sensor resolution, and the
+ * IPA uses this to compute limits for supported controls. There's a
+ * discrepancy between the need to compute IPA control limits at init
+ * time, and the fact that those limits may depend on the sensor mode.
+	 * Research is required to find out how to handle this issue.
*/
- imgu_ = V4L2Subdevice::fromEntityName(media, name_);
- ret = imgu_->open();
- if (ret)
- return ret;
-
- input_ = V4L2VideoDevice::fromEntityName(media, name_ + " input");
- ret = input_->open();
- if (ret)
- return ret;
-
- output_.dev = V4L2VideoDevice::fromEntityName(media, name_ + " output");
- ret = output_.dev->open();
+ CameraSensor *sensor = cio2_.sensor();
+ V4L2SubdeviceFormat sensorFormat = {};
+ sensorFormat.size = sensor->resolution();
+ int ret = sensor->setFormat(&sensorFormat);
if (ret)
return ret;
- output_.pad = PAD_OUTPUT;
- output_.name = "output";
-
- viewfinder_.dev = V4L2VideoDevice::fromEntityName(media,
- name_ + " viewfinder");
- ret = viewfinder_.dev->open();
+ IPACameraSensorInfo sensorInfo{};
+ ret = sensor->sensorInfo(&sensorInfo);
if (ret)
return ret;
- viewfinder_.pad = PAD_VF;
- viewfinder_.name = "viewfinder";
-
- stat_.dev = V4L2VideoDevice::fromEntityName(media, name_ + " 3a stat");
- ret = stat_.dev->open();
- if (ret)
- return ret;
-
- stat_.pad = PAD_STAT;
- stat_.name = "stat";
-
- return 0;
-}
-
-/**
- * \brief Configure the ImgU unit input
- * \param[in] size The ImgU input frame size
- * \param[in] inputFormat The format to be applied to ImgU input
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::configureInput(const Size &size,
- V4L2DeviceFormat *inputFormat)
-{
- /* Configure the ImgU input video device with the requested sizes. */
- int ret = input_->setFormat(inputFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU input format = " << inputFormat->toString();
-
/*
- * \todo The IPU3 driver implementation shall be changed to use the
- * input sizes as 'ImgU Input' subdevice sizes, and use the desired
- * GDC output sizes to configure the crop/compose rectangles.
- *
- * The current IPU3 driver implementation uses GDC sizes as the
- * 'ImgU Input' subdevice sizes, and the input video device sizes
- * to configure the crop/compose rectangles, contradicting the
- * V4L2 specification.
+	 * The IPA tuning file is named after the sensor model. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
*/
- Rectangle rect = {
- .x = 0,
- .y = 0,
- .w = inputFormat->size.width,
- .h = inputFormat->size.height,
- };
- ret = imgu_->setCrop(PAD_INPUT, &rect);
- if (ret)
- return ret;
-
- ret = imgu_->setCompose(PAD_INPUT, &rect);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU input feeder and BDS rectangle = "
- << rect.toString();
-
- V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
- imguFormat.size = size;
-
- ret = imgu_->setFormat(PAD_INPUT, &imguFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU GDC format = " << imguFormat.toString();
-
- return 0;
-}
-
-/**
- * \brief Configure the ImgU unit \a id video output
- * \param[in] output The ImgU output device to configure
- * \param[in] cfg The requested configuration
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::configureOutput(ImgUOutput *output,
- const StreamConfiguration &cfg)
-{
- V4L2VideoDevice *dev = output->dev;
- unsigned int pad = output->pad;
-
- V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
- imguFormat.size = cfg.size;
-
- int ret = imgu_->setFormat(pad, &imguFormat);
- if (ret)
- return ret;
-
- /* No need to apply format to the stat node. */
- if (output == &stat_)
- return 0;
-
- V4L2DeviceFormat outputFormat = {};
- outputFormat.fourcc = dev->toV4L2PixelFormat(PixelFormat(DRM_FORMAT_NV12));
- outputFormat.size = cfg.size;
- outputFormat.planesCount = 2;
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
- ret = dev->setFormat(&outputFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU " << output->name << " format = "
- << outputFormat.toString();
-
- return 0;
-}
-
-/**
- * \brief Allocate buffers for all the ImgU video devices
- */
-int ImgUDevice::allocateBuffers(IPU3CameraData *data, unsigned int bufferCount)
-{
- IPU3Stream *outStream = &data->outStream_;
- IPU3Stream *vfStream = &data->vfStream_;
-
- /* Share buffers between CIO2 output and ImgU input. */
- int ret = input_->importBuffers(bufferCount);
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
- LOG(IPU3, Error) << "Failed to import ImgU input buffers";
+ LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
}
- /*
- * Use for the stat's internal pool the same number of buffers as for
- * the input pool.
- * \todo To be revised when we'll actually use the stat node.
- */
- ret = stat_.dev->allocateBuffers(bufferCount, &stat_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
- goto error;
- }
-
- /*
- * Allocate buffers for both outputs. If an output is active, prepare
- * for buffer import, otherwise allocate internal buffers. Use the same
- * number of buffers in either case.
- */
- if (outStream->active_)
- ret = output_.dev->importBuffers(bufferCount);
- else
- ret = output_.dev->allocateBuffers(bufferCount,
- &output_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU output buffers";
- goto error;
- }
-
- if (vfStream->active_)
- ret = viewfinder_.dev->importBuffers(bufferCount);
- else
- ret = viewfinder_.dev->allocateBuffers(bufferCount,
- &viewfinder_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU viewfinder buffers";
- goto error;
- }
-
return 0;
-
-error:
- freeBuffers(data);
-
- return ret;
}
-/**
- * \brief Release buffers for all the ImgU video devices
- */
-void ImgUDevice::freeBuffers(IPU3CameraData *data)
+void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id,
+ const ControlList &sensorControls,
+ const ControlList &lensControls)
{
- int ret;
+ delayedCtrls_->push(sensorControls);
- ret = output_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU output buffers";
+ CameraLens *focusLens = cio2_.sensor()->focusLens();
+ if (!focusLens)
+ return;
- ret = stat_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU stat buffers";
+ if (!lensControls.contains(V4L2_CID_FOCUS_ABSOLUTE))
+ return;
- ret = viewfinder_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU viewfinder buffers";
+ const ControlValue &focusValue = lensControls.get(V4L2_CID_FOCUS_ABSOLUTE);
- ret = input_->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU input buffers";
+ focusLens->setFocusPosition(focusValue.get<int32_t>());
}
-int ImgUDevice::start()
+void IPU3CameraData::paramsComputed(unsigned int id)
{
- int ret;
-
- /* Start the ImgU video devices. */
- ret = output_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU output";
- return ret;
- }
-
- ret = viewfinder_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU viewfinder";
- return ret;
- }
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- ret = stat_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU stat";
- return ret;
- }
+ /* Queue all buffers from the request aimed for the ImgU. */
+ for (auto it : info->request->buffers()) {
+ const Stream *stream = it.first;
+ FrameBuffer *outbuffer = it.second;
- ret = input_->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU input";
- return ret;
+ if (stream == &outStream_)
+ imgu_->output_->queueBuffer(outbuffer);
+ else if (stream == &vfStream_)
+ imgu_->viewfinder_->queueBuffer(outbuffer);
}
- return 0;
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
+ imgu_->param_->queueBuffer(info->paramBuffer);
+ imgu_->stat_->queueBuffer(info->statBuffer);
+ imgu_->input_->queueBuffer(info->rawBuffer);
}
-int ImgUDevice::stop()
+void IPU3CameraData::metadataReady(unsigned int id, const ControlList &metadata)
{
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- ret = output_.dev->streamOff();
- ret |= viewfinder_.dev->streamOff();
- ret |= stat_.dev->streamOff();
- ret |= input_->streamOff();
+ Request *request = info->request;
+ request->metadata().merge(metadata);
- return ret;
+ info->metadataProcessed = true;
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
}
-/**
- * \brief Enable or disable a single link on the ImgU instance
- *
- * This method assumes the media device associated with the ImgU instance
- * is open.
- *
- * \return 0 on success or a negative error code otherwise
+/* -----------------------------------------------------------------------------
+ * Buffer Ready slots
*/
-int ImgUDevice::linkSetup(const std::string &source, unsigned int sourcePad,
- const std::string &sink, unsigned int sinkPad,
- bool enable)
-{
- MediaLink *link = media_->link(source, sourcePad, sink, sinkPad);
- if (!link) {
- LOG(IPU3, Error)
- << "Failed to get link: '" << source << "':"
- << sourcePad << " -> '" << sink << "':" << sinkPad;
- return -ENODEV;
- }
-
- return link->setEnabled(enable);
-}
/**
- * \brief Enable or disable all media links in the ImgU instance to prepare
- * for capture operations
- *
- * \todo This method will probably be removed or changed once links will be
- * enabled or disabled selectively.
+ * \brief Handle buffers completion at the ImgU output
+ * \param[in] buffer The completed buffer
*
- * \return 0 on success or a negative error code otherwise
+ * Buffers completed from the ImgU output are directed to the application.
*/
-int ImgUDevice::enableLinks(bool enable)
+void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
{
- std::string viewfinderName = name_ + " viewfinder";
- std::string outputName = name_ + " output";
- std::string statName = name_ + " 3a stat";
- std::string inputName = name_ + " input";
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- ret = linkSetup(inputName, 0, name_, PAD_INPUT, enable);
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = linkSetup(name_, PAD_OUTPUT, outputName, 0, enable);
- if (ret)
- return ret;
+ pipe()->completeBuffer(request, buffer);
- ret = linkSetup(name_, PAD_VF, viewfinderName, 0, enable);
- if (ret)
- return ret;
+ request->metadata().set(controls::draft::PipelineDepth, 3);
+ /* \todo Actually apply the scaler crop region to the ImgU. */
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
+ request->metadata().set(controls::ScalerCrop, cropRegion_);
- return linkSetup(name_, PAD_STAT, statName, 0, enable);
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
}
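/*
 * The completed request now reports the crop region actually in use. A
 * minimal application-side sketch for reading it back, assuming the public
 * libcamera metadata API:
 *
 *     const auto &crop = request->metadata().get(controls::ScalerCrop);
 *     if (crop)
 *         std::cout << "crop: " << crop->toString() << std::endl;
 */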
-/*------------------------------------------------------------------------------
- * CIO2 Device
- */
-
/**
- * \brief Initialize components of the CIO2 device with \a index
- * \param[in] media The CIO2 media device
- * \param[in] index The CIO2 device index
- *
- * Create and open the video device and subdevices in the CIO2 instance at \a
- * index, if a supported image sensor is connected to the CSI-2 receiver of
- * this CIO2 instance. Enable the media links connecting the CIO2 components
- * to prepare for capture operations and cached the sensor maximum size.
+ * \brief Handle buffers completion at the CIO2 output
+ * \param[in] buffer The completed buffer
*
- * \return 0 on success or a negative error code otherwise
- * \retval -ENODEV No supported image sensor is connected to this CIO2 instance
+ * Buffers completed from the CIO2 are immediately queued to the ImgU unit
+ * for further processing.
*/
-int CIO2Device::init(const MediaDevice *media, unsigned int index)
+void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
{
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- /*
- * Verify that a sensor subdevice is connected to this CIO2 instance
- * and enable the media link between the two.
- */
- std::string csi2Name = "ipu3-csi2 " + std::to_string(index);
- MediaEntity *csi2Entity = media->getEntityByName(csi2Name);
- const std::vector<MediaPad *> &pads = csi2Entity->pads();
- if (pads.empty())
- return -ENODEV;
-
- /* IPU3 CSI-2 receivers have a single sink pad at index 0. */
- MediaPad *sink = pads[0];
- const std::vector<MediaLink *> &links = sink->links();
- if (links.empty())
- return -ENODEV;
-
- MediaLink *link = links[0];
- MediaEntity *sensorEntity = link->source()->entity();
- sensor_ = new CameraSensor(sensorEntity);
- ret = sensor_->init();
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = link->setEnabled(true);
- if (ret)
- return ret;
+ /* If the buffer is cancelled force a complete of the whole request. */
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ for (auto it : request->buffers()) {
+ FrameBuffer *b = it.second;
+ b->_d()->cancel();
+ pipe()->completeBuffer(request, b);
+ }
- /*
- * Make sure the sensor produces at least one format compatible with
- * the CIO2 requirements.
- *
- * utils::set_overlap requires the ranges to be sorted, keep the
- * cio2Codes vector sorted in ascending order.
- */
- const std::vector<unsigned int> cio2Codes{ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 };
- const std::vector<unsigned int> &sensorCodes = sensor_->mbusCodes();
- if (!utils::set_overlap(sensorCodes.begin(), sensorCodes.end(),
- cio2Codes.begin(), cio2Codes.end())) {
- LOG(IPU3, Error)
- << "Sensor " << sensor_->entity()->name()
- << " has not format compatible with the IPU3";
- return -EINVAL;
+ frameInfos_.remove(info);
+ pipe()->completeRequest(request);
+ return;
}
/*
- * \todo Define when to open and close video device nodes, as they
- * might impact on power consumption.
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
*/
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
- csi2_ = new V4L2Subdevice(csi2Entity);
- ret = csi2_->open();
- if (ret)
- return ret;
+ info->effectiveSensorControls = delayedCtrls_->get(buffer->metadata().sequence);
- std::string cio2Name = "ipu3-cio2 " + std::to_string(index);
- output_ = V4L2VideoDevice::fromEntityName(media, cio2Name);
- ret = output_->open();
- if (ret)
- return ret;
+ if (request->findBuffer(&rawStream_))
+ pipe()->completeBuffer(request, buffer);
- return 0;
+ ipa_->computeParams(info->id, info->paramBuffer->cookie());
}
-/**
- * \brief Configure the CIO2 unit
- * \param[in] size The requested CIO2 output frame size
- * \param[out] outputFormat The CIO2 unit output image format
- * \return 0 on success or a negative error code otherwise
- */
-int CIO2Device::configure(const Size &size,
- V4L2DeviceFormat *outputFormat)
+void IPU3CameraData::paramBufferReady(FrameBuffer *buffer)
{
- V4L2SubdeviceFormat sensorFormat;
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
+
+ info->paramDequeued = true;
/*
- * Apply the selected format to the sensor, the CSI-2 receiver and
- * the CIO2 output device.
+ * tryComplete() will delete info if it completes the IPU3Frame.
+	 * In that event, we must have obtained the Request beforehand.
+ *
+ * \todo Improve the FrameInfo API to avoid this type of issue
*/
- sensorFormat = sensor_->getFormat({ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 },
- size);
- ret = sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = csi2_->setFormat(0, &sensorFormat);
- if (ret)
- return ret;
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
+}
- outputFormat->fourcc = mediaBusToFormat(sensorFormat.mbus_code);
- outputFormat->size = sensorFormat.size;
- outputFormat->planesCount = 1;
+void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
+{
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- ret = output_->setFormat(outputFormat);
- if (ret)
- return ret;
+ Request *request = info->request;
- LOG(IPU3, Debug) << "CIO2 output format " << outputFormat->toString();
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ info->metadataProcessed = true;
- return 0;
+ /*
+ * tryComplete() will delete info if it completes the IPU3Frame.
+		 * In that event, we must have obtained the Request beforehand.
+ */
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
+
+ return;
+ }
+
+ ipa_->processStats(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
+ info->statBuffer->cookie(), info->effectiveSensorControls);
}
-/**
- * \brief Allocate frame buffers for the CIO2 output
+/*
+ * \brief Handle the start of frame exposure signal
+ * \param[in] sequence The sequence number of the frame
*
- * Allocate frame buffers in the CIO2 video device to be used to capture frames
- * from the CIO2 output. The buffers are stored in the CIO2Device::buffers_
- * vector.
+ * Inspect the list of pending requests waiting for a RAW frame to be
+ * produced and apply controls for the 'next' one.
*
- * \return Number of buffers allocated or negative error code
+ * Some controls need to be applied immediately, such as the
+ * TestPatternMode one. Other controls are handled through the delayed
+ * controls class.
*/
-int CIO2Device::allocateBuffers()
+void IPU3CameraData::frameStart(uint32_t sequence)
{
- int ret = output_->allocateBuffers(CIO2_BUFFER_COUNT, &buffers_);
- if (ret < 0)
- LOG(IPU3, Error) << "Failed to allocate CIO2 buffers";
+ delayedCtrls_->applyControls(sequence);
- return ret;
-}
+ if (processingRequests_.empty())
+ return;
-void CIO2Device::freeBuffers()
-{
- buffers_.clear();
+ /*
+ * Handle controls to be set immediately on the next frame.
+	 * This currently only handles the TestPatternMode control.
+ *
+ * \todo Synchronize with the sequence number
+ */
+ Request *request = processingRequests_.front();
+ processingRequests_.pop();
- if (output_->releaseBuffers())
- LOG(IPU3, Error) << "Failed to release CIO2 buffers";
-}
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
+ return;
-int CIO2Device::start()
-{
- for (const std::unique_ptr<FrameBuffer> &buffer : buffers_) {
- int ret = output_->queueBuffer(buffer.get());
- if (ret) {
- LOG(IPU3, Error) << "Failed to queue CIO2 buffer";
- return ret;
- }
+ int ret = cio2_.sensor()->setTestPatternMode(
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to set test pattern mode: "
+ << ret;
+ return;
}
- return output_->streamOn();
-}
-
-int CIO2Device::stop()
-{
- return output_->streamOff();
-}
-
-V4L2PixelFormat CIO2Device::mediaBusToFormat(unsigned int code)
-{
- switch (code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10);
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10);
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10);
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10);
- default:
- return {};
- }
+ request->metadata().set(controls::draft::TestPatternMode,
+ *testPatternMode);
}
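/*
 * frameStart() consumes the TestPatternMode control that applications set
 * on a request. A minimal sketch of the application side, assuming the
 * public libcamera control API:
 *
 *     request->controls().set(controls::draft::TestPatternMode,
 *                             controls::draft::TestPatternModeColorBars);
 *     camera->queueRequest(request.get());
 */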
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/meson.build b/src/libcamera/pipeline/ipu3/meson.build
index 0ab766a2..f2904b4a 100644
--- a/src/libcamera/pipeline/ipu3/meson.build
+++ b/src/libcamera/pipeline/ipu3/meson.build
@@ -1,3 +1,8 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'cio2.cpp',
+ 'frames.cpp',
+ 'imgu.cpp',
'ipu3.cpp',
])
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..5abd6b20
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1755 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <linux/mali-c55-config.h>
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/stream.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_proxy.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinInputSize = { 640, 480 };
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+struct MaliC55FrameInfo {
+ Request *request;
+
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ bool paramsDone;
+ bool statsDone;
+};
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+ int loadIPA();
+
+	/* Delegate these functionalities to either the TPG or the CameraSensor. */
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ int pixfmtToMbusCode(const PixelFormat &pixFmt) const;
+ const PixelFormat &bestRawFormat() const;
+
+ void updateControls(const ControlInfoMap &ipaControls);
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+ std::unique_ptr<ipa::mali_c55::IPAProxyMaliC55> ipa_;
+ std::vector<IPABuffer> ipaStatBuffers_;
+ std::vector<IPABuffer> ipaParamBuffers_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+private:
+ void initTPGData();
+ void setSensorControls(const ControlList &sensorControls);
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+ /* If this camera is created from TPG, we return here. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+	 * Register a CameraSensor if we are connected to a sensor, and create
+	 * a subdevice for the connected CSI-2 receiver.
+ */
+ sensor_ = CameraSensorFactoryBase::create(entity_);
+ if (!sensor_)
+		return -ENODEV;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+	ret = csi_->open();
+	if (ret) {
+		LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+		return ret;
+	}
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+void MaliC55CameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+/*
+ * The Mali C55 ISP can only produce 16-bit RAW output in bypass modes, but the
+ * sensors connected to it might produce 8/10/12/16 bits. We simply search the
+ * sensor's supported formats for the one with a matching bayer order and the
+ * greatest bitdepth.
+ */
+int MaliC55CameraData::pixfmtToMbusCode(const PixelFormat &pixFmt) const
+{
+ auto it = maliC55FmtToCode.find(pixFmt);
+ if (it == maliC55FmtToCode.end())
+ return -EINVAL;
+
+ BayerFormat bayerFormat = BayerFormat::fromMbusCode(it->second);
+ if (!bayerFormat.isValid())
+ return -EINVAL;
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ unsigned int sensorMbusCode = 0;
+ unsigned int bitDepth = 0;
+
+ for (const auto &[code, sizes] : formats) {
+ BayerFormat sdBayerFormat = BayerFormat::fromMbusCode(code);
+ if (!sdBayerFormat.isValid())
+ continue;
+
+ if (sdBayerFormat.order != bayerFormat.order)
+ continue;
+
+ if (sdBayerFormat.bitDepth > bitDepth) {
+ bitDepth = sdBayerFormat.bitDepth;
+ sensorMbusCode = code;
+ }
+ }
+
+ if (!sensorMbusCode)
+ return -EINVAL;
+
+ return sensorMbusCode;
+}
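/*
 * A hedged example of the selection above: for formats::SRGGB16, a sensor
 * exposing both MEDIA_BUS_FMT_SRGGB10_1X10 and MEDIA_BUS_FMT_SRGGB12_1X12
 * yields the 12-bit code, as both match the RGGB order and 12 is the
 * greatest bit depth on offer.
 */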
+
+/*
+ * Find a RAW PixelFormat supported by both the ISP and the sensor.
+ *
+ * The situation is mildly complicated by the fact that we expect the sensor to
+ * output something like RAW8/10/12/16, but the ISP can only accept as input
+ * RAW20 and can only produce as output RAW16. The one constant in that is the
+ * bayer order of the data, so we'll simply check that the sensor produces a
+ * format with a bayer order that matches that of one of the formats we support,
+ * and select that.
+ */
+const PixelFormat &MaliC55CameraData::bestRawFormat() const
+{
+ static const PixelFormat invalidPixFmt = {};
+
+ for (const auto &fmt : sd_->formats(0)) {
+ BayerFormat sensorBayer = BayerFormat::fromMbusCode(fmt.first);
+
+ if (!sensorBayer.isValid())
+ continue;
+
+ for (const auto &[pixFmt, rawCode] : maliC55FmtToCode) {
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.order == sensorBayer.order)
+ return pixFmt;
+ }
+ }
+
+ LOG(MaliC55, Error) << "Sensor doesn't provide a compatible format";
+ return invalidPixFmt;
+}
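/*
 * For example (hedged): a sensor producing MEDIA_BUS_FMT_SGRBG10_1X10 has
 * a GRBG Bayer order, so the loop above selects formats::SGRBG16, the only
 * RAW entry in maliC55FmtToCode with a matching order.
 */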
+
+void MaliC55CameraData::updateControls(const ControlInfoMap &ipaControls)
+{
+ if (!sensor_)
+ return;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ ControlInfoMap::Map controls;
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ controls[&controls::ScalerCrop] =
+ ControlInfo(ispMinCrop, sensorInfo.analogCrop,
+ sensorInfo.analogCrop);
+
+ for (auto const &c : ipaControls)
+ controls.emplace(c.first, c.second);
+
+ controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the RAW mbus code is supported by the image source. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return bestRawFormat();
+
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &size) const
+{
+ /* Expand the RAW size to the minimum ISP input size. */
+ Size rawSize = size.expandedTo(kMaliC55MinInputSize);
+
+ /* Check if the size is natively supported. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return {};
+
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &sz : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(sz.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(sz.height));
+ if (dist < distance) {
+			distance = dist;
+ bestSize = sz;
+ }
+ }
+
+ return bestSize;
+}
+
+int MaliC55CameraData::loadIPA()
+{
+ int ret;
+
+ /* Do not initialize IPA for TPG. */
+ if (!sensor_)
+ return 0;
+
+ ipa_ = IPAManager::createIPA<ipa::mali_c55::IPAProxyMaliC55>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
+
+ ipa_->setSensorControls.connect(this, &MaliC55CameraData::setSensorControls);
+
+ std::string ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml",
+ "uncalibrated.yaml");
+
+	/* We need to inform the IPA of the sensor configuration. */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = sensor_->controls();
+
+ ControlInfoMap ipaControls;
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, ipaConfig,
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to initialise the Mali-C55 IPA";
+ return ret;
+ }
+
+ updateControls(ipaControls);
+
+ return 0;
+}
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+ const Transform &combinedTransform() { return combinedTransform_; }
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+ Transform combinedTransform_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * The TPG doesn't support flips, so we only need to calculate a
+ * transform if we have a sensor.
+ */
+ if (data_->sensor_) {
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+ } else {
+ combinedTransform_ = Transform::Rot0;
+ }
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ /*
+	 * The C55 cannot upscale. Limit the configuration to the ISP
+ * capabilities and the sensor resolution.
+ */
+ Size maxSize = kMaliC55MaxSize.boundedTo(data_->resolution());
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+ << "RAW sizes adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(rawConfig->pixelFormat);
+ rawConfig->stride = info.stride(rawConfig->size.width, 0, 4);
+ rawConfig->frameSize = info.frameSize(rawConfig->size, 4);
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /*
+ * Adjust processed streams.
+ *
+ * Compute the minimum sensor size to be later used to select the
+ * sensor configuration.
+ */
+ Size minSensorSize = kMaliC55MinInputSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (minSensorSize < size)
+ minSensorSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawConfig->pixelFormat);
+ sensorFormat_.size = rawConfig->size.expandedTo(minSensorSize);
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawFormat);
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(sensorFormat_.code);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ if (minSensorSize.width > size.width ||
+ minSensorSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(minSensorSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(minSensorSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+			distance = dist;
+ bestSize = size;
+ }
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
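/*
 * A minimal application-side sketch of the validation flow, assuming the
 * public libcamera API:
 *
 *     std::unique_ptr<CameraConfiguration> config =
 *         camera->generateConfiguration({ StreamRole::Raw,
 *                                         StreamRole::Viewfinder });
 *     if (config->validate() == CameraConfiguration::Invalid)
 *         return -EINVAL;
 *     camera->configure(config.get());
 *
 * The RAW stream claims the FR pipe, so the processed stream falls back to
 * the DS pipe.
 */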
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+ int allocateBuffers(Camera *camera);
+ void freeBuffers(Camera *camera);
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsBufferReady(FrameBuffer *buffer);
+ void statsBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int requestId);
+ void statsProcessed(unsigned int requestId, const ControlList &metadata);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ MediaLink *link;
+ Stream *stream;
+ };
+
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ MaliC55FrameInfo *findFrameInfo(FrameBuffer *buffer);
+ MaliC55FrameInfo *findFrameInfo(Request *request);
+ void tryComplete(MaliC55FrameInfo *info);
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void applyScalerCrop(Camera *camera, const ControlList &controls);
+
+ bool registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> stats_;
+ std::unique_ptr<V4L2VideoDevice> params_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> statsBuffers_;
+ std::queue<FrameBuffer *> availableStatsBuffers_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramsBuffers_;
+ std::queue<FrameBuffer *> availableParamsBuffers_;
+
+ std::map<unsigned int, MaliC55FrameInfo> frameInfoMap_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ int rawCode = data->pixfmtToMbusCode(pixFmt);
+ if (rawCode < 0)
+ continue;
+
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
+ /* Enable the debayer route to set fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
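+	/*
+	 * A route connects a { pad, stream } pair on the sink side to one on
+	 * the source side: here sink pad 0 feeds source pad 1, both on
+	 * stream 0.
+	 */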
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * Compute the scaler-in to scaler-out ratio: first center-crop to align
+ * the FOV to the desired resolution, then scale to the desired size.
+ */
+ Size scalerIn = subdevFormat.size.boundedToAspectRatio(config.size);
+ int xCrop = (subdevFormat.size.width - scalerIn.width) / 2;
+ int yCrop = (subdevFormat.size.height - scalerIn.height) / 2;
+ Rectangle ispCrop = { xCrop, yCrop, scalerIn };
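+	/*
+	 * Worked example with made-up numbers: a 1920x1080 ISP input and a
+	 * 1440x1080 (4:3) output give scalerIn = 1440x1080, so
+	 * xCrop = (1920 - 1440) / 2 = 240 and yCrop = 0, i.e. a centered
+	 * 4:3 window on the 16:9 input, scaled afterwards through the
+	 * compose rectangle.
+	 */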
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ Rectangle ispCompose = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCompose);
+ if (ret)
+ return ret;
+
+ /*
+ * The source pad format size comes directly from the sink
+ * compose rectangle.
+ */
+ subdevFormat.size = ispCompose.size();
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+ /* Link the graph depending if we are operating the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->sensor_) {
+ ret = data->sensor_->setFormat(&subdevFormat,
+ maliConfig->combinedTransform());
+ if (ret)
+ return ret;
+ }
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->getFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ V4L2DeviceFormat statsFormat;
+ ret = stats_->getFormat(&statsFormat);
+ if (ret)
+ return ret;
+
+ if (statsFormat.planes[0].size != sizeof(struct mali_c55_stats_buffer)) {
+ LOG(MaliC55, Error) << "3a stats buffer size invalid";
+ return -EINVAL;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+	 * Configure the resizer: fixed format on the sink pad; use the media
+	 * bus code associated with the desired capture format on the source
+	 * pad.
+	 *
+	 * Configure the crop and compose rectangles to match the desired
+	 * stream output size.
+	 *
+	 * \todo Make the crop/scaler configurable.
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /*
+ * Enable the media link between the pipe's resizer and the
+		 * capture video device.
+ */
+
+ ret = pipe->link->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable resizer's link";
+ return ret;
+ }
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ if (!data->ipa_)
+ return 0;
+
+ /*
+ * Enable the media link between the ISP subdevice and the statistics
+ * video device.
+ */
+ const MediaEntity *ispEntity = isp_->entity();
+ ret = ispEntity->getPadByIndex(3)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable statistics link";
+ return ret;
+ }
+
+ /*
+ * Enable the media link between the ISP subdevice and the parameters
+ * video device.
+ */
+ ret = ispEntity->getPadByIndex(4)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable parameters link";
+ return ret;
+ }
+
+	/* We need to inform the IPA of the sensor configuration. */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = data->sensor_->controls();
+
+ /*
+	 * We also need to tell the IPA the Bayer order of the data, as
+	 * affected by any flips that we've configured.
+ */
+ const Transform &combinedTransform = maliConfig->combinedTransform();
+ BayerFormat::Order bayerOrder = data->sensor_->bayerOrder(combinedTransform);
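+	/*
+	 * For instance, a horizontal flip turns an RGGB readout into GRBG
+	 * and a 180 degree rotation turns it into BGGR: the IPA must be
+	 * given the effective order, not the sensor's native one.
+	 */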
+
+ ControlInfoMap ipaControls;
+ ret = data->ipa_->configure(ipaConfig, utils::to_underlying(bayerOrder),
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure IPA";
+ return ret;
+ }
+
+ data->updateControls(ipaControls);
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+void PipelineHandlerMaliC55::freeBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ while (!availableStatsBuffers_.empty())
+ availableStatsBuffers_.pop();
+ while (!availableParamsBuffers_.empty())
+ availableParamsBuffers_.pop();
+
+ statsBuffers_.clear();
+ paramsBuffers_.clear();
+
+ if (data->ipa_) {
+ data->ipa_->unmapBuffers(data->ipaStatBuffers_);
+ data->ipa_->unmapBuffers(data->ipaParamBuffers_);
+ }
+ data->ipaStatBuffers_.clear();
+ data->ipaParamBuffers_.clear();
+
+ if (stats_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release stats buffers";
+
+ if (params_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release params buffers";
+}
+
+int PipelineHandlerMaliC55::allocateBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ unsigned int ipaBufferId = 1;
+ unsigned int bufferCount;
+ int ret;
+
+ bufferCount = std::max({
+ data->frStream_.configuration().bufferCount,
+ data->dsStream_.configuration().bufferCount,
+ });
+
+ ret = stats_->allocateBuffers(bufferCount, &statsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : statsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaStatBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableStatsBuffers_.push(buffer.get());
+ }
+
+ ret = params_->allocateBuffers(bufferCount, &paramsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : paramsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaParamBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableParamsBuffers_.push(buffer.get());
+ }
+
+ if (data->ipa_) {
+ data->ipa_->mapBuffers(data->ipaStatBuffers_, true);
+ data->ipa_->mapBuffers(data->ipaParamBuffers_, false);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ int ret;
+
+ ret = allocateBuffers(camera);
+ if (ret)
+ return ret;
+
+ if (data->ipa_) {
+ ret = data->ipa_->start();
+ if (ret) {
+ LOG(MaliC55, Error)
+				<< "Failed to start IPA " << camera->id();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ ret = stats_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stats stream";
+
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = params_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start params stream";
+
+ stats_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = isp_->setFrameStartEnabled(true);
+ if (ret)
+ LOG(MaliC55, Error) << "Failed to enable frame start events";
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ isp_->setFrameStartEnabled(false);
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+
+ stats_->streamOff();
+ params_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+}
+
+void PipelineHandlerMaliC55::applyScalerCrop(Camera *camera,
+ const ControlList &controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (!scalerCrop)
+ return;
+
+ if (!data->sensor_) {
+ LOG(MaliC55, Error) << "ScalerCrop not supported for TPG";
+ return;
+ }
+
+ Rectangle nativeCrop = *scalerCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ /*
+	 * The re-scaling of the ScalerCrop rectangle into the ISP crop
+	 * rectangle comes straight from the RPi pipeline handler.
+ *
+ * Create a version of the crop rectangle aligned to the analogue crop
+ * rectangle top-left coordinates and scaled in the [analogue crop to
+ * output frame] ratio to take into account binning/skipping on the
+ * sensor.
+ */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo.analogCrop
+ .topLeft());
+ ispCrop.scaleBy(sensorInfo.outputSize, sensorInfo.analogCrop.size());
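+	/*
+	 * Illustrative numbers: with an analogue crop at (16, 12) of size
+	 * 2560x1920 and a 1280x960 output (2x2 binning), a requested crop of
+	 * (516, 412, 1000x800) becomes (500, 400, 1000x800) after the
+	 * translation and (250, 200, 500x400) after the scaling.
+	 */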
+
+ /*
+ * The crop rectangle should be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ Size minSize = ispMinCrop.size().expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center())
+ .enclosedIn(Rectangle(sensorInfo.outputSize));
+
+ /*
+ * As the resizer can't upscale, the crop rectangle has to be larger
+ * than the larger stream output size.
+ */
+ Size maxYuvSize;
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ const StreamConfiguration &config = pipe.stream->configuration();
+ if (isFormatRaw(config.pixelFormat)) {
+ LOG(MaliC55, Debug) << "Cannot crop with a RAW stream";
+ return;
+ }
+
+ Size streamSize = config.size;
+ if (streamSize.width > maxYuvSize.width)
+ maxYuvSize.width = streamSize.width;
+ if (streamSize.height > maxYuvSize.height)
+ maxYuvSize.height = streamSize.height;
+ }
+
+	/* Rectangle::size() returns a copy, so expand the fields directly. */
+	ispCrop.width = std::max(ispCrop.width, maxYuvSize.width);
+	ispCrop.height = std::max(ispCrop.height, maxYuvSize.height);
+
+ /*
+	 * Now apply the scaler crop to each enabled output. This overrides
+	 * the crop configuration performed at configure() time and can cause
+	 * non-square pixels if the crop rectangle and the scaler output FOV
+	 * have different aspect ratios.
+ */
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+		/* Create a copy to avoid setSelection() modifying ispCrop. */
+ Rectangle pipeCrop = ispCrop;
+ ret = pipe.resizer->setSelection(0, V4L2_SEL_TGT_CROP, &pipeCrop);
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to apply crop to "
+ << (pipe.stream == &data->frStream_ ?
+ "FR" : "DS") << " pipe";
+ return;
+ }
+ }
+}
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ /* Do not run the IPA if the TPG is in use. */
+ if (!data->ipa_) {
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+ frameInfo.statBuffer = nullptr;
+ frameInfo.paramBuffer = nullptr;
+ frameInfo.paramsDone = true;
+ frameInfo.statsDone = true;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+
+ return 0;
+ }
+
+ if (availableStatsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Stats buffer underrun";
+ return -ENOENT;
+ }
+
+ if (availableParamsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Params buffer underrun";
+ return -ENOENT;
+ }
+
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+
+ frameInfo.statBuffer = availableStatsBuffers_.front();
+ availableStatsBuffers_.pop();
+ frameInfo.paramBuffer = availableParamsBuffers_.front();
+ availableParamsBuffers_.pop();
+
+ frameInfo.paramsDone = false;
+ frameInfo.statsDone = false;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+ data->ipa_->fillParams(request->sequence(),
+ frameInfo.paramBuffer->cookie());
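+	/*
+	 * Completion is asynchronous from this point: paramsComputed() queues
+	 * the params, stats and capture buffers once the IPA has filled the
+	 * parameters, and statsProcessed() flags the statistics as consumed
+	 * so that tryComplete() can finally complete the request.
+	 */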
+
+ return 0;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(Request *request)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.request == request)
+ return &info;
+ }
+
+ return nullptr;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(FrameBuffer *buffer)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.paramBuffer == buffer ||
+ info.statBuffer == buffer)
+ return &info;
+ }
+
+ return nullptr;
+}
+
+void PipelineHandlerMaliC55::tryComplete(MaliC55FrameInfo *info)
+{
+ if (!info->paramsDone)
+ return;
+ if (!info->statsDone)
+ return;
+
+ Request *request = info->request;
+ if (request->hasPendingBuffers())
+ return;
+
+ if (info->statBuffer)
+ availableStatsBuffers_.push(info->statBuffer);
+ if (info->paramBuffer)
+ availableParamsBuffers_.push(info->paramBuffer);
+
+ frameInfoMap_.erase(request->sequence());
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::imageBufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+ MaliC55FrameInfo *info = findFrameInfo(request);
+ ASSERT(info);
+
+ if (completeBuffer(request, buffer))
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::paramsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ info->paramsDone = true;
+
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::statsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ Request *request = info->request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ ControlList sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence);
+
+ data->ipa_->processStats(request->sequence(), buffer->cookie(),
+ sensorControls);
+}
+
+void PipelineHandlerMaliC55::paramsComputed(unsigned int requestId)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+ Request *request = frameInfo.request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ /*
+ * Queue buffers for stats and params, then queue buffers to the capture
+ * video devices.
+ */
+
+ frameInfo.paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct mali_c55_params_buffer);
+ params_->queueBuffer(frameInfo.paramBuffer);
+ stats_->queueBuffer(frameInfo.statBuffer);
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+}
+
+void PipelineHandlerMaliC55::statsProcessed(unsigned int requestId,
+ const ControlList &metadata)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+
+ frameInfo.statsDone = true;
+ frameInfo.request->metadata().merge(metadata);
+
+ tryComplete(&frameInfo);
+}
+
+bool PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ if (data->loadIPA())
+ return false;
+
+ if (data->ipa_) {
+ data->ipa_->statsProcessed.connect(this, &PipelineHandlerMaliC55::statsProcessed);
+ data->ipa_->paramsComputed.connect(this, &PipelineHandlerMaliC55::paramsComputed);
+ }
+
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ return registerMaliCamera(std::move(data), name);
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+ data->properties_ = data->sensor_->properties();
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
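+
+		/*
+		 * The delay values express how many frames it takes a control
+		 * write to take effect: with an exposure delay of 2, for
+		 * example, a value applied at frame N is first reflected in
+		 * the image at frame N + 2.
+		 */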
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+		/* \todo Init properties. */
+
+ if (!registerMaliCamera(std::move(data), sensor->name()))
+ return false;
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the always-available elements of the media graph.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+ dm.add("mali-c55 3a stats");
+ dm.add("mali-c55 3a params");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ stats_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a stats");
+ if (stats_->open() < 0)
+ return false;
+
+ params_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a params");
+ if (params_->open() < 0)
+ return false;
+
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->link = media_->link("mali-c55 resizer fr", 1, "mali-c55 fr", 0);
+ if (!frPipe->link) {
+ LOG(MaliC55, Error) << "No link between fr resizer and video node";
+ return false;
+ }
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->link = media_->link("mali-c55 resizer ds", 1,
+ "mali-c55 ds", 0);
+ if (!dsPipe->link) {
+ LOG(MaliC55, Error) << "No link between ds resizer and video node";
+ return false;
+ }
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+ }
+
+ stats_->bufferReady.connect(this, &PipelineHandlerMaliC55::statsBufferReady);
+ params_->bufferReady.connect(this, &PipelineHandlerMaliC55::paramsBufferReady);
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+ LOG(MaliC55, Error) << "ISP sink pad error";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+	 * The last one is unsupported for now. The TPG is relatively easy:
+	 * we just register a Camera for it. If we have a CSI-2 receiver, we
+	 * need to check its sink pad and register Cameras for anything
+	 * connected to it (probably... there are some complex situations in
+	 * which that might not be true, but let's pretend they don't exist
+	 * until we come across them).
+ */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..eba8e5a3
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 190ca5a8..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,3 +1,20 @@
-foreach pipeline : get_option('pipelines')
+# SPDX-License-Identifier: CC0-1.0
+
+# Location of pipeline specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
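+    # e.g. a hypothetical 'rpi/vc4' entry reduces to 'rpi' here, so the
+    # 'rpi' subdirectory is visited only once for all of its handlers.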
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+    # Don't reuse the pipeline variable below; the subdirectory may have
+    # overwritten it.
endforeach
diff --git a/src/libcamera/pipeline/rkisp1/meson.build b/src/libcamera/pipeline/rkisp1/meson.build
index d04fb452..d21a6ef9 100644
--- a/src/libcamera/pipeline/rkisp1/meson.build
+++ b/src/libcamera/pipeline/rkisp1/meson.build
@@ -1,4 +1,6 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'rkisp1.cpp',
- 'timeline.cpp',
+ 'rkisp1_path.cpp',
])
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 2f909cef..35c793da 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,47 +2,58 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
-#include <array>
-#include <iomanip>
+#include <map>
#include <memory>
+#include <numeric>
+#include <optional>
#include <queue>
+#include <vector>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <ipa/rkisp1.h>
-#include <libcamera/buffer.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "ipa_manager.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "timeline.h"
-#include "utils.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/rkisp1_ipa_interface.h>
+#include <libcamera/ipa/rkisp1_ipa_proxy.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "rkisp1_path.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(RkISP1)
class PipelineHandlerRkISP1;
-class RkISP1ActionQueueBuffers;
-
-enum RkISP1ActionType {
- SetSensor,
- SOE,
- QueueBuffers,
-};
+class RkISP1CameraData;
struct RkISP1FrameInfo {
unsigned int frame;
@@ -50,9 +61,9 @@ struct RkISP1FrameInfo {
FrameBuffer *paramBuffer;
FrameBuffer *statBuffer;
- FrameBuffer *videoBuffer;
+ FrameBuffer *mainPathBuffer;
+ FrameBuffer *selfPathBuffer;
- bool paramFilled;
bool paramDequeued;
bool metadataProcessed;
};
@@ -62,8 +73,10 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(unsigned int frame, Request *request, Stream *stream);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
+ void clear();
RkISP1FrameInfo *find(unsigned int frame);
RkISP1FrameInfo *find(FrameBuffer *buffer);
@@ -74,67 +87,39 @@ private:
std::map<unsigned int, RkISP1FrameInfo *> frameInfo_;
};
-class RkISP1Timeline : public Timeline
-{
-public:
- RkISP1Timeline()
- : Timeline()
- {
- setDelay(SetSensor, -1, 5);
- setDelay(SOE, 0, -1);
- setDelay(QueueBuffers, -1, 10);
- }
-
- void bufferReady(FrameBuffer *buffer)
- {
- /*
- * Calculate SOE by taking the end of DMA set by the kernel and applying
- * the time offsets provideprovided by the IPA to find the best estimate
- * of SOE.
- */
-
- ASSERT(frameOffset(SOE) == 0);
-
- utils::time_point soe = std::chrono::time_point<utils::clock>()
- + std::chrono::nanoseconds(buffer->metadata().timestamp)
- + timeOffset(SOE);
-
- notifyStartOfExposure(buffer->metadata().sequence, soe);
- }
-
- void setDelay(unsigned int type, int frame, int msdelay)
- {
- utils::duration delay = std::chrono::milliseconds(msdelay);
- setRawDelay(type, frame, delay);
- }
-};
-
-class RkISP1CameraData : public CameraData
+class RkISP1CameraData : public Camera::Private
{
public:
- RkISP1CameraData(PipelineHandler *pipe)
- : CameraData(pipe), sensor_(nullptr), frame_(0),
- frameInfo_(pipe)
- {
- }
-
- ~RkISP1CameraData()
+ RkISP1CameraData(PipelineHandler *pipe, RkISP1MainPath *mainPath,
+ RkISP1SelfPath *selfPath)
+ : Camera::Private(pipe), frame_(0), frameInfo_(pipe),
+ mainPath_(mainPath), selfPath_(selfPath)
{
- delete sensor_;
}
- int loadIPA();
+ PipelineHandlerRkISP1 *pipe();
+ const PipelineHandlerRkISP1 *pipe() const;
+ int loadIPA(unsigned int hwRevision);
- Stream stream_;
- CameraSensor *sensor_;
+ Stream mainPathStream_;
+ Stream selfPathStream_;
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
unsigned int frame_;
std::vector<IPABuffer> ipaBuffers_;
RkISP1Frames frameInfo_;
- RkISP1Timeline timeline_;
+
+ RkISP1MainPath *mainPath_;
+ RkISP1SelfPath *selfPath_;
+
+ std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_;
+
+ ControlInfoMap ipaControls_;
private:
- void queueFrameAction(unsigned int frame,
- const IPAOperationData &action);
+ void paramsComputed(unsigned int frame, unsigned int bytesused);
+ void setSensorControls(unsigned int frame,
+ const ControlList &sensorControls);
void metadataReady(unsigned int frame, const ControlList &metadata);
};
@@ -147,9 +132,10 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
- static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
+ bool fitsAllPaths(const StreamConfiguration &cfg);
/*
* The RkISP1CameraData instance is guaranteed to be valid as long as the
@@ -160,55 +146,76 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
{
public:
PipelineHandlerRkISP1(CameraManager *manager);
- ~PipelineHandlerRkISP1();
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
- RkISP1CameraData *cameraData(const Camera *camera)
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
+ RkISP1CameraData *cameraData(Camera *camera)
{
- return static_cast<RkISP1CameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<RkISP1CameraData *>(camera->_d());
}
- friend RkISP1ActionQueueBuffers;
friend RkISP1CameraData;
+ friend RkISP1CameraConfiguration;
friend RkISP1Frames;
- int initLinks();
+ int initLinks(Camera *camera, const CameraSensor *sensor,
+ const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
- void bufferReady(FrameBuffer *buffer);
- void paramReady(FrameBuffer *buffer);
- void statReady(FrameBuffer *buffer);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void dewarpBufferReady(FrameBuffer *buffer);
+ void frameStart(uint32_t sequence);
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
+ int updateControls(RkISP1CameraData *data);
+
MediaDevice *media_;
- V4L2Subdevice *isp_;
- V4L2Subdevice *resizer_;
- V4L2VideoDevice *video_;
- V4L2VideoDevice *param_;
- V4L2VideoDevice *stat_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> param_;
+ std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
+
+ RkISP1MainPath mainPath_;
+ RkISP1SelfPath selfPath_;
+
+ std::unique_ptr<V4L2M2MConverter> dewarper_;
+ Rectangle scalerMaxCrop_;
+ bool useDewarper_;
+
+ std::optional<Rectangle> activeCrop_;
+
+ /* Internal buffers used when dewarper is being used */
+ std::vector<std::unique_ptr<FrameBuffer>> mainPathBuffers_;
+ std::queue<FrameBuffer *> availableMainPathBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
@@ -216,45 +223,60 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
- : pipe_(dynamic_cast<PipelineHandlerRkISP1 *>(pipe))
+ : pipe_(static_cast<PipelineHandlerRkISP1 *>(pipe))
{
}
-RkISP1FrameInfo *RkISP1Frames::create(unsigned int frame, Request *request, Stream *stream)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
+ unsigned int frame = data->frame_;
+
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
+ FrameBuffer *mainPathBuffer = nullptr;
+ FrameBuffer *selfPathBuffer = nullptr;
+
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
+
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
+
+ if (pipe_->useDewarper_) {
+ mainPathBuffer = pipe_->availableMainPathBuffers_.front();
+ pipe_->availableMainPathBuffers_.pop();
+ }
}
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
- }
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
-
- FrameBuffer *videoBuffer = request->findBuffer(stream);
- if (!videoBuffer) {
- LOG(RkISP1, Error)
- << "Attempt to queue request with invalid stream";
- return nullptr;
- }
-
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
+ if (!mainPathBuffer)
+ mainPathBuffer = request->findBuffer(&data->mainPathStream_);
+ selfPathBuffer = request->findBuffer(&data->selfPathStream_);
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
info->request = request;
info->paramBuffer = paramBuffer;
- info->videoBuffer = videoBuffer;
+ info->mainPathBuffer = mainPathBuffer;
+ info->selfPathBuffer = selfPathBuffer;
info->statBuffer = statBuffer;
- info->paramFilled = false;
info->paramDequeued = false;
info->metadataProcessed = false;
@@ -271,6 +293,7 @@ int RkISP1Frames::destroy(unsigned int frame)
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
frameInfo_.erase(info->frame);
@@ -279,6 +302,21 @@ int RkISP1Frames::destroy(unsigned int frame)
return 0;
}
+void RkISP1Frames::clear()
+{
+ for (const auto &entry : frameInfo_) {
+ RkISP1FrameInfo *info = entry.second;
+
+ pipe_->availableParamBuffers_.push(info->paramBuffer);
+ pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
+
+ delete info;
+ }
+
+ frameInfo_.clear();
+}
+
RkISP1FrameInfo *RkISP1Frames::find(unsigned int frame)
{
auto itInfo = frameInfo_.find(frame);
@@ -286,7 +324,8 @@ RkISP1FrameInfo *RkISP1Frames::find(unsigned int frame)
if (itInfo != frameInfo_.end())
return itInfo->second;
- LOG(RkISP1, Error) << "Can't locate info from frame";
+ LOG(RkISP1, Fatal) << "Can't locate info from frame";
+
return nullptr;
}
@@ -297,11 +336,13 @@ RkISP1FrameInfo *RkISP1Frames::find(FrameBuffer *buffer)
if (info->paramBuffer == buffer ||
info->statBuffer == buffer ||
- info->videoBuffer == buffer)
+ info->mainPathBuffer == buffer ||
+ info->selfPathBuffer == buffer)
return info;
}
- LOG(RkISP1, Error) << "Can't locate info from buffer";
+ LOG(RkISP1, Fatal) << "Can't locate info from buffer";
+
return nullptr;
}
@@ -314,112 +355,121 @@ RkISP1FrameInfo *RkISP1Frames::find(Request *request)
return info;
}
- LOG(RkISP1, Error) << "Can't locate info from request";
+ LOG(RkISP1, Fatal) << "Can't locate info from request";
+
return nullptr;
}
-class RkISP1ActionSetSensor : public FrameAction
+PipelineHandlerRkISP1 *RkISP1CameraData::pipe()
{
-public:
- RkISP1ActionSetSensor(unsigned int frame, CameraSensor *sensor, const ControlList &controls)
- : FrameAction(frame, SetSensor), sensor_(sensor), controls_(controls) {}
-
-protected:
- void run() override
- {
- sensor_->setControls(&controls_);
- }
+ return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
-private:
- CameraSensor *sensor_;
- ControlList controls_;
-};
+const PipelineHandlerRkISP1 *RkISP1CameraData::pipe() const
+{
+ return static_cast<const PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
-class RkISP1ActionQueueBuffers : public FrameAction
+int RkISP1CameraData::loadIPA(unsigned int hwRevision)
{
-public:
- RkISP1ActionQueueBuffers(unsigned int frame, RkISP1CameraData *data,
- PipelineHandlerRkISP1 *pipe)
- : FrameAction(frame, QueueBuffers), data_(data), pipe_(pipe)
- {
- }
+ ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
-protected:
- void run() override
- {
- RkISP1FrameInfo *info = data_->frameInfo_.find(frame());
- if (!info)
- LOG(RkISP1, Fatal) << "Frame not known";
+ ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &RkISP1CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady);
- if (info->paramFilled)
- pipe_->param_->queueBuffer(info->paramBuffer);
- else
- LOG(RkISP1, Error)
- << "Parameters not ready on time for frame "
- << frame() << ", ignore parameters.";
+ /*
+	 * The IPA tuning file is made from the sensor name unless the
+ * environment variable overrides it.
+ */
+ std::string ipaTuningFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ ipaTuningFile =
+ ipa_->configurationFile(sensor_->model() + ".yaml", "uncalibrated.yaml");
+ } else {
+ ipaTuningFile = std::string(configFromEnv);
+ }
- pipe_->stat_->queueBuffer(info->statBuffer);
- pipe_->video_->queueBuffer(info->videoBuffer);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
}
-private:
- RkISP1CameraData *data_;
- PipelineHandlerRkISP1 *pipe_;
-};
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &ipaControls_);
+ if (ret < 0) {
+ LOG(RkISP1, Error) << "IPA initialization failure";
+ return ret;
+ }
+
+ return 0;
+}
-int RkISP1CameraData::loadIPA()
+void RkISP1CameraData::paramsComputed(unsigned int frame, unsigned int bytesused)
{
- ipa_ = IPAManager::instance()->createIPA(pipe_, 1, 1);
- if (!ipa_)
- return -ENOENT;
+ PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
+ RkISP1FrameInfo *info = frameInfo_.find(frame);
+ if (!info)
+ return;
- ipa_->queueFrameAction.connect(this,
- &RkISP1CameraData::queueFrameAction);
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused = bytesused;
+ pipe->param_->queueBuffer(info->paramBuffer);
+ pipe->stat_->queueBuffer(info->statBuffer);
- return 0;
+ if (info->mainPathBuffer)
+ mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (selfPath_ && info->selfPathBuffer)
+ selfPath_->queueBuffer(info->selfPathBuffer);
}
-void RkISP1CameraData::queueFrameAction(unsigned int frame,
- const IPAOperationData &action)
+void RkISP1CameraData::setSensorControls([[maybe_unused]] unsigned int frame,
+ const ControlList &sensorControls)
{
- switch (action.operation) {
- case RKISP1_IPA_ACTION_V4L2_SET: {
- const ControlList &controls = action.controls[0];
- timeline_.scheduleAction(std::make_unique<RkISP1ActionSetSensor>(frame,
- sensor_,
- controls));
- break;
- }
- case RKISP1_IPA_ACTION_PARAM_FILLED: {
- RkISP1FrameInfo *info = frameInfo_.find(frame);
- if (info)
- info->paramFilled = true;
- break;
- }
- case RKISP1_IPA_ACTION_METADATA:
- metadataReady(frame, action.controls[0]);
- break;
- default:
- LOG(RkISP1, Error) << "Unkown action " << action.operation;
- break;
- }
+ delayedCtrls_->push(sensorControls);
}
void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &metadata)
{
- PipelineHandlerRkISP1 *pipe =
- static_cast<PipelineHandlerRkISP1 *>(pipe_);
-
RkISP1FrameInfo *info = frameInfo_.find(frame);
if (!info)
return;
- info->request->metadata() = metadata;
+ info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -428,120 +478,321 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
data_ = data;
}
-CameraConfiguration::Status RkISP1CameraConfiguration::validate()
+bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
- static const std::array<PixelFormat, 8> formats{
- PixelFormat(DRM_FORMAT_YUYV),
- PixelFormat(DRM_FORMAT_YVYU),
- PixelFormat(DRM_FORMAT_VYUY),
- PixelFormat(DRM_FORMAT_NV16),
- PixelFormat(DRM_FORMAT_NV61),
- PixelFormat(DRM_FORMAT_NV21),
- PixelFormat(DRM_FORMAT_NV12),
- /* \todo Add support for 8-bit greyscale to DRM formats */
- };
+ const CameraSensor *sensor = data_->sensor_.get();
+ StreamConfiguration config;
+
+ config = cfg;
+ if (data_->mainPath_->validate(sensor, sensorConfig, &config) != Valid)
+ return false;
+
+ config = cfg;
+ if (data_->selfPath_ &&
+ data_->selfPath_->validate(sensor, sensorConfig, &config) != Valid)
+ return false;
+
+ return true;
+}
- const CameraSensor *sensor = data_->sensor_;
- Status status = Valid;
+CameraConfiguration::Status RkISP1CameraConfiguration::validate()
+{
+ const PipelineHandlerRkISP1 *pipe = data_->pipe();
+ const CameraSensor *sensor = data_->sensor_.get();
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+	 * Make sure that, if a sensor configuration has been requested, it
+	 * is valid.
+ */
+ if (sensorConfig) {
+ if (!sensorConfig->isValid()) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration request";
+
+ return Invalid;
+ }
+
+ unsigned int bitDepth = sensorConfig->bitDepth;
+ if (bitDepth != 8 && bitDepth != 10 && bitDepth != 12) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration bit depth";
+
+ return Invalid;
+ }
+ }
+
/* Cap the number of entries to the available streams. */
- if (config_.size() > 1) {
- config_.resize(1);
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
status = Adjusted;
}
- StreamConfiguration &cfg = config_[0];
-
- /* Adjust the pixel format. */
- if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) ==
- formats.end()) {
- LOG(RkISP1, Debug) << "Adjusting format to NV12";
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12),
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
}
- /* Select the sensor format. */
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- cfg.size);
- if (!sensorFormat_.size.width || !sensorFormat_.size.height)
- sensorFormat_.size = sensor->resolution();
+ bool useDewarper = false;
+ if (pipe->dewarper_) {
+ /*
+		 * Platforms with dewarper support, such as i.MX8MP, support
+		 * only a single stream, so inspecting config_[0] alone is
+		 * sufficient here.
+ */
+ bool isRaw = PixelFormatInfo::info(config_[0].pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+ if (!isRaw)
+ useDewarper = true;
+ }
+
+ /*
+	 * If there is more than one stream in the configuration, figure out
+	 * the order in which to evaluate them. The first stream has the
+	 * highest priority, but if both the main path and the self path can
+	 * satisfy it, evaluate the second stream first, as the first stream
+	 * is guaranteed to work with whichever path is not used by the
+	 * second one.
+ */
+ std::vector<unsigned int> order(config_.size());
+ std::iota(order.begin(), order.end(), 0);
+ if (config_.size() == 2 && fitsAllPaths(config_[0]))
+ std::reverse(order.begin(), order.end());
/*
- * Provide a suitable default that matches the sensor aspect
- * ratio and clamp the size to the hardware bounds.
- *
- * \todo: Check the hardware alignment constraints.
+ * Validate the configuration against the desired path and, if the
+ * platform supports it, the dewarper.
*/
- const Size size = cfg.size;
+ auto validateConfig = [&](StreamConfiguration &cfg, RkISP1Path *path,
+ Stream *stream, Status expectedStatus) {
+ StreamConfiguration tryCfg = cfg;
+
+ Status ret = path->validate(sensor, sensorConfig, &tryCfg);
+ if (ret == Invalid)
+ return false;
+
+ if (!useDewarper &&
+ (expectedStatus == Valid && ret == Adjusted))
+ return false;
+
+ if (useDewarper) {
+ bool adjusted;
+
+ pipe->dewarper_->validateOutput(&tryCfg, &adjusted,
+ Converter::Alignment::Down);
+ if (expectedStatus == Valid && adjusted)
+ return false;
+ }
+
+ cfg = tryCfg;
+ cfg.setStream(stream);
+ return true;
+ };
- if (!cfg.size.width || !cfg.size.height) {
- cfg.size.width = 1280;
- cfg.size.height = 1280 * sensorFormat_.size.height
- / sensorFormat_.size.width;
+ bool mainPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
+ RkISP1Path *mainPath = data_->mainPath_;
+ RkISP1Path *selfPath = data_->selfPath_;
+ Stream *mainPathStream = const_cast<Stream *>(&data_->mainPathStream_);
+ Stream *selfPathStream = const_cast<Stream *>(&data_->selfPathStream_);
+ for (unsigned int index : order) {
+ StreamConfiguration &cfg = config_[index];
+
+ /* Try to match stream without adjusting configuration. */
+ if (mainPathAvailable) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Valid)) {
+ mainPathAvailable = false;
+ continue;
+ }
+ }
+
+ if (selfPathAvailable) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Valid)) {
+ selfPathAvailable = false;
+ continue;
+ }
+ }
+
+ /* Try to match stream allowing adjusting configuration. */
+ if (mainPathAvailable) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Adjusted)) {
+ mainPathAvailable = false;
+ status = Adjusted;
+ continue;
+ }
+ }
+
+ if (selfPathAvailable) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Adjusted)) {
+ selfPathAvailable = false;
+ status = Adjusted;
+ continue;
+ }
+ }
+
+ /* All paths rejected configuration. */
+ LOG(RkISP1, Debug) << "Camera configuration not supported "
+ << cfg.toString();
+ return Invalid;
}
- cfg.size.width = std::max(32U, std::min(4416U, cfg.size.width));
- cfg.size.height = std::max(16U, std::min(3312U, cfg.size.height));
+ /* Select the sensor format. */
+ PixelFormat rawFormat;
+ Size maxSize;
- if (cfg.size != size) {
- LOG(RkISP1, Debug)
- << "Adjusting size from " << size.toString()
- << " to " << cfg.size.toString();
- status = Adjusted;
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
+ maxSize = std::max(maxSize, cfg.size);
}
- cfg.bufferCount = RKISP1_BUFFER_COUNT;
+ std::vector<unsigned int> mbusCodes;
- return status;
-}
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager), isp_(nullptr), resizer_(nullptr),
- video_(nullptr), param_(nullptr), stat_(nullptr)
-{
-}
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize,
+ mainPath->maxResolution());
-PipelineHandlerRkISP1::~PipelineHandlerRkISP1()
-{
- delete param_;
- delete stat_;
- delete video_;
- delete resizer_;
- delete isp_;
+ if (sensorFormat_.size.isNull())
+ sensorFormat_.size = sensor->resolution();
+
+ return status;
}
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true), useDewarper_(false)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
- StreamConfiguration cfg{};
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
- cfg.size = data->sensor_->resolution();
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
+ bool mainPathAvailable = true;
+
+ for (const StreamRole role : roles) {
+ Size size;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
+ break;
+
+ case StreamRole::Viewfinder:
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
+ default:
+ LOG(RkISP1, Warning)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
- config->addConfiguration(cfg);
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+	 * RGB (only available on the self path) in the stream formats
+	 * exposed to applications. This likely calls for a better API
+	 * to expose stream capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
+ mainPathAvailable = false;
+ } else {
+ path = data->selfPath_;
+ }
+
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
+ config->addConfiguration(cfg);
+ }
config->validate();
@@ -553,148 +804,228 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
RkISP1CameraConfiguration *config =
static_cast<RkISP1CameraConfiguration *>(c);
RkISP1CameraData *data = cameraData(camera);
- StreamConfiguration &cfg = config->at(0);
- CameraSensor *sensor = data->sensor_;
+ CameraSensor *sensor = data->sensor_.get();
int ret;
- /*
- * Configure the sensor links: enable the link corresponding to this
- * camera and disable all the other sensor links.
- */
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
-
- for (MediaLink *link : pad->links()) {
- bool enable = link->source()->entity() == sensor->entity();
-
- if (!!(link->flags() & MEDIA_LNK_FL_ENABLED) == enable)
- continue;
-
- LOG(RkISP1, Debug)
- << (enable ? "Enabling" : "Disabling")
- << " link from sensor '"
- << link->source()->entity()->name()
- << "' to ISP";
-
- ret = link->setEnabled(enable);
- if (ret < 0)
- return ret;
- }
+ ret = initLinks(camera, sensor, *config);
+ if (ret)
+ return ret;
/*
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
V4L2SubdeviceFormat format = config->sensorFormat();
- LOG(RkISP1, Debug) << "Configuring sensor with " << format.toString();
+ LOG(RkISP1, Debug) << "Configuring sensor with " << format;
+
+ if (config->sensorConfig)
+ ret = sensor->applyConfiguration(*config->sensorConfig,
+ config->combinedTransform(),
+ &format);
+ else
+ ret = sensor->setFormat(&format, config->combinedTransform());
- ret = sensor->setFormat(&format);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "Sensor configured with " << format.toString();
+ LOG(RkISP1, Debug) << "Sensor configured with " << format;
+
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "ISP input pad configured with " << format.toString();
-
- /* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- LOG(RkISP1, Debug) << "Configuring ISP output pad with " << format.toString();
-
- ret = isp_->setFormat(2, &format);
+ Rectangle inputCrop(0, 0, format.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &inputCrop);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "ISP output pad configured with " << format.toString();
+ LOG(RkISP1, Debug)
+ << "ISP input pad configured with " << format
+ << " crop " << inputCrop;
- ret = resizer_->setFormat(0, &format);
- if (ret < 0)
- return ret;
+ Rectangle outputCrop = inputCrop;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ useDewarper_ = dewarper_ && !isRaw_;
+
+ /* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- LOG(RkISP1, Debug) << "Resizer input pad configured with " << format.toString();
+ /*
+	 * On devices without DUAL_CROP (like the i.MX8MP), cropping needs
+	 * to be done on the ISP/IS output.
+ */
+ if (media_->hwRevision() == RKISP1_V_IMX8MP) {
+		/* The i.MX8MP has only a single path. */
+ const auto &cfg = config->at(0);
+ Size ispCrop = format.size.boundedToAspectRatio(cfg.size);
+ if (useDewarper_)
+ ispCrop = dewarper_->adjustInputSize(cfg.pixelFormat,
+ ispCrop);
+ else
+ ispCrop.alignUpTo(2, 2);
- format.size = cfg.size;
+ outputCrop = ispCrop.centeredTo(Rectangle(format.size).center());
+ format.size = ispCrop;
+ }
- LOG(RkISP1, Debug) << "Configuring resizer output pad with " << format.toString();
+ LOG(RkISP1, Debug)
+ << "Configuring ISP output pad with " << format
+ << " crop " << outputCrop;
- ret = resizer_->setFormat(1, &format);
+ ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &outputCrop);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "Resizer output pad configured with " << format.toString();
+ format.colorSpace = config->at(0).colorSpace;
+ ret = isp_->setFormat(2, &format);
+ if (ret < 0)
+ return ret;
- V4L2DeviceFormat outputFormat = {};
- outputFormat.fourcc = video_->toV4L2PixelFormat(cfg.pixelFormat);
- outputFormat.size = cfg.size;
- outputFormat.planesCount = 2;
+ LOG(RkISP1, Debug)
+ << "ISP output pad configured with " << format
+ << " crop " << outputCrop;
- ret = video_->setFormat(&outputFormat);
+ IPACameraSensorInfo sensorInfo;
+ ret = data->sensor_->sensorInfo(&sensorInfo);
if (ret)
return ret;
- if (outputFormat.size != cfg.size ||
- outputFormat.fourcc != video_->toV4L2PixelFormat(cfg.pixelFormat)) {
- LOG(RkISP1, Error)
- << "Unable to configure capture in " << cfg.toString();
- return -EINVAL;
+ std::map<unsigned int, IPAStream> streamConfig;
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
+
+ for (const StreamConfiguration &cfg : *config) {
+ if (cfg.stream() == &data->mainPathStream_) {
+ ret = mainPath_.configure(cfg, format);
+ streamConfig[0] = IPAStream(cfg.pixelFormat,
+ cfg.size);
+			/* Configure the dewarper. */
+ if (dewarper_ && !isRaw_) {
+ outputCfgs.push_back(const_cast<StreamConfiguration &>(cfg));
+ ret = dewarper_->configure(cfg, outputCfgs);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate the crop rectangle of the data
+ * flowing into the dewarper in sensor
+ * coordinates.
+ */
+ scalerMaxCrop_ =
+ outputCrop.transformedBetween(inputCrop,
+ sensorInfo.analogCrop);
+ }
+ } else if (hasSelfPath_) {
+ ret = selfPath_.configure(cfg, format);
+ streamConfig[1] = IPAStream(cfg.pixelFormat,
+ cfg.size);
+ } else {
+ return -ENODEV;
+ }
+
+ if (ret)
+ return ret;
}
- V4L2DeviceFormat paramFormat = {};
- paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS);
+ V4L2DeviceFormat paramFormat;
+ paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_EXT_PARAMS);
ret = param_->setFormat(&paramFormat);
if (ret)
return ret;
- V4L2DeviceFormat statFormat = {};
+ V4L2DeviceFormat statFormat;
statFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_STAT_3A);
ret = stat_->setFormat(&statFormat);
if (ret)
return ret;
- cfg.setStream(&data->stream_);
+ /* Inform IPA of stream configuration and sensor controls. */
+ ipa::rkisp1::IPAConfigInfo ipaConfig{ sensorInfo,
+ data->sensor_->controls(),
+ paramFormat.fourcc };
- return 0;
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_);
+ if (ret) {
+		LOG(RkISP1, Error) << "Failed to configure IPA (" << ret << ")";
+ return ret;
+ }
+
+ return updateControls(data);
}
-int PipelineHandlerRkISP1::exportFrameBuffers(Camera *camera, Stream *stream,
+int PipelineHandlerRkISP1::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
+ RkISP1CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- return video_->exportBuffers(count, buffers);
+
+ if (stream == &data->mainPathStream_) {
+ /*
+		 * Currently, the i.MX8MP is the only platform with a DW100
+		 * dewarper. It has a main path and no self path. Hence, for
+		 * now, export buffers from the dewarper for the main path
+		 * stream only.
+ */
+ if (useDewarper_)
+ return dewarper_->exportBuffers(&data->mainPathStream_, count, buffers);
+ else
+ return mainPath_.exportBuffers(count, buffers);
+ } else if (hasSelfPath_ && stream == &data->selfPathStream_) {
+ return selfPath_.exportBuffers(count, buffers);
+ }
+
+ return -EINVAL;
}
int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
- unsigned int count = data->stream_.configuration().bufferCount;
unsigned int ipaBufferId = 1;
int ret;
- ret = video_->importBuffers(count);
- if (ret < 0)
- goto error;
+ unsigned int maxCount = std::max({
+ data->mainPathStream_.configuration().bufferCount,
+ data->selfPathStream_.configuration().bufferCount,
+ });
- ret = param_->allocateBuffers(count, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(count, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* If the dewarper is being used, allocate internal buffers for ISP. */
+ if (useDewarper_) {
+ ret = mainPath_.exportBuffers(maxCount, &mainPathBuffers_);
+ if (ret < 0)
+ goto error;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : mainPathBuffers_)
+ availableMainPathBuffers_.push(buffer.get());
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
- data->ipaBuffers_.push_back({ .id = buffer->cookie(),
- .planes = buffer->planes() });
+ data->ipaBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
availableParamBuffers_.push(buffer.get());
}
for (std::unique_ptr<FrameBuffer> &buffer : statBuffers_) {
buffer->setCookie(ipaBufferId++);
- data->ipaBuffers_.push_back({ .id = buffer->cookie(),
- .planes = buffer->planes() });
+ data->ipaBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
availableStatBuffers_.push(buffer.get());
}
@@ -705,7 +1036,7 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
error:
paramBuffers_.clear();
statBuffers_.clear();
- video_->releaseBuffers();
+ mainPathBuffers_.clear();
return ret;
}
@@ -720,8 +1051,12 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
while (!availableParamBuffers_.empty())
availableParamBuffers_.pop();
+ while (!availableMainPathBuffers_.empty())
+ availableMainPathBuffers_.pop();
+
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : data->ipaBuffers_)
@@ -736,115 +1071,134 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
if (stat_->releaseBuffers())
LOG(RkISP1, Error) << "Failed to release stat buffers";
- if (video_->releaseBuffers())
- LOG(RkISP1, Error) << "Failed to release video buffers";
-
return 0;
}
-int PipelineHandlerRkISP1::start(Camera *camera)
+int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
RkISP1CameraData *data = cameraData(camera);
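+	/*
+	 * Collect the error-path cleanup handlers as the pipeline is
+	 * started. They run on early return, and are disarmed by
+	 * actions.release() once startup has fully succeeded.
+	 */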
+ utils::ScopeExitActions actions;
int ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ actions += [&]() { freeBuffers(camera); };
- data->frame_ = 0;
-
- ret = param_->streamOn();
+ ret = data->ipa_->start();
if (ret) {
- freeBuffers(camera);
LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->name();
+ << "Failed to start IPA " << camera->id();
return ret;
}
+ actions += [&]() { data->ipa_->stop(); };
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->name();
- return ret;
- }
+ data->frame_ = 0;
- ret = video_->streamOn();
- if (ret) {
- param_->streamOff();
- stat_->streamOff();
- freeBuffers(camera);
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
+ actions += [&]() { param_->streamOff(); };
- LOG(RkISP1, Error)
- << "Failed to start camera " << camera->name();
+ ret = stat_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
+ actions += [&]() { stat_->streamOff(); };
+
+ if (useDewarper_) {
+ ret = dewarper_->start();
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to start dewarper";
+ return ret;
+ }
+ actions += [&]() { dewarper_->stop(); };
+ }
}
- activeCamera_ = camera;
+ if (data->mainPath_->isEnabled()) {
+ ret = mainPath_.start();
+ if (ret)
+ return ret;
+ actions += [&]() { mainPath_.stop(); };
+ }
- /* Inform IPA of stream configuration and sensor controls. */
- std::map<unsigned int, IPAStream> streamConfig;
- streamConfig[0] = {
- .pixelFormat = data->stream_.configuration().pixelFormat,
- .size = data->stream_.configuration().size,
- };
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
+ ret = selfPath_.start();
+ if (ret)
+ return ret;
+ }
- std::map<unsigned int, const ControlInfoMap &> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ isp_->setFrameStartEnabled(true);
- data->ipa_->configure(streamConfig, entityControls);
+ activeCamera_ = camera;
- return ret;
+ actions.release();
+ return 0;
}
-void PipelineHandlerRkISP1::stop(Camera *camera)
+void PipelineHandlerRkISP1::stopDevice(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
int ret;
- ret = video_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop camera " << camera->name();
+ isp_->setFrameStartEnabled(false);
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics " << camera->name();
+ data->ipa_->stop();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters " << camera->name();
+ if (hasSelfPath_)
+ selfPath_.stop();
+ mainPath_.stop();
- data->timeline_.reset();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
+
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+
+ if (useDewarper_)
+ dewarper_->stop();
+ }
+
+ ASSERT(data->queuedRequests_.empty());
+ data->frameInfo_.clear();
freeBuffers(camera);
activeCamera_ = nullptr;
}
-int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera,
- Request *request)
+int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- Stream *stream = &data->stream_;
- RkISP1FrameInfo *info = data->frameInfo_.create(data->frame_, request,
- stream);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
- IPAOperationData op;
- op.operation = RKISP1_IPA_EVENT_QUEUE_REQUEST;
- op.data = { data->frame_, info->paramBuffer->cookie() };
- op.controls = { request->controls() };
- data->ipa_->processEvent(op);
+ data->ipa_->queueRequest(data->frame_, request->controls());
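+
+	/*
+	 * In raw mode the ISP is bypassed, so queue the capture buffers
+	 * directly. Otherwise ask the IPA to compute the ISP parameters
+	 * first; the path buffers are queued later, once the parameters
+	 * are available.
+	 */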
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
- data->timeline_.scheduleAction(std::make_unique<RkISP1ActionQueueBuffers>(data->frame_,
- data,
- this));
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->computeParams(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -855,22 +1209,106 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera,
* Match and Setup
*/
-int PipelineHandlerRkISP1::initLinks()
+int PipelineHandlerRkISP1::initLinks(Camera *camera,
+ const CameraSensor *sensor,
+ const RkISP1CameraConfiguration &config)
{
- MediaLink *link;
+ RkISP1CameraData *data = cameraData(camera);
int ret;
ret = media_->disableLinks();
if (ret < 0)
return ret;
- link = media_->link("rkisp1_isp", 2, "rkisp1_resizer_mainpath", 0);
- if (!link)
- return -ENODEV;
+ /*
+ * Configure the sensor links: enable the link corresponding to this
+ * camera.
+ */
+ for (MediaLink *link : ispSink_->links()) {
+ if (link->source()->entity() != sensor->entity())
+ continue;
- ret = link->setEnabled(true);
- if (ret < 0)
- return ret;
+ LOG(RkISP1, Debug)
+ << "Enabling link from sensor '"
+ << link->source()->entity()->name()
+ << "' to ISP";
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (const StreamConfiguration &cfg : config) {
+ if (cfg.stream() == &data->mainPathStream_)
+ ret = data->mainPath_->setEnabled(true);
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
+ ret = data->selfPath_->setEnabled(true);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls the pipeline is
+ * responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to make sure the IPA
+ * controls list is properly refreshed.
+ *
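+ * A typical call sequence, as used by PipelineHandlerRkISP1::configure():
+ * \code
+ * ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_);
+ * if (!ret)
+ * 	ret = updateControls(data);
+ * \endcode
+ *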
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerRkISP1::updateControls(RkISP1CameraData *data)
+{
+ ControlInfoMap::Map controls;
+
+ if (dewarper_) {
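+		/*
+		 * inputCropBounds() returns the minimum and maximum supported
+		 * input crop rectangles, in that order.
+		 */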
+ std::pair<Rectangle, Rectangle> cropLimits;
+ if (dewarper_->isConfigured(&data->mainPathStream_))
+ cropLimits = dewarper_->inputCropBounds(&data->mainPathStream_);
+ else
+ cropLimits = dewarper_->inputCropBounds();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so the limits
+		 * need to be transformed to sensor coordinates. We can safely
+		 * assume that the maximum crop limit contains the full FOV of
+		 * the dewarper.
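+		 *
+		 * transformedBetween() scales and translates a rectangle
+		 * proportionally from one frame of reference to another, here
+		 * from the dewarper input frame (cropLimits.second) to the
+		 * sensor frame (scalerMaxCrop_).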
+ */
+ Rectangle min = cropLimits.first.transformedBetween(cropLimits.second,
+ scalerMaxCrop_);
+
+ controls[&controls::ScalerCrop] = ControlInfo(min,
+ scalerMaxCrop_,
+ scalerMaxCrop_);
+ data->properties_.set(properties::ScalerCropMaximum, scalerMaxCrop_);
+ activeCrop_ = scalerMaxCrop_;
+ }
+
+	/* Add the IPA registered controls to the list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
+ controls::controls);
return 0;
}
@@ -880,31 +1318,44 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
int ret;
std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this);
-
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
- data->controlInfo_ = std::move(ctrls);
-
- data->sensor_ = new CameraSensor(sensor);
- ret = data->sensor_->init();
- if (ret)
- return ret;
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!data->sensor_)
+ return -ENODEV;
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
- ret = data->loadIPA();
+ scalerMaxCrop_ = Rectangle(data->sensor_->resolution());
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ ret = data->loadIPA(media_->hwRevision());
if (ret)
return ret;
- std::set<Stream *> streams{ &data->stream_ };
+ updateControls(data.get());
+
+ std::set<Stream *> streams{
+ &data->mainPathStream_,
+ &data->selfPathStream_,
+ };
+ const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
- Camera::create(this, sensor->name(), streams);
- registerCamera(std::move(camera), std::move(data));
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
return 0;
}
@@ -915,9 +1366,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -926,20 +1375,36 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!media_)
return false;
+ if (!media_->hwRevision()) {
+ LOG(RkISP1, Error)
+ << "The rkisp1 driver is too old, v5.11 or newer is required";
+ return false;
+ }
+
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
- resizer_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_resizer_mainpath");
- if (resizer_->open() < 0)
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
return false;
- /* Locate and open the capture video node. */
- video_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_mainpath");
- if (video_->open() < 0)
- return false;
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+ /* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
return false;
@@ -948,40 +1413,63 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (param_->open() < 0)
return false;
- video_->bufferReady.connect(this, &PipelineHandlerRkISP1::bufferReady);
- stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
- param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
+ /* Locate and open the ISP main and self paths. */
+ if (!mainPath_.init(media_))
+ return false;
- /* Configure default links. */
- if (initLinks() < 0) {
- LOG(RkISP1, Error) << "Failed to setup links";
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
+
+ mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statBufferReady);
+ param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramBufferReady);
+
+	/* If a dewarper is present, create an instance for it. */
+ DeviceMatch dwp("dw100");
+ dwp.add("dw100-source");
+ dwp.add("dw100-sink");
+
+ std::shared_ptr<MediaDevice> dwpMediaDevice = enumerator->search(dwp);
+ if (dwpMediaDevice) {
+ dewarper_ = std::make_unique<V4L2M2MConverter>(dwpMediaDevice.get());
+ if (dewarper_->isValid()) {
+ dewarper_->outputBufferReady.connect(
+ this, &PipelineHandlerRkISP1::dewarpBufferReady);
+
+ LOG(RkISP1, Info)
+ << "Using DW100 dewarper " << dewarper_->deviceNode();
+ } else {
+ LOG(RkISP1, Warning)
+ << "Found DW100 dewarper " << dewarper_->deviceNode()
+ << " but invalid";
+
+ dewarper_.reset();
+ }
}
/*
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
- for (MediaLink *link : pad->links())
- createCamera(link->source()->entity());
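+	/*
+	 * createCamera() returns 0 on success. Report the handler as
+	 * matched only if at least one camera was registered.
+	 */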
+ bool registered = false;
+ for (MediaLink *link : ispSink_->links()) {
+ if (!createCamera(link->source()->entity()))
+ registered = true;
+ }
- return true;
+ return registered;
}
/* -----------------------------------------------------------------------------
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -989,41 +1477,148 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
- completeRequest(activeCamera_, request);
+ completeRequest(request);
}
-void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::imageBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
- Request *request = buffer->request();
- data->timeline_.bufferReady(buffer);
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
- if (data->frame_ <= buffer->metadata().sequence)
- data->frame_ = buffer->metadata().sequence + 1;
+ const FrameMetadata &metadata = buffer->metadata();
+ Request *request = info->request;
+
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStats(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
+
+ if (!useDewarper_) {
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+
+ return;
+ }
+
+	/* Do not queue cancelled frames to the dewarper. */
+ if (metadata.status == FrameMetadata::FrameCancelled) {
+ /*
+		 * The i.MX8MP is the only known platform with a dewarper. It
+		 * has no self path, hence only main path buffer completion is
+		 * required.
+		 *
+		 * Also, we cannot completeBuffer(request, buffer) here, as
+		 * buffer is an internal buffer (between the ISP and the
+		 * dewarper) and is not associated with any specific request.
+		 * The request buffer associated with the main path stream is
+		 * the one that needs to be completed (not the internal
+		 * buffer).
+ */
+ for (auto it : request->buffers()) {
+ if (it.first == &data->mainPathStream_)
+ completeBuffer(request, it.second);
+ }
+
+ tryCompleteRequest(info);
+ return;
+ }
+
+ /* Handle scaler crop control. */
+ const auto &crop = request->controls().get(controls::ScalerCrop);
+ if (crop) {
+ Rectangle rect = crop.value();
- completeBuffer(activeCamera_, request, buffer);
- tryCompleteRequest(request);
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so it needs
+		 * to be transformed into dewarper coordinates. We can safely
+		 * assume that the maximum crop limit contains the full FOV of
+		 * the dewarper.
+ */
+ std::pair<Rectangle, Rectangle> cropLimits =
+ dewarper_->inputCropBounds(&data->mainPathStream_);
+
+ rect = rect.transformedBetween(scalerMaxCrop_, cropLimits.second);
+ int ret = dewarper_->setInputCrop(&data->mainPathStream_,
+ &rect);
+ rect = rect.transformedBetween(cropLimits.second, scalerMaxCrop_);
+ if (!ret && rect != crop.value()) {
+ /*
+ * If the rectangle is changed by setInputCrop on the
+ * dewarper, log a debug message and cache the actual
+ * applied rectangle for metadata reporting.
+ */
+ LOG(RkISP1, Debug)
+ << "Applied rectangle " << rect.toString()
+ << " differs from requested " << crop.value().toString();
+ }
+
+ activeCrop_ = rect;
+ }
+
+ /*
+ * Queue input and output buffers to the dewarper. The output
+ * buffers for the dewarper are the buffers of the request, supplied
+ * by the application.
+ */
+ int ret = dewarper_->queueBuffers(buffer, request->buffers());
+ if (ret < 0)
+ LOG(RkISP1, Error) << "Cannot queue buffers to dewarper: "
+ << strerror(-ret);
+
+ request->metadata().set(controls::ScalerCrop, activeCrop_.value());
}
-void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::dewarpBufferReady(FrameBuffer *buffer)
+{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+ Request *request = buffer->request();
+
+	RkISP1FrameInfo *info = data->frameInfo_.find(request);
+ if (!info)
+ return;
+
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+}
+
+void PipelineHandlerRkISP1::paramBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::statBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1032,12 +1627,19 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (!info)
return;
- IPAOperationData op;
- op.operation = RKISP1_IPA_EVENT_SIGNAL_STAT_BUFFER;
- op.data = { info->frame, info->statBuffer->cookie() };
- data->ipa_->processEvent(op);
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ info->metadataProcessed = true;
+ tryCompleteRequest(info);
+ return;
+ }
+
+ if (data->frame_ <= buffer->metadata().sequence)
+ data->frame_ = buffer->metadata().sequence + 1;
+
+ data->ipa_->processStats(info->frame, info->statBuffer->cookie(),
+ data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
new file mode 100644
index 00000000..eee5b09e
--- /dev/null
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Rockchip ISP1 path helper
+ */
+
+#include "rkisp1_path.h"
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RkISP1)
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
+RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats)
+ : name_(name), running_(false), formats_(formats), link_(nullptr)
+{
+}
+
+bool RkISP1Path::init(MediaDevice *media)
+{
+ std::string resizer = std::string("rkisp1_resizer_") + name_ + "path";
+ std::string video = std::string("rkisp1_") + name_ + "path";
+
+ resizer_ = V4L2Subdevice::fromEntityName(media, resizer);
+ if (resizer_->open() < 0)
+ return false;
+
+ video_ = V4L2VideoDevice::fromEntityName(media, video);
+ if (video_->open() < 0)
+ return false;
+
+ populateFormats();
+
+ link_ = media->link("rkisp1_isp", 2, resizer, 0);
+ if (!link_)
+ return false;
+
+ return true;
+}
+
+void RkISP1Path::populateFormats()
+{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+/**
+ * \brief Filter the sensor resolutions that can be supported
+ * \param[in] sensor The camera sensor
+ *
+ * This function retrieves all the sizes supported by the sensor and keeps
+ * only the resolutions that the pipeline can support. The sensor's maximum
+ * output resolution may be higher than the ISP's maximum input resolution;
+ * in that case, this function filters out all the resolutions that cannot
+ * be supported and returns the largest sensor resolution that the pipeline
+ * can handle.
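+ *
+ * For example (hypothetical sizes), with an ISP input limited to 4416x3312,
+ * sensor modes of 1920x1080, 4208x3120 and 5792x4344 would be filtered down
+ * to 1920x1080 and 4208x3120, and 4208x3120 would be returned.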
+ *
+ * \return Maximum sensor size supported on the pipeline
+ */
+Size RkISP1Path::filterSensorResolution(const CameraSensor *sensor)
+{
+ auto iter = sensorSizesMap_.find(sensor);
+ if (iter != sensorSizesMap_.end())
+ return iter->second.back();
+
+ std::vector<Size> &sizes = sensorSizesMap_[sensor];
+ for (unsigned int code : sensor->mbusCodes()) {
+ for (const Size &size : sensor->sizes(code)) {
+ if (size.width > maxResolution_.width ||
+ size.height > maxResolution_.height)
+ continue;
+
+ sizes.push_back(size);
+ }
+ }
+
+ /* Sort in increasing order and remove duplicates. */
+ std::sort(sizes.begin(), sizes.end());
+ auto last = std::unique(sizes.begin(), sizes.end());
+ sizes.erase(last, sizes.end());
+
+ return sizes.back();
+}
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
+ /* Min and max resolutions to populate the available stream formats. */
+ Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
+
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
+ cfg.bufferCount = RKISP1_BUFFER_COUNT;
+
+ return cfg;
+}
+
+CameraConfiguration::Status
+RkISP1Path::validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
+ const StreamConfiguration reqCfg = *cfg;
+ CameraConfiguration::Status status = CameraConfiguration::Valid;
+
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * If the bits per pixel is supplied from the sensor
+ * configuration, choose a raw format that complies with
+ * it. Otherwise, store the raw format with the highest
+ * bits per pixel for later usage.
+ */
+ if (sensorConfig && info.bitsPerPixel != sensorConfig->bitDepth)
+ continue;
+
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ if (sensorConfig && !rawFormat.isValid())
+ return CameraConfiguration::Invalid;
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size while ensuring the output size doesn't exceed ISP limits.
+ *
+ * As 'resolution' is the largest sensor resolution
+ * supported by the ISP, CameraSensor::getFormat() will never
+ * return a V4L2SubdeviceFormat with a larger size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ cfg->size.boundTo(resolution);
+
+ Size rawSize = sensorConfig ? sensorConfig->outputSize
+ : cfg->size;
+
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, rawSize);
+
+ if (sensorConfig &&
+ sensorConfig->outputSize != sensorFormat.size)
+ return CameraConfiguration::Invalid;
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else if (sensorConfig) {
+ /*
+		 * We have already ensured that 'rawFormat' matches the bit
+		 * depth requested by sensorConfig.bitDepth, hence only the
+		 * sensorConfig's output size needs to be validated here.
+ */
+ Size sensorSize = sensorConfig->outputSize;
+
+ if (sensorSize > resolution)
+ return CameraConfiguration::Invalid;
+
+ uint32_t mbusCode = formatToMediaBus.at(rawFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, sensorSize);
+
+ if (sensorFormat.size != sensorSize)
+ return CameraConfiguration::Invalid;
+
+ minResolution = minResolution_.expandedToAspectRatio(sensorSize);
+ maxResolution = maxResolution_.boundedTo(sensorSize)
+ .boundedToAspectRatio(sensorSize);
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
+
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
+ cfg->bufferCount = RKISP1_BUFFER_COUNT;
+
+ V4L2DeviceFormat format;
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
+ format.size = cfg->size;
+
+ int ret = video_->tryFormat(&format);
+ if (ret)
+ return CameraConfiguration::Invalid;
+
+ cfg->stride = format.planes[0].bpl;
+ cfg->frameSize = format.planes[0].size;
+
+ if (cfg->pixelFormat != reqCfg.pixelFormat || cfg->size != reqCfg.size) {
+ LOG(RkISP1, Debug)
+ << "Adjusting format from " << reqCfg.toString()
+ << " to " << cfg->toString();
+ status = CameraConfiguration::Adjusted;
+ }
+
+ return status;
+}
+
+int RkISP1Path::configure(const StreamConfiguration &config,
+ const V4L2SubdeviceFormat &inputFormat)
+{
+ int ret;
+
+ V4L2SubdeviceFormat ispFormat = inputFormat;
+
+ ret = resizer_->setFormat(0, &ispFormat);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+ * Note that this does not apply to devices without DUAL_CROP support
+	 * (like the i.MX8MP), where the cropping needs to be done on the
+ * ImageStabilizer block on the ISP source pad and therefore is
+ * configured before this stage. For simplicity we still set the crop.
+ * This gets ignored by the kernel driver because the hardware is
+ * missing the capability.
+ *
+ * Alignment to a multiple of 2 pixels is required by the resizer.
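+	 *
+	 * For example, a 1920x1080 input paired with a 4:3 stream yields a
+	 * 1440x1080 crop rectangle positioned at (240, 0).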
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
+ ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
+ if (ret < 0)
+ return ret;
+
+ LOG(RkISP1, Debug)
+ << "Configured " << name_ << " resizer input pad with "
+ << ispFormat << " crop " << rect;
+
+ ispFormat.size = config.size;
+
+ LOG(RkISP1, Debug)
+ << "Configuring " << name_ << " resizer output pad with "
+ << ispFormat;
+
+ /*
+ * The configuration has been validated, the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
+
+ ret = resizer_->setFormat(1, &ispFormat);
+ if (ret < 0)
+ return ret;
+
+ LOG(RkISP1, Debug)
+ << "Configured " << name_ << " resizer output pad with "
+ << ispFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+ V4L2DeviceFormat outputFormat;
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
+ outputFormat.size = config.size;
+ outputFormat.planesCount = info.numPlanes();
+
+ ret = video_->setFormat(&outputFormat);
+ if (ret)
+ return ret;
+
+ if (outputFormat.size != config.size ||
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
+ LOG(RkISP1, Error)
+ << "Unable to configure capture in " << config.toString();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int RkISP1Path::start()
+{
+ int ret;
+
+ if (running_)
+ return -EBUSY;
+
+ /* \todo Make buffer count user configurable. */
+ ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
+ if (ret)
+ return ret;
+
+ ret = video_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start " << name_ << " path";
+
+ video_->releaseBuffers();
+ return ret;
+ }
+
+ running_ = true;
+
+ return 0;
+}
+
+void RkISP1Path::stop()
+{
+ if (!running_)
+ return;
+
+ if (video_->streamOff())
+ LOG(RkISP1, Warning) << "Failed to stop " << name_ << " path";
+
+ video_->releaseBuffers();
+
+ running_ = false;
+}
+
+/*
+ * \todo Remove the hardcoded formats once all users have migrated to a
+ * recent enough kernel.
+ */
+namespace {
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
+ formats::YUYV,
+ formats::NV16,
+ formats::NV61,
+ formats::NV21,
+ formats::NV12,
+ formats::R8,
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
+};
+
+constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
+ formats::YUYV,
+ formats::NV16,
+ formats::NV61,
+ formats::NV21,
+ formats::NV12,
+ formats::R8,
+ formats::RGB565,
+ formats::XRGB8888,
+};
+} /* namespace */
+
+RkISP1MainPath::RkISP1MainPath()
+ : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS)
+{
+}
+
+RkISP1SelfPath::RkISP1SelfPath()
+ : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS)
+{
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
new file mode 100644
index 00000000..2a1ef0ab
--- /dev/null
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Rockchip ISP1 path helper
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+#include <libcamera/base/span.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class CameraSensor;
+class MediaDevice;
+class SensorConfiguration;
+class V4L2Subdevice;
+struct StreamConfiguration;
+struct V4L2SubdeviceFormat;
+
+class RkISP1Path
+{
+public:
+ RkISP1Path(const char *name, const Span<const PixelFormat> &formats);
+
+ bool init(MediaDevice *media);
+
+ int setEnabled(bool enable) { return link_->setEnabled(enable); }
+ bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
+
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg);
+
+ int configure(const StreamConfiguration &config,
+ const V4L2SubdeviceFormat &inputFormat);
+
+ int exportBuffers(unsigned int bufferCount,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+ {
+ return video_->exportBuffers(bufferCount, buffers);
+ }
+
+ int start();
+ void stop();
+
+ int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); }
+ Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
+ const Size &maxResolution() const { return maxResolution_; }
+
+private:
+ void populateFormats();
+ Size filterSensorResolution(const CameraSensor *sensor);
+
+ static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
+
+ const char *name_;
+ bool running_;
+
+ const Span<const PixelFormat> formats_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
+
+ std::unique_ptr<V4L2Subdevice> resizer_;
+ std::unique_ptr<V4L2VideoDevice> video_;
+ MediaLink *link_;
+
+ /*
+	 * Map from camera sensors to the sizes (in increasing order) that
+	 * are guaranteed to be supported by the pipeline.
+ */
+ std::map<const CameraSensor *, std::vector<Size>> sensorSizesMap_;
+};
+
+class RkISP1MainPath : public RkISP1Path
+{
+public:
+ RkISP1MainPath();
+};
+
+class RkISP1SelfPath : public RkISP1Path
+{
+public:
+ RkISP1SelfPath();
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/timeline.cpp b/src/libcamera/pipeline/rkisp1/timeline.cpp
deleted file mode 100644
index f6c6434d..00000000
--- a/src/libcamera/pipeline/rkisp1/timeline.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * timeline.cpp - Timeline for per-frame control
- */
-
-#include "timeline.h"
-
-#include "log.h"
-
-/**
- * \file timeline.h
- * \brief Timeline for per-frame control
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Timeline)
-
-/**
- * \class FrameAction
- * \brief Action that can be schedule on a Timeline
- *
- * A frame action is an event schedule to be executed on a Timeline. A frame
- * action has two primal attributes a frame number and a type.
- *
- * The frame number describes the frame to which the action is associated. The
- * type is a numerical ID which identifies the action within the pipeline and
- * IPA protocol.
- */
-
-/**
- * \class Timeline
- * \brief Executor of FrameAction
- *
- * The timeline has three primary functions:
- *
- * 1. Keep track of the Start of Exposure (SOE) for every frame processed by
- * the hardware. Using this information it shall keep an up-to-date estimate
- * of the frame interval (time between two consecutive SOE events).
- *
- * The estimated frame interval together with recorded SOE events are the
- * foundation for how the timeline schedule FrameAction at specific points
- * in time.
- * \todo Improve the frame interval estimation algorithm.
- *
- * 2. Keep track of current delays for different types of actions. The delays
- * for different actions might differ during a capture session. Exposure time
- * effects the over all FPS and different ISP parameters might impacts its
- * processing time.
- *
- * The action type delays shall be updated by the IPA in conjunction with
- * how it changes the capture parameters.
- *
- * 3. Schedule actions on the timeline. This is the process of taking a
- * FrameAction which contains an abstract description of what frame and
- * what type of action it contains and turning that into an time point
- * and make sure the action is executed at that time.
- */
-
-Timeline::Timeline()
- : frameInterval_(0)
-{
- timer_.timeout.connect(this, &Timeline::timeout);
-}
-
-/**
- * \brief Reset and stop the timeline
- *
- * The timeline needs to be reset when the timeline should no longer execute
- * actions. A timeline should be reset between two capture sessions to prevent
- * the old capture session to effect the second one.
- */
-void Timeline::reset()
-{
- timer_.stop();
-
- actions_.clear();
- history_.clear();
-}
-
-/**
- * \brief Schedule an action on the timeline
- * \param[in] action FrameAction to schedule
- *
- * The act of scheduling an action to the timeline is the process of taking
- * the properties of the action (type, frame and time offsets) and translating
- * that to a time point using the current values for the action type timings
- * value recorded in the timeline. If an action is scheduled too late, execute
- * it immediately.
- */
-void Timeline::scheduleAction(std::unique_ptr<FrameAction> action)
-{
- unsigned int lastFrame;
- utils::time_point lastTime;
-
- if (history_.empty()) {
- lastFrame = 0;
- lastTime = std::chrono::steady_clock::now();
- } else {
- lastFrame = history_.back().first;
- lastTime = history_.back().second;
- }
-
- /*
- * Calculate when the action shall be schedule by first finding out how
- * many frames in the future the action acts on and then add the actions
- * frame offset. After the spatial frame offset is found out translate
- * that to a time point by using the last estimated start of exposure
- * (SOE) as the fixed offset. Lastly add the action time offset to the
- * time point.
- */
- int frame = action->frame() - lastFrame + frameOffset(action->type());
- utils::time_point deadline = lastTime + frame * frameInterval_
- + timeOffset(action->type());
-
- utils::time_point now = std::chrono::steady_clock::now();
- if (deadline < now) {
- LOG(Timeline, Warning)
- << "Action scheduled too late "
- << utils::time_point_to_string(deadline)
- << ", run now " << utils::time_point_to_string(now);
- action->run();
- } else {
- actions_.emplace(deadline, std::move(action));
- updateDeadline();
- }
-}
-
-void Timeline::notifyStartOfExposure(unsigned int frame, utils::time_point time)
-{
- history_.push_back(std::make_pair(frame, time));
-
- if (history_.size() <= HISTORY_DEPTH / 2)
- return;
-
- while (history_.size() > HISTORY_DEPTH)
- history_.pop_front();
-
-	/* Update the estimated time between two consecutive SOE events. */
- utils::duration sumExposures(0);
- unsigned int numExposures = 0;
-
- utils::time_point lastTime;
- for (auto it = history_.begin(); it != history_.end(); it++) {
- if (it != history_.begin()) {
- sumExposures += it->second - lastTime;
- numExposures++;
- }
-
- lastTime = it->second;
- }
-
- frameInterval_ = sumExposures;
- if (numExposures)
- frameInterval_ /= numExposures;
-}
-
-int Timeline::frameOffset(unsigned int type) const
-{
- const auto it = delays_.find(type);
- if (it == delays_.end()) {
- LOG(Timeline, Error)
- << "No frame offset set for action type " << type;
- return 0;
- }
-
- return it->second.first;
-}
-
-utils::duration Timeline::timeOffset(unsigned int type) const
-{
- const auto it = delays_.find(type);
- if (it == delays_.end()) {
- LOG(Timeline, Error)
- << "No time offset set for action type " << type;
- return utils::duration::zero();
- }
-
- return it->second.second;
-}
-
-void Timeline::setRawDelay(unsigned int type, int frame, utils::duration time)
-{
- delays_[type] = std::make_pair(frame, time);
-}
-
-void Timeline::updateDeadline()
-{
- if (actions_.empty())
- return;
-
- const utils::time_point &deadline = actions_.begin()->first;
-
- if (timer_.isRunning() && deadline >= timer_.deadline())
- return;
-
- if (deadline <= std::chrono::steady_clock::now()) {
- timeout(&timer_);
- return;
- }
-
- timer_.start(deadline);
-}
-
-void Timeline::timeout(Timer *timer)
-{
- utils::time_point now = std::chrono::steady_clock::now();
-
- for (auto it = actions_.begin(); it != actions_.end();) {
- const utils::time_point &sched = it->first;
-
- if (sched > now)
- break;
-
- FrameAction *action = it->second.get();
-
- action->run();
-
- it = actions_.erase(it);
- }
-
- updateDeadline();
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/timeline.h b/src/libcamera/pipeline/rkisp1/timeline.h
deleted file mode 100644
index 9d30e4ea..00000000
--- a/src/libcamera/pipeline/rkisp1/timeline.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * timeline.h - Timeline for per-frame controls
- */
-#ifndef __LIBCAMERA_TIMELINE_H__
-#define __LIBCAMERA_TIMELINE_H__
-
-#include <list>
-#include <map>
-
-#include <libcamera/timer.h>
-
-#include "utils.h"
-
-namespace libcamera {
-
-class FrameAction
-{
-public:
- FrameAction(unsigned int frame, unsigned int type)
- : frame_(frame), type_(type) {}
-
- virtual ~FrameAction() {}
-
- unsigned int frame() const { return frame_; }
- unsigned int type() const { return type_; }
-
- virtual void run() = 0;
-
-private:
- unsigned int frame_;
- unsigned int type_;
-};
-
-class Timeline
-{
-public:
- Timeline();
- virtual ~Timeline() {}
-
- virtual void reset();
- virtual void scheduleAction(std::unique_ptr<FrameAction> action);
- virtual void notifyStartOfExposure(unsigned int frame, utils::time_point time);
-
- utils::duration frameInterval() const { return frameInterval_; }
-
-protected:
- int frameOffset(unsigned int type) const;
- utils::duration timeOffset(unsigned int type) const;
-
- void setRawDelay(unsigned int type, int frame, utils::duration time);
-
- std::map<unsigned int, std::pair<int, utils::duration>> delays_;
-
-private:
- static constexpr unsigned int HISTORY_DEPTH = 10;
-
- void timeout(Timer *timer);
- void updateDeadline();
-
- std::list<std::pair<unsigned int, utils::time_point>> history_;
- std::multimap<utils::time_point, std::unique_ptr<FrameAction>> actions_;
- utils::duration frameInterval_;
-
- Timer timer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_TIMELINE_H__ */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth that an application
+ * needs to maintain for a pipeline to also cover controls. Just as with
+ * buffer depth, if the application keeps the number of requests queued above
+ * the control depth, the controls are guaranteed to take effect for the
+ * correct request. The control depth is determined by the control with the
+ * greatest delay.
+ */
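+
+/*
+ * A minimal usage sketch (device setup omitted; the control delay values are
+ * hypothetical):
+ *
+ *     std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ *             { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ *             { V4L2_CID_EXPOSURE, { 2, false } },
+ *             { V4L2_CID_VBLANK, { 2, true } },
+ *     };
+ *     RPi::DelayedControls delayed(device, params);
+ *
+ *     delayed.reset(0);
+ *     delayed.push(request->controls(), cookie);     // per queued request
+ *     delayed.applyControls(sequence);               // on frame start (SOE)
+ *     auto [ctrls, reason] = delayed.get(sequence);  // controls in effect
+ */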
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that fall outside the
+ * bounds set by the existing vertical blanking, but are within the bounds
+ * implied by the new blanking value.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of, the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. To mix delayed
+ * controls with controls that take effect immediately, the immediate controls
+ * must be listed in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie User-supplied cookie to associate with the startup
+ * control values
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+	/* Retrieve controls as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+ * Do not mark this control value as updated, it does not need
+		 * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ * \param[in] cookie User-supplied cookie to associate with \a controls,
+ * returned alongside them by get()
+ *
+ * Push a set of controls to the control queue. This increases the control
+ * queue depth by one.
+ *
+ * \return True if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back what controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist.
+ * It is the caller's responsibility not to read sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * still yields valid values is thus 16 minus the number of controls pushed.
+ *
+ * \return The controls in effect at \a sequence, and the associated cookie
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
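+	/*
+	 * Fixed-size ring buffer: indices wrap modulo listSize, so steadily
+	 * increasing frame sequence numbers can be used to index the buffer
+	 * directly.
+	 */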
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
new file mode 100644
index 00000000..b2b1a0a6
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
+ 'rpi_stream.cpp',
+])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..4b147fdb
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+	 * variants. So if the resulting PixelFormat is invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+ for (auto &cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+ for (auto &cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+ unsigned int rawStreamIndex = 0;
+ unsigned int outStreamIndex = 0;
+
+ for (auto &cfg : config_) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(rawStreamIndex++, &cfg);
+ else
+ outStreams_.emplace_back(outStreamIndex++, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration's fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+	if (stream->colorSpace != format.colorSpace) {
+		LOG(RPI, Debug)
+			<< "Color space changed from "
+			<< ColorSpace::toString(stream->colorSpace) << " to "
+			<< ColorSpace::toString(format.colorSpace);
+		stream->colorSpace = format.colorSpace;
+		adjusted = true;
+	}
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+	/* Start by freeing all buffers and resetting the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+ * If the application has provided a sensor configuration apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ data->cropParams_.clear();
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ const auto cropParamsIt = data->cropParams_.find(0);
+ if (cropParamsIt != data->cropParams_.end()) {
+ const CameraData::CropParams &cropParams = cropParamsIt->second;
+ /*
+ * Add the ScalerCrop control limits based on the current mode and
+ * the first configured stream.
+ */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(cropParams.ispMinCropSize));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop,
+ data->scaleIspCrop(cropParams.ispCrop));
+ if (data->cropParams_.size() == 2) {
+ /*
+ * The control map for rpi::ScalerCrops has the min value
+ * as the default crop for stream 0, max value as the default
+ * value for stream 1.
+ */
+ ctrlMap[&controls::rpi::ScalerCrops] =
+ ControlInfo(data->scaleIspCrop(data->cropParams_.at(0).ispCrop),
+ data->scaleIspCrop(data->cropParams_.at(1).ispCrop),
+ ctrlMap[&controls::ScalerCrop].def());
+ }
+ }
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+ /*
+ * Start by disabling all the sink pad links on the devices in the
+ * cascade, with the exception of the link connecting the device.
+ */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+ /*
+		 * Next, enable the entity -> entity links, and set up the pad format.
+		 *
+		 * \todo Some bridge devices may change the media bus code, so we
+ * ought to read the source pad format and propagate it to the sink pad.
+ */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+ /*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor ->
+	 * frontend chain. There may be a cascade of devices in this chain!
+ */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { delays.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { delays.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+ /*
+ * Set a default value for the ScalerCropMaximum property to show
+	 * that we support its use; however, initialise it to zero because
+ * it's not meaningful until a camera mode has been chosen.
+ */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+ /*
+ * Link the FrameBuffers with the id (key value) in the map stored in
+ * the RPi stream object - along with an identifier mask.
+ *
+ * This will allow us to identify buffers passed between the pipeline
+ * handler and the IPA.
+ */
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+ unsigned int i;
+ for (i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+	/* Actual sizes larger than desired are penalised less than smaller ones. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
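+
+/*
+ * For example, for a requested width of 1920 (ignoring the aspect ratio and
+ * bit depth penalties applied by findBestFormat()): an actual width of 1920
+ * scores 0, 1280 scores (1920 - 1280) * 2 = 1280, and 2028 scores
+ * ((2028 - 1920) / 8) * 2 = 27, so modes larger than the request are strongly
+ * preferred over smaller ones.
+ */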
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity to entity link.
+ *
+ * Entity pad configuration and link enabling happens at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enable the specific links that connect the sensor to the
+ * frontend across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ * +----------+
+ * | FE |
+ * +-----^----+
+ * |
+ * +---+---+
+ * | Mux1 |<------+
+ * +--^---- |
+ * | |
+ * +-----+---+ +---+---+
+ * | Sensor1 | | Mux2 |<--+
+ * +---------+ +-^-----+ |
+ * | |
+ * +-------+-+ +---+-----+
+ * | Sensor2 | | Sensor3 |
+ * +---------+ +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
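+/*
+ * loadPipelineConfiguration() optionally reads a YAML configuration file
+ * named by the LIBCAMERA_RPI_CONFIG_FILE environment variable. A minimal
+ * sketch of the expected layout, based only on the keys parsed below
+ * (platform specific sections are handled by platformPipelineConfigure()):
+ *
+ *     version: 1.0
+ *     pipeline_handler:
+ *         disable_startup_frame_drops: false
+ *         camera_timeout_value_ms: 0
+ */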
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+		/* Guard against dereferencing an empty optional in the log. */
+		LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+				  << ver.value_or(0.0);
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/* Add to the Request metadata buffer what the IPA has provided. */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+ /*
+ * We need to ensure that if both VBLANK and EXPOSURE are present, the
+ * former must be written ahead of, and separately from EXPOSURE to avoid
+ * V4L2 rejecting the latter. This is identical to what DelayedControls
+ * does with the priority write flag.
+ *
+ * As a consequence of the below logic, VBLANK gets set twice, and we
+ * rely on the v4l2 framework to not pass the second control set to the
+ * driver as the actual control value has not changed.
+ */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
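+	 *
+	 * For example (hypothetical numbers): with an ISP output of 1640x1232
+	 * and a sensor analogue crop of 3280x2464, an 800x600 ispCrop scales
+	 * by 2x to 1600x1200, and is then offset by the analogue crop's
+	 * top-left corner.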
+ */
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCropRPi = controls.get<Span<const Rectangle>>(controls::rpi::ScalerCrops);
+ const auto &scalerCropCore = controls.get<Rectangle>(controls::ScalerCrop);
+ std::vector<Rectangle> scalerCrops;
+
+ /*
+ * First thing to do is create a vector of crops to apply to each ISP output
+ * based on either controls::ScalerCrop or controls::rpi::ScalerCrops if
+ * present.
+ *
+	 * If controls::rpi::ScalerCrops is present, apply the given crops to the
+ * ISP output streams, indexed by the same order in which they had been
+ * configured. This is not the same as the ISP output index. Otherwise
+ * if controls::ScalerCrop is present, apply the same crop to all ISP
+ * output streams.
+ */
+ for (unsigned int i = 0; i < cropParams_.size(); i++) {
+ if (scalerCropRPi && i < scalerCropRPi->size())
+ scalerCrops.push_back(scalerCropRPi->data()[i]);
+ else if (scalerCropCore)
+ scalerCrops.push_back(*scalerCropCore);
+ }
+
+ for (auto const &[i, scalerCrop] : utils::enumerate(scalerCrops)) {
+ Rectangle nativeCrop = scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Size minSize = cropParams_.at(i).ispMinCropSize.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != cropParams_.at(i).ispCrop) {
+ cropParams_.at(i).ispCrop = ispCrop;
+ platformSetIspCrop(cropParams_.at(i).ispIndex, ispCrop);
+ }
+ }
+}
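+/*
+ * Continuing the worked example above, applyScalerCrop() performs the inverse
+ * mapping: a requested crop of 1920x1080 at (976, 550) in native sensor
+ * coordinates becomes 960x540 at (480, 270) in ISP pixels, which is then
+ * expanded to at least the aspect-ratio-matched ispMinCropSize, re-centred
+ * and clamped to sensorInfo_.outputSize before being applied through
+ * platformSetIspCrop().
+ */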
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+	/*
+	 * It is possible to be here without a pending request, so check
+	 * that we actually have one to action; otherwise we just return
+	 * the buffer back to the stream.
+	 */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
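+/*
+ * For reference, a sketch of the state transitions implied by the code above
+ * and the platform implementations in this patch (start-up handling lives
+ * elsewhere):
+ *
+ *   Idle        -> Busy         tryRunPipeline() signals prepareIsp()
+ *   Busy        -> IpaComplete  the IPA signals processStatsComplete()
+ *   IpaComplete -> Idle         checkRequestCompleted() completes the request
+ *   any         -> Error        cameraTimeout()
+ */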
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+	/*
+	 * If we are dropping this frame, do not touch the request; simply
+	 * change the state to Idle when ready.
+	 */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+	/*
+	 * Make sure all the ISP outputs have completed in the case of a
+	 * dropped frame.
+	 */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ if (cropParams_.size()) {
+ std::vector<Rectangle> crops;
+
+ for (auto const &[k, v] : cropParams_)
+ crops.push_back(scaleIspCrop(v.ispCrop));
+
+ request->metadata().set(controls::ScalerCrop, crops[0]);
+ if (crops.size() > 1) {
+ request->metadata().set(controls::rpi::ScalerCrops,
+ Span<const Rectangle>(crops.data(), crops.size()));
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..aae0c2f3
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop(unsigned int index, const Rectangle &ispCrop) = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+	/*
+	 * Stores a cascade of Video Mux or Bridge devices between the sensor
+	 * and Unicam, together with the media links across the entities.
+	 */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+	/*
+	 * All the functions in this class are called from a single calling
+	 * thread, so we do not need any mutex to protect access to the
+	 * variables below.
+	 */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+
+ struct CropParams {
+ CropParams(Rectangle ispCrop_, Size ispMinCropSize_, unsigned int ispIndex_)
+ : ispCrop(ispCrop_), ispMinCropSize(ispMinCropSize_), ispIndex(ispIndex_)
+ {
+ }
+
+ /* Crop in ISP (camera mode) pixels */
+ Rectangle ispCrop;
+ /* Minimum crop size in ISP output pixels */
+ Size ispMinCropSize;
+ /* Index of the ISP output channel for this crop */
+ unsigned int ispIndex;
+ };
+
+ /* Mapping of CropParams keyed by the output stream order in CameraConfiguration */
+ std::map<unsigned int, CropParams> cropParams_;
+
+ unsigned int dropFrameCount_;
+
+	/*
+	 * If set, this stores the value that represents a gain of one for
+	 * the V4L2_CID_NOTIFY_GAINS control.
+	 */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+	/*
+	 * Store the colour spaces that all our streams will have. RGB format
+	 * streams will have the same colour space as the YUV streams, with the
+	 * YCbCr field cleared and the range set to full.
+	 */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
new file mode 100644
index 00000000..accf59eb
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+#include "rpi_stream.h"
+
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
+static constexpr unsigned int maxV4L2BufferCount = 32;
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPISTREAM)
+
+namespace RPi {
+
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
+V4L2VideoDevice *Stream::dev() const
+{
+ return dev_.get();
+}
+
+const std::string &Stream::name() const
+{
+ return name_;
+}
+
+unsigned int Stream::swDownscale() const
+{
+ return swDownscale_;
+}
+
+void Stream::setSwDownscale(unsigned int swDownscale)
+{
+ swDownscale_ = swDownscale;
+}
+
+void Stream::resetBuffers()
+{
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
+}
+
+void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (auto const &buffer : *buffers)
+ bufferEmplace(++id_, buffer.get());
+}
+
+const BufferMap &Stream::getBuffers() const
+{
+ return bufferMap_;
+}
+
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
+{
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
+
+ /* Find the buffer in the map, and return the buffer id. */
+ auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
+
+ if (it == bufferMap_.end())
+ return 0;
+
+ return it->first;
+}
+
+void Stream::setExportedBuffer(FrameBuffer *buffer)
+{
+ bufferEmplace(++id_, buffer);
+}
+
+int Stream::prepareBuffers(unsigned int count)
+{
+ int ret;
+
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
+
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
+ }
+
+ return dev_->importBuffers(maxV4L2BufferCount);
+}
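+/*
+ * Note the asymmetry between the two calls above: exportBuffers() allocates
+ * dmabuf-backed frame buffers from the device for internal use, whereas
+ * importBuffers() only reserves V4L2 buffer slots for buffers imported
+ * later, without allocating any memory itself.
+ */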
+
+int Stream::queueBuffer(FrameBuffer *buffer)
+{
+ /*
+ * A nullptr buffer implies an external stream, but no external
+ * buffer has been supplied in the Request. So, pick one from the
+ * availableBuffers_ queue.
+ */
+ if (!buffer) {
+ if (availableBuffers_.empty()) {
+ LOG(RPISTREAM, Debug) << "No buffers available for "
+ << name_;
+ /*
+ * Note that we need to queue an internal buffer as soon
+ * as one becomes available.
+ */
+ requestBuffers_.push(nullptr);
+ return 0;
+ }
+
+ buffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ }
+
+ /*
+ * If no earlier requests are pending to be queued we can go ahead and
+ * queue this buffer into the device.
+ */
+ if (requestBuffers_.empty())
+ return queueToDevice(buffer);
+
+ /*
+ * There are earlier Request buffers to be queued, so this buffer must go
+ * on the waiting list.
+ */
+ requestBuffers_.push(buffer);
+
+ return 0;
+}
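+/*
+ * An illustrative ordering scenario for queueBuffer() above: if a request
+ * needs an internal buffer while availableBuffers_ is empty, a nullptr
+ * placeholder is pushed onto requestBuffers_. If a later request then
+ * supplies an external buffer, it queues behind the placeholder. When a
+ * buffer is eventually returned, returnBuffer() substitutes it for the
+ * placeholder and queues it to the device first, preserving the
+ * application's request ordering.
+ */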
+
+void Stream::returnBuffer(FrameBuffer *buffer)
+{
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
+ /* For internal buffers, simply requeue back to the device. */
+ queueToDevice(buffer);
+ return;
+ }
+
+ /* Push this buffer back into the queue to be used again. */
+ availableBuffers_.push(buffer);
+
+ /*
+ * Do we have any Request buffers that are waiting to be queued?
+ * If so, do it now as availableBuffers_ will not be empty.
+ */
+ while (!requestBuffers_.empty()) {
+ FrameBuffer *requestBuffer = requestBuffers_.front();
+
+ if (!requestBuffer) {
+ /*
+ * We want to queue an internal buffer, but none
+ * are available. Can't do anything, quit the loop.
+ */
+ if (availableBuffers_.empty())
+ break;
+
+ /*
+ * We want to queue an internal buffer, and at least one
+ * is available.
+ */
+ requestBuffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ }
+
+ requestBuffers_.pop();
+ queueToDevice(requestBuffer);
+ }
+}
+
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+	auto it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
+
+int Stream::queueAllBuffers()
+{
+ int ret;
+
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
+ return 0;
+
+ while (!availableBuffers_.empty()) {
+ ret = queueBuffer(availableBuffers_.front());
+ if (ret < 0)
+ return ret;
+
+ availableBuffers_.pop();
+ }
+
+ return 0;
+}
+
+void Stream::releaseBuffers()
+{
+ dev_->releaseBuffers();
+ clearBuffers();
+}
+
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+	/* Only mmap the buffer when the stream requires CPU access. */
+	const bool requiresMmap = !!(flags_ & StreamFlag::RequiresMmap);
+
+	bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+			   std::forward_as_tuple(buffer, requiresMmap));
+}
+
+void Stream::clearBuffers()
+{
+ availableBuffers_ = std::queue<FrameBuffer *>{};
+ requestBuffers_ = std::queue<FrameBuffer *>{};
+ internalBuffers_.clear();
+ bufferMap_.clear();
+ id_ = 0;
+}
+
+int Stream::queueToDevice(FrameBuffer *buffer)
+{
+ LOG(RPISTREAM, Debug) << "Queuing buffer " << getBufferId(buffer)
+ << " for " << name_;
+
+ int ret = dev_->queueBuffer(buffer);
+ if (ret)
+ LOG(RPISTREAM, Error) << "Failed to queue buffer for "
+ << name_;
+ return ret;
+}
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+		 * Indicates that the stream buffers need to be mmapped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+ /* Tracks a unique id key for the bufferMap_ */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+	/*
+	 * This is a list of buffers exported internally. We need to keep this
+	 * around as the stream must maintain ownership of these buffers.
+	 */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
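+/*
+ * A minimal usage sketch of the array above, with a hypothetical enum for
+ * illustration:
+ *
+ *   enum class Isp : unsigned int { Input, Output0, Stats };
+ *   Device<Isp, 3> isp;
+ *   isp[Isp::Input].setFlags(Stream::StreamFlag::ImportOnly);
+ *
+ * The index is converted with utils::to_underlying(), so the enumerator
+ * values must be contiguous and start at 0 to remain within bounds.
+ */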
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+ # Note that the pipeline handler might override this value and
+ # not allocate any internal buffers if it knows they will never
+            # be used. For example, if the RAW stream is marked as mandatory
+ # and there are no dropped frames signalled for algorithm
+ # convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
+ #
+ # "min_total_unicam_buffers": 4,
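+            #
+            # As a worked example with the defaults above, an application
+            # allocating 8 Unicam buffers of its own gives
+            # max(2, 4 - 8) = 2 internal buffers, while an application
+            # allocating none gives max(2, 4 - 0) = 4.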
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+            # Custom timeout value (in ms) for the camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..9b37c2f0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..fd8d84b1
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+	/* Arrays of Unicam and ISP device streams and their associated buffers. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaBufAllocator dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop([[maybe_unused]] unsigned int index, const Rectangle &ispCrop) override
+ {
+ Rectangle crop = ispCrop;
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+ /*
+	 * Loop over all Unicam instances, but return once a match is found.
+	 * This is to ensure we correctly enumerate the camera when an instance
+	 * of Unicam has registered with the media controller, but has not
+	 * registered device nodes due to a sensor subdevice failure.
+ */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+			 * The lifetimes of these buffers are shorter than those of the
+ * raw buffers, so allocating a fixed number will still suffice
+ * if the application requests a greater number of raw
+ * buffers, as these will be recycled quicker.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronous with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+	/* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+ /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+			status = CameraConfiguration::Status::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+		 * quality and is usually undesired?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must be for the largest resolution. We will
+ * have that fixed up in the code above.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+		LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+				<< (target ? *target : "(unset)");
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If ISP::Output1 stream has not been requested by the application, we
+ * set it up for internal use now. This second stream will be used for
+ * fast colour denoise, and must be a quarter resolution of the ISP::Output0
+ * stream. However, also limit the maximum size to 1200 pixels in the
+ * larger dimension, just to avoid being wasteful with buffer allocations
+ * and memory bandwidth.
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
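+	/*
+	 * To illustrate the Output1 sizing above with assumed numbers: an
+	 * Output0 of 1920x1080 halves to 960x540; the 1200x1200 limit bounded
+	 * to the 16:9 aspect ratio is 1200x675, which does not constrain it;
+	 * and 960x540 is already aligned to even dimensions, so Output1 gets
+	 * configured as 960x540.
+	 */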
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ Rectangle ispCrop = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+ platformSetIspCrop(0, ispCrop);
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ cropParams_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(ispCrop, testCrop.size(), 0));
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Lookup the sensor controls used for this frame sequence from
+ * DelayedControl and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index = 0;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * ISP statistics buffer must not be re-queued or sent back to the
+ * application until after the IPA signals so.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+	buffer = unicam_[Unicam::Image].getBuffers().at(bayer).buffer;
+	LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bayer
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+		buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+ * Set the dequeue timeout to 5x the maximum frame length advertised
+ * by the IPA over a number of frames, with a minimum timeout value
+ * of 1s.
+ */
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
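+
+/*
+ * A worked example of the clamping above: a 33ms maximum frame length gives
+ * 5 * 33ms = 165ms, which is raised to the 1s minimum, while a 250ms frame
+ * length gives a 1.25s timeout unchanged.
+ */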
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+ /* Take the first request from the queue and action the IPA. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffers with a timestamp lower than the
+ * current bayer buffer will be removed and re-queued to the driver.
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+ /* No matching embedded data buffer was found; log it and continue. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
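+
+/*
+ * For illustration, with a bayer queue headed by a buffer of timestamp 300
+ * and an embedded queue holding timestamps { 100, 200, 300, 400 }: the 100
+ * and 200 buffers are returned to the driver as unmatched, 300 is taken as
+ * the match, and 400 stays queued for a later frame.
+ */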
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
new file mode 100644
index 00000000..dda3de97
--- /dev/null
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'simple.cpp',
+])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
new file mode 100644
index 00000000..8ac24e6e
--- /dev/null
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -0,0 +1,1768 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright (C) 2019, Martijn Braam
+ *
+ * Pipeline handler for simple pipelines
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stdint.h>
+#include <string.h>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SimplePipeline)
+
+/* -----------------------------------------------------------------------------
+ *
+ * Overview
+ * --------
+ *
+ * The SimplePipelineHandler relies on generic kernel APIs to control a camera
+ * device, without any device-specific code and with limited device-specific
+ * static data.
+ *
+ * To qualify for support by the simple pipeline handler, a device shall
+ *
+ * - be supported by V4L2 drivers, exposing the Media Controller API, the V4L2
+ * subdev APIs and the media bus format-based enumeration extension for the
+ * VIDIOC_ENUM_FMT ioctl;
+ * - not expose any device-specific API from drivers to userspace;
+ * - include one or more camera sensor media entities and one or more video
+ * capture devices;
+ * - have a capture pipeline with linear paths from the camera sensors to the
+ * video capture devices; and
+ * - have an optional memory-to-memory device to perform format conversion
+ * and/or scaling, exposed as a V4L2 M2M device.
+ *
+ * As devices that require a specific pipeline handler may still match the
+ * above characteristics, the simple pipeline handler doesn't attempt to
+ * automatically determine which devices it can support. It instead relies on
+ * an explicit list of supported devices, provided in the supportedDevices
+ * array.
+ *
+ * When matching a device, the pipeline handler enumerates all camera sensors
+ * and attempts, for each of them, to find a path to a video capture video node.
+ * It does so by using a breadth-first search to find the shortest path from the
+ * sensor device to a valid capture device. This is guaranteed to produce a
+ * valid path on devices with only one option and is a good heuristic on more
+ * complex devices to skip paths that aren't suitable for the simple pipeline
+ * handler. For instance, on the IPU-based i.MX6, the shortest path will skip
+ * encoders and image converters, and it will end in a CSI capture device.
+ * A more complex graph search algorithm could be implemented if a device that
+ * would otherwise be compatible with the pipeline handler isn't correctly
+ * handled by this heuristic.
+ *
+ * Once the camera data instances have been created, the match() function
+ * creates a V4L2VideoDevice or V4L2Subdevice instance for each entity used by
+ * any of the cameras and stores them in SimplePipelineHandler::entities_,
+ * accessible by the SimpleCameraData class through the
+ * SimplePipelineHandler::subdev() and SimplePipelineHandler::video() functions.
+ * This avoids duplication of subdev instances between different cameras when
+ * the same entity is used in multiple paths.
+ *
+ * Finally, all camera data instances are initialized to gather information
+ * about the possible pipeline configurations for the corresponding camera. If
+ * valid pipeline configurations are found, a Camera is registered for the
+ * SimpleCameraData instance.
+ *
+ * Pipeline Traversal
+ * ------------------
+ *
+ * During the breadth-first search, the pipeline is traversed from entity to
+ * entity, by following media graph links from source to sink, starting at the
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers the default internal routes only and doesn't attempt to
+ * set up custom routes. This can be extended if needed.
+ *
+ * The shortest path between the camera sensor and a video node is stored in
+ * SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
+ * ordered along the data path from the camera sensor to the video node. The
+ * Entity structure stores a pointer to the MediaEntity, as well as information
+ * about how it is connected in that particular path for later usage when
+ * configuring the pipeline.
+ *
+ * Pipeline Configuration
+ * ----------------------
+ *
+ * The simple pipeline handler configures the pipeline by propagating V4L2
+ * subdev formats from the camera sensor to the video node. The format is first
+ * set on the camera sensor's output, picking a resolution supported by the
+ * sensor that best matches the needs of the requested streams. Then, on every
+ * link in the pipeline, the format is retrieved on the link source and set
+ * unmodified on the link sink.
+ *
+ * The best sensor resolution is selected using a heuristic that tries to
+ * minimize the required bus and memory bandwidth, as the simple pipeline
+ * handler is typically used on smaller, less powerful systems. To avoid the
+ * need to upscale, the pipeline handler picks the smallest sensor resolution
+ * large enough to accommodate the need of all streams. Resolutions that
+ * significantly restrict the field of view are ignored.
+ *
+ * When initializing the camera data, the above format propagation procedure
+ * is repeated for every media bus format and size supported by the camera
+ * sensor. Upon reaching the video node, the pixel formats compatible with the
+ * media bus format are enumerated. Each combination of the input media bus
+ * format, output pixel format and output size are recorded in an instance of
+ * the SimpleCameraData::Configuration structure, stored in the
+ * SimpleCameraData::configs_ vector.
+ *
+ * Format Conversion and Scaling
+ * -----------------------------
+ *
+ * The capture pipeline isn't expected to include a scaler, and if a scaler is
+ * available, it is ignored when configuring the pipeline. However, the simple
+ * pipeline handler supports optional memory-to-memory converters to scale the
+ * image and convert it to a different pixel format. If such a converter is
+ * present, the pipeline handler enumerates, for each pipeline configuration,
+ * the pixel formats and sizes that the converter can produce for the output of
+ * the capture video node, and stores the information in the outputFormats and
+ * outputSizes of the SimpleCameraData::Configuration structure.
+ *
+ * Concurrent Access to Cameras
+ * ----------------------------
+ *
+ * The cameras created by the same pipeline handler instance may share hardware
+ * resources. For instance, a platform may have multiple CSI-2 receivers but a
+ * single DMA engine, prohibiting usage of multiple cameras concurrently. This
+ * depends heavily on the hardware architecture, which the simple pipeline
+ * handler has no a priori knowledge of. The pipeline handler thus implements a
+ * heuristic to handle sharing of hardware resources in a generic fashion.
+ *
+ * Two cameras are considered to be mutually exclusive if they share common
+ * pads along the pipeline from the camera sensor to the video node. An entity
+ * can thus be used concurrently by multiple cameras, as long as pads are
+ * distinct.
+ *
+ * A resource reservation mechanism is implemented by the SimplePipelineHandler
+ * acquirePipeline() and releasePipeline() functions to manage exclusive access
+ * to pads. A camera reserves all the pads present in its pipeline when it is
+ * started, and the start() function returns an error if any of the required
+ * pads is already in use. When the camera is stopped, the pads it has reserved
+ * are released.
+ */
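+
+/*
+ * As a hypothetical example of the reservation rule: if camera A's pipeline
+ * uses pads [0|csi|1] of a CSI entity and camera B's uses [2|csi|3], the two
+ * cameras can be started concurrently since no pad is shared. If both
+ * pipelines routed through source pad 1, the second start() would fail with
+ * -EBUSY.
+ */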
+
+class SimplePipelineHandler;
+
+struct SimplePipelineInfo {
+ const char *driver;
+ /*
+ * Each converter in the list contains the name
+ * and the number of streams it supports.
+ */
+ std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Whether the Software ISP is enabled for this driver.
+ *
+ * The Software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
+};
+
+namespace {
+
+static const SimplePipelineInfo supportedDevices[] = {
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
+ { "j721e-csi2rx", {}, true },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
+};
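+
+/*
+ * A hypothetical new entry would follow the same shape: a platform whose
+ * "foo-csi" capture driver pairs with a "foo-scaler" M2M converter exposing
+ * two streams would be listed as
+ *
+ *   { "foo-csi", { { "foo-scaler", 2 } }, false },
+ */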
+
+} /* namespace */
+
+class SimpleCameraData : public Camera::Private
+{
+public:
+ SimpleCameraData(SimplePipelineHandler *pipe,
+ unsigned int numStreams,
+ MediaEntity *sensor);
+
+ bool isValid() const { return sensor_ != nullptr; }
+ SimplePipelineHandler *pipe();
+
+ int init();
+ int setupLinks();
+ int setupFormats(V4L2SubdeviceFormat *format,
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
+ void imageBufferReady(FrameBuffer *buffer);
+ void clearIncompleteRequests();
+
+ unsigned int streamIndex(const Stream *stream) const
+ {
+ return stream - &streams_.front();
+ }
+
+ struct Entity {
+ /* The media entity, always valid. */
+ MediaEntity *entity;
+ /*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
+ * The local sink pad connected to the upstream entity, null for
+ * the camera sensor at the beginning of the pipeline.
+ */
+ const MediaPad *sink;
+ /*
+ * The local source pad connected to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ const MediaPad *source;
+ /*
+ * The link on the source pad, to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ MediaLink *sourceLink;
+ };
+
+ struct Configuration {
+ uint32_t code;
+ Size sensorSize;
+ PixelFormat captureFormat;
+ Size captureSize;
+ std::vector<PixelFormat> outputFormats;
+ SizeRange outputSizes;
+ };
+
+ std::vector<Stream> streams_;
+
+ /*
+ * All entities in the pipeline, from the camera sensor to the video
+ * node.
+ */
+ std::list<Entity> entities_;
+ std::unique_ptr<CameraSensor> sensor_;
+ V4L2VideoDevice *video_;
+
+ std::vector<Configuration> configs_;
+ std::map<PixelFormat, std::vector<const Configuration *>> formats_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ struct RequestOutputs {
+ Request *request;
+ std::map<const Stream *, FrameBuffer *> outputs;
+ };
+ std::queue<RequestOutputs> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
+
+private:
+ void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
+
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady(uint32_t frame, uint32_t bufferId);
+ void setSensorControls(const ControlList &sensorControls);
+};
+
+class SimpleCameraConfiguration : public CameraConfiguration
+{
+public:
+ SimpleCameraConfiguration(Camera *camera, SimpleCameraData *data);
+
+ Status validate() override;
+
+ const SimpleCameraData::Configuration *pipeConfig() const
+ {
+ return pipeConfig_;
+ }
+
+ bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
+
+private:
+ /*
+ * The SimpleCameraData instance is guaranteed to be valid as long as
+ * the corresponding Camera instance is valid. In order to borrow a
+ * reference to the camera data, store a new reference to the camera.
+ */
+ std::shared_ptr<Camera> camera_;
+ SimpleCameraData *data_;
+
+ const SimpleCameraData::Configuration *pipeConfig_;
+ bool needConversion_;
+ Transform combinedTransform_;
+};
+
+class SimplePipelineHandler : public PipelineHandler
+{
+public:
+ SimplePipelineHandler(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ V4L2VideoDevice *video(const MediaEntity *entity);
+ V4L2Subdevice *subdev(const MediaEntity *entity);
+ MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
+
+protected:
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr unsigned int kNumInternalBuffers = 3;
+
+ struct EntityData {
+ std::unique_ptr<V4L2VideoDevice> video;
+ std::unique_ptr<V4L2Subdevice> subdev;
+ std::map<const MediaPad *, SimpleCameraData *> owners;
+ };
+
+ SimpleCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<SimpleCameraData *>(camera->_d());
+ }
+
+ std::vector<MediaEntity *> locateSensors(MediaDevice *media);
+ static int resetRoutingTable(V4L2Subdevice *subdev);
+
+ const MediaPad *acquirePipeline(SimpleCameraData *data);
+ void releasePipeline(SimpleCameraData *data);
+
+ std::map<const MediaEntity *, EntityData> entities_;
+
+ MediaDevice *converter_;
+ bool swIspEnabled_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
+ unsigned int numStreams,
+ MediaEntity *sensor)
+ : Camera::Private(pipe), streams_(numStreams)
+{
+ /*
+ * Find the shortest path from the camera sensor to a video capture
+ * device using the breadth-first search algorithm. This heuristic will
+ * be most likely to skip paths that aren't suitable for the simple
+ * pipeline handler on more complex devices, and is guaranteed to
+ * produce a valid path on all devices that have a single option.
+ *
+ * For instance, on the IPU-based i.MX6Q, the shortest path will skip
+ * encoders and image converters, and will end in a CSI capture device.
+ */
+ std::unordered_set<MediaEntity *> visited;
+ std::queue<std::tuple<MediaEntity *, MediaPad *>> queue;
+
+ /* Remember at each entity where we came from. */
+ std::unordered_map<MediaEntity *, Entity> parents;
+ MediaEntity *entity = nullptr;
+ MediaEntity *video = nullptr;
+ MediaPad *sinkPad;
+
+ queue.push({ sensor, nullptr });
+
+ while (!queue.empty()) {
+ std::tie(entity, sinkPad) = queue.front();
+ queue.pop();
+
+ /* Found the capture device. */
+ if (entity->function() == MEDIA_ENT_F_IO_V4L) {
+ LOG(SimplePipeline, Debug)
+ << "Found capture device " << entity->name();
+ video = entity;
+ break;
+ }
+
+ visited.insert(entity);
+
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
+ for (MediaLink *link : pad->links()) {
+ MediaEntity *next = link->sink()->entity();
+ if (visited.find(next) == visited.end()) {
+ queue.push({ next, link->sink() });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
+ }
+ }
+ }
+ }
+
+ if (!video)
+ return;
+
+ /*
+ * With the parents, we can follow back our way from the capture device
+ * to the sensor. Store all the entities in the pipeline, from the
+ * camera sensor to the video node, in entities_.
+ */
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
+
+ for (auto it = parents.find(entity); it != parents.end();
+ it = parents.find(entity)) {
+ const Entity &e = it->second;
+ entities_.push_front(e);
+ entity = e.entity;
+ }
+
+ /* Finally also remember the sensor. */
+ sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!sensor_)
+ return;
+
+ LOG(SimplePipeline, Debug)
+ << "Found pipeline: "
+ << utils::join(entities_, " -> ",
+ [](const Entity &e) {
+ std::string s = "[";
+ if (e.sink)
+ s += std::to_string(e.sink->index()) + "|";
+ s += e.entity->name();
+ if (e.source)
+ s += "|" + std::to_string(e.source->index());
+ s += "]";
+ return s;
+ });
+}
+
+SimplePipelineHandler *SimpleCameraData::pipe()
+{
+ return static_cast<SimplePipelineHandler *>(Camera::Private::pipe());
+}
+
+int SimpleCameraData::init()
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+ int ret;
+
+ /* Open the converter, if any. */
+ MediaDevice *converter = pipe->converter();
+ if (converter) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create converter, disabling format conversion";
+ converter_.reset();
+ } else {
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+ * Instantiate the Software ISP if it is enabled for the given driver
+ * and no converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get(), &controlInfo_);
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
+ }
+ }
+
+ video_ = pipe->video(entities_.back().entity);
+ ASSERT(video_);
+
+ /*
+ * Setup links first as some subdev drivers take active links into
+ * account to propagate TRY formats. Such is life :-(
+ */
+ ret = setupLinks();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Generate the list of possible pipeline configurations by trying each
+ * media bus format and size supported by the sensor.
+ */
+ for (unsigned int code : sensor_->mbusCodes()) {
+ for (const Size &size : sensor_->sizes(code))
+ tryPipeline(code, size);
+ }
+
+ if (configs_.empty()) {
+ LOG(SimplePipeline, Error) << "No valid configuration found";
+ return -EINVAL;
+ }
+
+ /* Map the pixel formats to configurations. */
+ for (const Configuration &config : configs_) {
+ formats_[config.captureFormat].push_back(&config);
+
+ for (PixelFormat fmt : config.outputFormats)
+ formats_[fmt].push_back(&config);
+ }
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Generate a list of supported pipeline configurations for a sensor media bus
+ * code and size.
+ *
+ * First propagate the media bus code and size through the pipeline from the
+ * camera sensor to the video node. Then, query the video node for all supported
+ * pixel formats compatible with the media bus code. For each pixel format, store
+ * a full pipeline configuration in the configs_ vector.
+ */
+void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
+{
+ /*
+ * Propagate the format through the pipeline, and enumerate the
+ * corresponding possible V4L2 pixel formats on the video node.
+ */
+ V4L2SubdeviceFormat format{};
+ format.code = code;
+ format.size = size;
+
+ int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
+ if (ret < 0) {
+ /* Pipeline configuration failed, skip this configuration. */
+ format.code = code;
+ format.size = size;
+ LOG(SimplePipeline, Debug)
+ << "Sensor format " << format
+ << " not supported for this pipeline";
+ return;
+ }
+
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
+
+ LOG(SimplePipeline, Debug)
+ << "Adding configuration for " << format.size
+ << " in pixel formats [ "
+ << utils::join(videoFormats, ", ",
+ [](const auto &f) {
+ return f.first.toString();
+ })
+ << " ]";
+
+ for (const auto &videoFormat : videoFormats) {
+ PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
+ if (!pixelFormat)
+ continue;
+
+ Configuration config;
+ config.code = code;
+ config.sensorSize = size;
+ config.captureFormat = pixelFormat;
+ config.captureSize = format.size;
+
+ if (converter_) {
+ config.outputFormats = converter_->formats(pixelFormat);
+ config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+ /* Do not use swIsp for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+
+ configs_.push_back(config);
+ }
+}
+
+int SimpleCameraData::setupLinks()
+{
+ int ret;
+
+ /*
+ * Configure all links along the pipeline. Some entities may not allow
+ * multiple sink links to be enabled together, even on different sink
+ * pads. We must thus start by disabling all sink links (but the one we
+ * want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
+ */
+ MediaLink *sinkLink = nullptr;
+
+ for (SimpleCameraData::Entity &e : entities_) {
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
+
+ for (MediaLink *link : pad->links()) {
+ if (link == sinkLink)
+ continue;
+
+ if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
+ !(link->flags() & MEDIA_LNK_FL_IMMUTABLE)) {
+ ret = link->setEnabled(false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ sinkLink = e.sourceLink;
+ }
+
+ return 0;
+}
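+
+/*
+ * To illustrate the ordering constraint above with a hypothetical
+ * sensor -> csi -> video pipeline: when processing the csi entity, every
+ * enabled, mutable link on its pads other than the sensor->csi link is
+ * disabled first, and only then is the sensor->csi link enabled.
+ */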
+
+int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
+ V4L2Subdevice::Whence whence,
+ Transform transform)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+ int ret;
+
+ /*
+ * Configure the format on the sensor output and propagate it through
+ * the pipeline.
+ */
+ ret = sensor_->setFormat(format, transform);
+ if (ret < 0)
+ return ret;
+
+ for (const Entity &e : entities_) {
+ if (!e.sourceLink)
+ break;
+
+ MediaLink *link = e.sourceLink;
+ MediaPad *source = link->source();
+ MediaPad *sink = link->sink();
+
+ if (source->entity() != sensor_->entity()) {
+ V4L2Subdevice *subdev = pipe->subdev(source->entity());
+ ret = subdev->getFormat(source->index(), format, whence);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (sink->entity()->function() != MEDIA_ENT_F_IO_V4L) {
+ V4L2SubdeviceFormat sourceFormat = *format;
+
+ V4L2Subdevice *subdev = pipe->subdev(sink->entity());
+ ret = subdev->setFormat(sink->index(), format, whence);
+ if (ret < 0)
+ return ret;
+
+ if (format->code != sourceFormat.code ||
+ format->size != sourceFormat.size) {
+ LOG(SimplePipeline, Debug)
+ << "Source '" << source->entity()->name()
+ << "':" << source->index()
+ << " produces " << sourceFormat
+ << ", sink '" << sink->entity()->name()
+ << "':" << sink->index()
+ << " requires " << *format;
+ return -EINVAL;
+ }
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Link " << *link << ": configured with format "
+ << *format;
+ }
+
+ return 0;
+}
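+
+/*
+ * A worked trace of the propagation, assuming a sensor -> csi -> video
+ * pipeline and a 1920x1080 raw Bayer sensor format: the format is set on
+ * the sensor output and then on the csi sink pad, failing with -EINVAL if
+ * the csi subdev adjusts the code or size; the format is then read back
+ * from the csi source pad, which is what the video node will receive.
+ */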
+
+void SimpleCameraData::imageBufferReady(FrameBuffer *buffer)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+
+ /*
+ * If an error occurred during capture, or if the buffer was cancelled,
+ * complete the request, even if the converter is in use as there's no
+ * point converting an erroneous buffer.
+ */
+ if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
+ if (!useConversion_) {
+ /* No conversion, just complete the request. */
+ Request *request = buffer->request();
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+ return;
+ }
+
+ /*
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
+ */
+ if (buffer->metadata().status != FrameMetadata::FrameCancelled)
+ video_->queueBuffer(buffer);
+
+ if (conversionQueue_.empty())
+ return;
+
+ const RequestOutputs &outputs = conversionQueue_.front();
+ for (auto &[stream, buf] : outputs.outputs)
+ pipe->completeBuffer(outputs.request, buf);
+ pipe->completeRequest(outputs.request);
+ conversionQueue_.pop();
+
+ return;
+ }
+
+ /*
+ * Record the sensor's timestamp in the request metadata. The request
+ * needs to be obtained from the user-facing buffer, as internal
+ * buffers are free-wheeling and have no request associated with them.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal if the platform provides it.
+ */
+ Request *request = buffer->request();
+
+ if (useConversion_ && !conversionQueue_.empty()) {
+ const std::map<const Stream *, FrameBuffer *> &outputs =
+ conversionQueue_.front().outputs;
+ if (!outputs.empty()) {
+ FrameBuffer *outputBuffer = outputs.begin()->second;
+ if (outputBuffer)
+ request = outputBuffer->request();
+ }
+ }
+
+ if (request)
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ /*
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
+ */
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
+ video_->queueBuffer(buffer);
+ return;
+ }
+
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front().outputs);
+ else
+ /*
+ * request->sequence() cannot be retrieved from `buffer' inside
+ * queueBuffers(), because the unique_ptr bookkeeping invalidates
+ * buffer->request() by that point.
+ */
+ swIsp_->queueBuffers(request->sequence(), buffer,
+ conversionQueue_.front().outputs);
+
+ conversionQueue_.pop();
+ return;
+ }
+
+ /* Otherwise simply complete the request. */
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+}
+
+void SimpleCameraData::clearIncompleteRequests()
+{
+ while (!conversionQueue_.empty()) {
+ pipe()->cancelRequest(conversionQueue_.front().request);
+ conversionQueue_.pop();
+ }
+}
+
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
+{
+ /* Queue the input buffer back for capture. */
+ video_->queueBuffer(buffer);
+}
+
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+
+ /* Complete the buffer and the request. */
+ Request *request = buffer->request();
+ if (pipe->completeBuffer(request, buffer))
+ pipe->completeRequest(request);
+}
+
+void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId)
+{
+ swIsp_->processStats(frame, bufferId,
+ delayedCtrls_->get(frame));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+ if (!pad) {
+ LOG(SimplePipeline, Warning)
+ << "Entity " << entity->name()
+ << " has invalid route source pad "
+ << route.source.pad;
+ /* Bail out rather than keep a null pad that would be dereferenced. */
+ return {};
+ }
+
+ pads.push_back(pad);
+ }
+
+ return pads;
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
+ SimpleCameraData *data)
+ : CameraConfiguration(), camera_(camera->shared_from_this()),
+ data_(data), pipeConfig_(nullptr)
+{
+}
+
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
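+
+/*
+ * For instance, with supported sizes from 32x32 to 2048x1536 in steps of
+ * 16x8 (hypothetical values), a requested 1000x600 adjusts to 992x600:
+ * bounding leaves 1000x600, shrinking by the minimum gives 968x568,
+ * aligning down to the steps gives 960x568, and growing by the minimum
+ * again yields 992x600.
+ */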
+
+} /* namespace */
+
+CameraConfiguration::Status SimpleCameraConfiguration::validate()
+{
+ const CameraSensor *sensor = data_->sensor_.get();
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > data_->streams_.size()) {
+ config_.resize(data_->streams_.size());
+ status = Adjusted;
+ }
+
+ /* Find the largest stream size. */
+ Size maxStreamSize;
+ for (const StreamConfiguration &cfg : config_)
+ maxStreamSize.expandTo(cfg.size);
+
+ LOG(SimplePipeline, Debug)
+ << "Largest stream size is " << maxStreamSize;
+
+ /*
+ * Find the best configuration for the pipeline using a heuristic.
+ * First select the pixel format based on the streams (which are
+ * considered ordered from highest to lowest priority). Default to the
+ * first pipeline configuration if no streams request a supported pixel
+ * format.
+ */
+ const std::vector<const SimpleCameraData::Configuration *> *configs =
+ &data_->formats_.begin()->second;
+
+ for (const StreamConfiguration &cfg : config_) {
+ auto it = data_->formats_.find(cfg.pixelFormat);
+ if (it != data_->formats_.end()) {
+ configs = &it->second;
+ break;
+ }
+ }
+
+ /*
+ * \todo Pick the best sensor output media bus format when the
+ * requested pixel format can be produced from multiple sensor media
+ * bus formats.
+ */
+
+ /*
+ * Then pick, among the possible configurations for the pixel format,
+ * the smallest sensor resolution that can accommodate all streams
+ * without upscaling.
+ */
+ const SimpleCameraData::Configuration *maxPipeConfig = nullptr;
+ pipeConfig_ = nullptr;
+
+ for (const SimpleCameraData::Configuration *pipeConfig : *configs) {
+ const Size &size = pipeConfig->captureSize;
+
+ if (size.width >= maxStreamSize.width &&
+ size.height >= maxStreamSize.height) {
+ if (!pipeConfig_ || size < pipeConfig_->captureSize)
+ pipeConfig_ = pipeConfig;
+ }
+
+ if (!maxPipeConfig || maxPipeConfig->captureSize < size)
+ maxPipeConfig = pipeConfig;
+ }
+
+ /* If no configuration was large enough, select the largest one. */
+ if (!pipeConfig_)
+ pipeConfig_ = maxPipeConfig;
+
+ LOG(SimplePipeline, Debug)
+ << "Picked "
+ << V4L2SubdeviceFormat{ pipeConfig_->code, pipeConfig_->sensorSize, {} }
+ << " -> " << pipeConfig_->captureSize
+ << "-" << pipeConfig_->captureFormat
+ << " for max stream size " << maxStreamSize;
+
+ /*
+ * Adjust the requested streams.
+ *
+ * Enable usage of the converter when producing multiple streams, as
+ * the video capture device can't capture to multiple buffers.
+ *
+ * It is possible to produce up to one stream without conversion
+ * (provided the format and size match), at the expense of more complex
+ * buffer handling (including allocation of internal buffers to be used
+ * when a request doesn't contain a buffer for the stream that doesn't
+ * require any conversion, similar to raw capture use cases). This is
+ * left as a future improvement.
+ */
+ needConversion_ = config_.size() > 1;
+
+ for (unsigned int i = 0; i < config_.size(); ++i) {
+ StreamConfiguration &cfg = config_[i];
+
+ /* Adjust the pixel format and size. */
+ auto it = std::find(pipeConfig_->outputFormats.begin(),
+ pipeConfig_->outputFormats.end(),
+ cfg.pixelFormat);
+ if (it == pipeConfig_->outputFormats.end())
+ it = pipeConfig_->outputFormats.begin();
+
+ PixelFormat pixelFormat = *it;
+ if (cfg.pixelFormat != pixelFormat) {
+ LOG(SimplePipeline, Debug) << "Adjusting pixel format";
+ cfg.pixelFormat = pixelFormat;
+ status = Adjusted;
+ }
+
+ if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+ * the smaller valid output size closest to the requested size.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
+ LOG(SimplePipeline, Debug)
+ << "Adjusting size from " << cfg.size
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
+ status = Adjusted;
+ }
+
+ /* \todo Create a libcamera core class to group format and size */
+ if (cfg.pixelFormat != pipeConfig_->captureFormat ||
+ cfg.size != pipeConfig_->captureSize)
+ needConversion_ = true;
+
+ /* Set the stride, frameSize and bufferCount. */
+ if (needConversion_) {
+ std::tie(cfg.stride, cfg.frameSize) =
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
+ if (cfg.stride == 0)
+ return Invalid;
+ } else {
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ int ret = data_->video_->tryFormat(&format);
+ if (ret < 0)
+ return Invalid;
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+ }
+
+ cfg.bufferCount = 4;
+ }
+
+ return status;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
+ : PipelineHandler(manager), converter_(nullptr)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ SimpleCameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
+
+ if (roles.empty())
+ return config;
+
+ /* Create the formats map. */
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+
+ for (const SimpleCameraData::Configuration &cfg : data->configs_) {
+ for (PixelFormat format : cfg.outputFormats)
+ formats[format].push_back(cfg.outputSizes);
+ }
+
+ /* Sort the sizes and merge any consecutive overlapping ranges. */
+ for (auto &[format, sizes] : formats) {
+ std::sort(sizes.begin(), sizes.end(),
+ [](SizeRange &a, SizeRange &b) {
+ return a.min < b.min;
+ });
+
+ auto cur = sizes.begin();
+ auto next = cur;
+
+ while (++next != sizes.end()) {
+ if (cur->max.width >= next->min.width &&
+ cur->max.height >= next->min.height)
+ cur->max = next->max;
+ else if (++cur != next)
+ *cur = *next;
+ }
+
+ sizes.erase(++cur, sizes.end());
+ }
+
+ /*
+ * Create the stream configurations. Take the first entry in the formats
+ * map as the default, for lack of a better option.
+ *
+ * \todo Implement a better way to pick the default format
+ */
+ for ([[maybe_unused]] StreamRole role : roles) {
+ StreamConfiguration cfg{ StreamFormats{ formats } };
+ cfg.pixelFormat = formats.begin()->first;
+ cfg.size = formats.begin()->second[0].max;
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
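+
+/*
+ * The range merge above works as in this sketch: sorted ranges
+ * [32x32-640x480] and [320x240-1280x720] overlap, so they collapse into
+ * [32x32-1280x720], while a disjoint [1920x1080-1920x1080] entry is kept
+ * as a separate range.
+ */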
+
+int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
+{
+ SimpleCameraConfiguration *config =
+ static_cast<SimpleCameraConfiguration *>(c);
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+ int ret;
+
+ /*
+ * Configure links on the pipeline and propagate formats from the
+ * sensor to the video node.
+ */
+ ret = data->setupLinks();
+ if (ret < 0)
+ return ret;
+
+ const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
+ V4L2SubdeviceFormat format{};
+ format.code = pipeConfig->code;
+ format.size = pipeConfig->sensorSize;
+
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
+ if (ret < 0)
+ return ret;
+
+ /* Configure the video node. */
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
+
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = videoFormat;
+ captureFormat.size = pipeConfig->captureSize;
+
+ ret = video->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ if (captureFormat.planesCount != 1) {
+ LOG(SimplePipeline, Error)
+ << "Planar formats using non-contiguous memory not supported";
+ return -EINVAL;
+ }
+
+ if (captureFormat.fourcc != videoFormat ||
+ captureFormat.size != pipeConfig->captureSize) {
+ LOG(SimplePipeline, Error)
+ << "Unable to configure capture in "
+ << pipeConfig->captureSize << "-" << videoFormat
+ << " (got " << captureFormat << ")";
+ return -EINVAL;
+ }
+
+ /* Configure the converter if needed. */
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
+ data->useConversion_ = config->needConversion();
+
+ for (unsigned int i = 0; i < config->size(); ++i) {
+ StreamConfiguration &cfg = config->at(i);
+
+ cfg.setStream(&data->streams_[i]);
+
+ if (data->useConversion_)
+ outputCfgs.push_back(cfg);
+ }
+
+ if (outputCfgs.empty())
+ return 0;
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ data->video_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ StreamConfiguration inputCfg;
+ inputCfg.pixelFormat = pipeConfig->captureFormat;
+ inputCfg.size = pipeConfig->captureSize;
+ inputCfg.stride = captureFormat.planes[0].bpl;
+ inputCfg.bufferCount = kNumInternalBuffers;
+
+ if (data->converter_) {
+ return data->converter_->configure(inputCfg, outputCfgs);
+ } else {
+ ipa::soft::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->sensor_->controls();
+ return data->swIsp_->configure(inputCfg, outputCfgs, configInfo);
+ }
+}
+
+int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ SimpleCameraData *data = cameraData(camera);
+ unsigned int count = stream->configuration().bufferCount;
+
+ /*
+ * Export buffers on the converter or capture video node, depending on
+ * whether the converter is used or not.
+ */
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
+ else
+ return data->video_->exportBuffers(count, buffers);
+}
+
+int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+ int ret;
+
+ const MediaPad *pad = acquirePipeline(data);
+ if (pad) {
+ LOG(SimplePipeline, Info)
+ << "Failed to acquire pipeline, entity "
+ << pad->entity()->name() << " in use";
+ return -EBUSY;
+ }
+
+ if (data->useConversion_) {
+ /*
+ * When using the converter, allocate a fixed number of internal
+ * buffers.
+ */
+ ret = video->allocateBuffers(kNumInternalBuffers,
+ &data->conversionBuffers_);
+ } else {
+ /* Otherwise, prepare for using buffers from the only stream. */
+ Stream *stream = &data->streams_[0];
+ ret = video->importBuffers(stream->configuration().bufferCount);
+ }
+ if (ret < 0) {
+ releasePipeline(data);
+ return ret;
+ }
+
+ video->bufferReady.connect(data, &SimpleCameraData::imageBufferReady);
+
+ ret = video->streamOn();
+ if (ret < 0) {
+ stop(camera);
+ return ret;
+ }
+
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
+ if (ret < 0) {
+ stop(camera);
+ return ret;
+ }
+
+ /* Queue all internal buffers for capture. */
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
+ video->queueBuffer(buffer.get());
+ }
+
+ return 0;
+}
+
+void SimplePipelineHandler::stopDevice(Camera *camera)
+{
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
+
+ video->streamOff();
+ video->releaseBuffers();
+
+ video->bufferReady.disconnect(data, &SimpleCameraData::imageBufferReady);
+
+ data->clearIncompleteRequests();
+ data->conversionBuffers_.clear();
+
+ releasePipeline(data);
+}
+
+int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
+{
+ SimpleCameraData *data = cameraData(camera);
+ int ret;
+
+ std::map<const Stream *, FrameBuffer *> buffers;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ /*
+ * If conversion is needed, push the buffer to the converter
+ * queue, it will be handed to the converter in the capture
+ * completion handler.
+ */
+ if (data->useConversion_) {
+ buffers.emplace(stream, buffer);
+ } else {
+ ret = data->video_->queueBuffer(buffer);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (data->useConversion_) {
+ data->conversionQueue_.push({ request, std::move(buffers) });
+ if (data->swIsp_)
+ data->swIsp_->queueRequest(request->sequence(), request->controls());
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Match and Setup
+ */
+
+std::vector<MediaEntity *>
+SimplePipelineHandler::locateSensors(MediaDevice *media)
+{
+ std::vector<MediaEntity *> entities;
+
+ /*
+ * Gather all the camera sensor entities based on the function they
+ * expose.
+ */
+ for (MediaEntity *entity : media->entities()) {
+ if (entity->function() == MEDIA_ENT_F_CAM_SENSOR)
+ entities.push_back(entity);
+ }
+
+ if (entities.empty())
+ return {};
+
+ /*
+ * Sensors can be made of multiple entities. For instance, a raw sensor
+ * can be connected to an ISP, and the combination of both should be
+ * treated as one sensor. To support this, as a crude heuristic, check
+ * the downstream entity from the camera sensor, and if it is an ISP,
+ * use it instead of the sensor.
+ */
+ std::vector<MediaEntity *> sensors;
+
+ for (MediaEntity *entity : entities) {
+ /*
+ * Locate the downstream entity by following the first link
+ * from a source pad.
+ */
+ const MediaLink *link = nullptr;
+
+ for (const MediaPad *pad : entity->pads()) {
+ if ((pad->flags() & MEDIA_PAD_FL_SOURCE) &&
+ !pad->links().empty()) {
+ link = pad->links()[0];
+ break;
+ }
+ }
+
+ if (!link)
+ continue;
+
+ MediaEntity *remote = link->sink()->entity();
+ if (remote->function() == MEDIA_ENT_F_PROC_VIDEO_ISP)
+ sensors.push_back(remote);
+ else
+ sensors.push_back(entity);
+ }
+
+ /*
+ * Remove duplicates, in case multiple sensors are connected to the
+ * same ISP.
+ */
+ std::sort(sensors.begin(), sensors.end());
+ auto last = std::unique(sensors.begin(), sensors.end());
+ sensors.erase(last, sensors.end());
+
+ return sensors;
+}
+
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * If the routing table is empty we won't be able to meaningfully use
+ * the subdev.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
+bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
+{
+ const SimplePipelineInfo *info = nullptr;
+ unsigned int numStreams = 1;
+ MediaDevice *media;
+
+ for (const SimplePipelineInfo &inf : supportedDevices) {
+ DeviceMatch dm(inf.driver);
+ media = acquireMediaDevice(enumerator, dm);
+ if (media) {
+ info = &inf;
+ break;
+ }
+ }
+
+ if (!media)
+ return false;
+
+ for (const auto &[name, streams] : info->converters) {
+ DeviceMatch converterMatch(name);
+ converter_ = acquireMediaDevice(enumerator, converterMatch);
+ if (converter_) {
+ numStreams = streams;
+ break;
+ }
+ }
+
+ swIspEnabled_ = info->swIspEnabled;
+
+ /* Locate the sensors. */
+ std::vector<MediaEntity *> sensors = locateSensors(media);
+ if (sensors.empty()) {
+ LOG(SimplePipeline, Info) << "No sensor found for " << media->deviceNode();
+ return false;
+ }
+
+ LOG(SimplePipeline, Debug) << "Sensor found for " << media->deviceNode();
+
+ /*
+ * Create one camera data instance for each sensor and gather all
+ * entities in all pipelines.
+ */
+ std::vector<std::unique_ptr<SimpleCameraData>> pipelines;
+ std::set<MediaEntity *> entities;
+
+ pipelines.reserve(sensors.size());
+
+ for (MediaEntity *sensor : sensors) {
+ std::unique_ptr<SimpleCameraData> data =
+ std::make_unique<SimpleCameraData>(this, numStreams, sensor);
+ if (!data->isValid()) {
+ LOG(SimplePipeline, Error)
+ << "No valid pipeline for sensor '"
+ << sensor->name() << "', skipping";
+ continue;
+ }
+
+ for (SimpleCameraData::Entity &entity : data->entities_)
+ entities.insert(entity.entity);
+
+ pipelines.push_back(std::move(data));
+ }
+
+ if (entities.empty())
+ return false;
+
+ /*
+ * Insert all entities in the global entities list. Create and open
+ * V4L2VideoDevice and V4L2Subdevice instances for the corresponding
+ * entities.
+ */
+ for (MediaEntity *entity : entities) {
+ std::unique_ptr<V4L2VideoDevice> video;
+ std::unique_ptr<V4L2Subdevice> subdev;
+ int ret;
+
+ switch (entity->type()) {
+ case MediaEntity::Type::V4L2VideoDevice:
+ video = std::make_unique<V4L2VideoDevice>(entity);
+ ret = video->open();
+ if (ret < 0) {
+ LOG(SimplePipeline, Error)
+ << "Failed to open " << video->deviceNode()
+ << ": " << strerror(-ret);
+ return false;
+ }
+ break;
+
+ case MediaEntity::Type::V4L2Subdevice:
+ subdev = std::make_unique<V4L2Subdevice>(entity);
+ ret = subdev->open();
+ if (ret < 0) {
+ LOG(SimplePipeline, Error)
+ << "Failed to open " << subdev->deviceNode()
+ << ": " << strerror(-ret);
+ return false;
+ }
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ entities_[entity] = { std::move(video), std::move(subdev), {} };
+ }
+
+ /* Initialize each pipeline and register a corresponding camera. */
+ bool registered = false;
+
+ for (std::unique_ptr<SimpleCameraData> &data : pipelines) {
+ int ret = data->init();
+ if (ret < 0)
+ continue;
+
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &stream) { return &stream; });
+
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
+ registered = true;
+ }
+
+ return registered;
+}
+
+V4L2VideoDevice *SimplePipelineHandler::video(const MediaEntity *entity)
+{
+ auto iter = entities_.find(entity);
+ if (iter == entities_.end())
+ return nullptr;
+
+ return iter->second.video.get();
+}
+
+V4L2Subdevice *SimplePipelineHandler::subdev(const MediaEntity *entity)
+{
+ auto iter = entities_.find(entity);
+ if (iter == entities_.end())
+ return nullptr;
+
+ return iter->second.subdev.get();
+}
+
+/**
+ * \brief Acquire all resources needed by the camera pipeline
+ * \return nullptr on success, a pointer to the contended pad on error
+ */
+const MediaPad *SimplePipelineHandler::acquirePipeline(SimpleCameraData *data)
+{
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ const EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink) {
+ auto iter = edata.owners.find(entity.sink);
+ if (iter != edata.owners.end() && iter->second != data)
+ return entity.sink;
+ }
+
+ if (entity.source) {
+ auto iter = edata.owners.find(entity.source);
+ if (iter != edata.owners.end() && iter->second != data)
+ return entity.source;
+ }
+ }
+
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink)
+ edata.owners[entity.sink] = data;
+ if (entity.source)
+ edata.owners[entity.source] = data;
+ }
+
+ return nullptr;
+}
+
+void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
+{
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink) {
+ auto iter = edata.owners.find(entity.sink);
+ ASSERT(iter->second == data);
+ edata.owners.erase(iter);
+ }
+
+ if (entity.source) {
+ auto iter = edata.owners.find(entity.source);
+ ASSERT(iter->second == data);
+ edata.owners.erase(iter);
+ }
+ }
+}
+
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/meson.build b/src/libcamera/pipeline/uvcvideo/meson.build
index c19ae238..a3a91074 100644
--- a/src/libcamera/pipeline/uvcvideo/meson.build
+++ b/src/libcamera/pipeline/uvcvideo/meson.build
@@ -1,3 +1,5 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'uvcvideo.cpp',
])
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index ffbddf27..8c2c6baf 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,58 +2,75 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
-#include <iomanip>
-#include <sys/sysmacros.h>
-#include <tuple>
+#include <cmath>
+#include <fstream>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_videodevice.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(UVC)
-class UVCCameraData : public CameraData
+class UVCCameraData : public Camera::Private
{
public:
UVCCameraData(PipelineHandler *pipe)
- : CameraData(pipe), video_(nullptr)
+ : Camera::Private(pipe)
{
}
- ~UVCCameraData()
- {
- delete video_;
- }
+ int init(MediaDevice *media);
+ void addControl(uint32_t cid, const ControlInfo &v4l2info,
+ ControlInfoMap::Map *ctrls);
+ void imageBufferReady(FrameBuffer *buffer);
- int init(MediaEntity *entity);
- void bufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
- V4L2VideoDevice *video_;
+ Mutex openLock_;
+ std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
{
public:
- UVCCameraConfiguration();
+ UVCCameraConfiguration(UVCCameraData *data);
Status validate() override;
+
+private:
+ UVCCameraData *data_;
};
class PipelineHandlerUVC : public PipelineHandler
@@ -61,32 +78,36 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
+ int processControl(ControlList *controls, unsigned int id,
+ const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
- UVCCameraData *cameraData(const Camera *camera)
+ bool acquireDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ UVCCameraData *cameraData(Camera *camera)
{
- return static_cast<UVCCameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<UVCCameraData *>(camera->_d());
}
};
-UVCCameraConfiguration::UVCCameraConfiguration()
- : CameraConfiguration()
+UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
+ : CameraConfiguration(), data_(data)
{
}
@@ -97,6 +118,11 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
+ status = Adjusted;
+ }
+
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
@@ -113,9 +139,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (iter == pixelFormats.end()) {
cfg.pixelFormat = pixelFormats.front();
LOG(UVC, Debug)
- << "Adjusting pixel format from "
- << pixelFormat.toString() << " to "
- << cfg.pixelFormat.toString();
+ << "Adjusting pixel format from " << pixelFormat
+ << " to " << cfg.pixelFormat;
status = Adjusted;
}
@@ -130,13 +155,48 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (cfg.size != size) {
LOG(UVC, Debug)
- << "Adjusting size from " << size.toString()
- << " to " << cfg.size.toString();
+ << "Adjusting size from " << size << " to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ /*
+ * For power-consumption reasons video_ is closed when the camera is not
+ * acquired. Open it here if necessary.
+ */
+ {
+ bool opened = false;
+
+ MutexLocker locker(data_->openLock_);
+
+ if (!data_->video_->isOpen()) {
+ int ret = data_->video_->open();
+ if (ret)
+ return Invalid;
+
+ opened = true;
+ }
+
+ int ret = data_->video_->tryFormat(&format);
+ if (opened)
+ data_->video_->close();
+ if (ret)
+ return Invalid;
+ }
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -145,28 +205,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration();
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
- data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- std::transform(v4l2Formats.begin(), v4l2Formats.end(),
- std::inserter(deviceFormats, deviceFormats.begin()),
- [&](const decltype(v4l2Formats)::value_type &format) {
- return decltype(deviceFormats)::value_type{
- data->video_->toPixelFormat(format.first),
- format.second
- };
- });
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -186,7 +236,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- V4L2DeviceFormat format = {};
+ V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
@@ -212,7 +262,7 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera, Stream *stream,
return data->video_->exportBuffers(count, buffers);
}
-int PipelineHandlerUVC::start(Camera *camera)
+int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
UVCCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
@@ -230,35 +280,101 @@ int PipelineHandlerUVC::start(Camera *camera)
return 0;
}
-void PipelineHandlerUVC::stop(Camera *camera)
+void PipelineHandlerUVC::stopDevice(Camera *camera)
{
UVCCameraData *data = cameraData(camera);
data->video_->streamOff();
data->video_->releaseBuffers();
}
-int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
+int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
+ const ControlValue &value)
{
- ControlList controls(data->video_->controls());
+ uint32_t cid;
+
+ if (id == controls::Brightness)
+ cid = V4L2_CID_BRIGHTNESS;
+ else if (id == controls::Contrast)
+ cid = V4L2_CID_CONTRAST;
+ else if (id == controls::Saturation)
+ cid = V4L2_CID_SATURATION;
+ else if (id == controls::AeEnable)
+ cid = V4L2_CID_EXPOSURE_AUTO;
+ else if (id == controls::ExposureTime)
+ cid = V4L2_CID_EXPOSURE_ABSOLUTE;
+ else if (id == controls::AnalogueGain)
+ cid = V4L2_CID_GAIN;
+ else
+ return -EINVAL;
+
+ const ControlInfo &v4l2Info = controls->infoMap()->at(cid);
+ int32_t min = v4l2Info.min().get<int32_t>();
+ int32_t def = v4l2Info.def().get<int32_t>();
+ int32_t max = v4l2Info.max().get<int32_t>();
+
+ /*
+ * See UVCCameraData::addControl() for explanations of the different
+ * value mappings.
+ */
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS: {
+ float scale = std::max(max - def, def - min);
+ float fvalue = value.get<float>() * scale + def;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
+
+ case V4L2_CID_SATURATION: {
+ float scale = def - min;
+ float fvalue = value.get<float>() * scale + min;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
- if (id == controls::Brightness) {
- controls.set(V4L2_CID_BRIGHTNESS, value);
- } else if (id == controls::Contrast) {
- controls.set(V4L2_CID_CONTRAST, value);
- } else if (id == controls::Saturation) {
- controls.set(V4L2_CID_SATURATION, value);
- } else if (id == controls::ManualExposure) {
- controls.set(V4L2_CID_EXPOSURE_AUTO, static_cast<int32_t>(1));
- controls.set(V4L2_CID_EXPOSURE_ABSOLUTE, value);
- } else if (id == controls::ManualGain) {
- controls.set(V4L2_CID_GAIN, value);
+ case V4L2_CID_EXPOSURE_AUTO: {
+ int32_t ivalue = value.get<bool>()
+ ? V4L2_EXPOSURE_APERTURE_PRIORITY
+ : V4L2_EXPOSURE_MANUAL;
+ controls->set(V4L2_CID_EXPOSURE_AUTO, ivalue);
+ break;
+ }
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ controls->set(cid, value.get<int32_t>() / 100);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_GAIN: {
+ float m = (4.0f - 1.0f) / (max - def);
+ float p = 1.0f - m * def;
+
+ if (m * min + p < 0.5f) {
+ m = (1.0f - 0.5f) / (def - min);
+ p = 1.0f - m * def;
}
+
+ float fvalue = (value.get<float>() - p) / m;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
+
+ default: {
+ int32_t ivalue = value.get<int32_t>();
+ controls->set(cid, ivalue);
+ break;
+ }
}
+ return 0;
+}
+
+int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
+{
+ ControlList controls(data->video_->controls());
+
+ for (const auto &[id, value] : request->controls())
+ processControl(&controls, id, value);
+
for (const auto &ctrl : controls)
LOG(UVC, Debug)
<< "Setting control " << utils::hex(ctrl.first)
@@ -306,26 +422,15 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
std::unique_ptr<UVCCameraData> data = std::make_unique<UVCCameraData>(this);
- /* Locate and initialise the camera data with the default video node. */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto entity = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->flags() & MEDIA_ENT_FL_DEFAULT;
- });
- if (entity == entities.end()) {
- LOG(UVC, Error) << "Could not find a default video device";
+ if (data->init(media))
return false;
- }
-
- if (data->init(*entity))
- return false;
-
- dev_t devnum = makedev((*entity)->deviceMajor(), (*entity)->deviceMinor());
/* Create and register the camera. */
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
- std::shared_ptr<Camera> camera = Camera::create(this, media->model(), streams);
- registerCamera(std::move(camera), std::move(data), devnum);
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
/* Enable hot-unplug notifications. */
hotplugMediaDevice(media);
@@ -333,62 +438,327 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return true;
}
-int UVCCameraData::init(MediaEntity *entity)
+bool PipelineHandlerUVC::acquireDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+
+ return data->video_->open() == 0;
+}
+
+void PipelineHandlerUVC::releaseDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+ data->video_->close();
+}
+
+int UVCCameraData::init(MediaDevice *media)
{
int ret;
+ /* Locate and initialise the camera data with the default video node. */
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto entity = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *e) {
+ return e->flags() & MEDIA_ENT_FL_DEFAULT;
+ });
+ if (entity == entities.end()) {
+ LOG(UVC, Error) << "Could not find a default video device";
+ return -ENODEV;
+ }
+
/* Create and open the video device. */
- video_ = new V4L2VideoDevice(entity);
+ video_ = std::make_unique<V4L2VideoDevice>(*entity);
ret = video_->open();
if (ret)
return ret;
- video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ video_->bufferReady.connect(this, &UVCCameraData::imageBufferReady);
+
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
+
+ /*
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
+ */
+ Size resolution;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
+ for (const SizeRange &sizeRange : sizeRanges) {
+ if (sizeRange.max > resolution)
+ resolution = sizeRange.max;
+ }
+ }
+
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
+ properties_.set(properties::Model, utils::toAscii(media->model()));
+
+ /*
+ * Derive the location from the device removable attribute in sysfs.
+ * Non-removable devices are assumed to be front as we lack detailed
+	 * location information, and removable devices are considered external.
+ *
+ * The sysfs removable attribute is derived from the ACPI _UPC attribute
+ * if available, or from the USB hub descriptors otherwise. ACPI data
+ * may not be very reliable, and the USB hub descriptors may not be
+ * accurate on DT-based platforms. A heuristic may need to be
+ * implemented later if too many devices end up being miscategorized.
+ *
+ * \todo Find a way to tell front and back devices apart. This could
+ * come from the ACPI _PLD, but that may be even more unreliable than
+ * the _UPC.
+ */
+ properties::LocationEnum location = properties::CameraLocationExternal;
+ std::ifstream file(video_->devicePath() + "/../removable");
+ if (file.is_open()) {
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (value == "fixed")
+ location = properties::CameraLocationFront;
+ }
+
+ properties_.set(properties::Location, location);
+
+ properties_.set(properties::PixelArraySize, resolution);
+ properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
/* Initialise the supported controls. */
- const ControlInfoMap &controls = video_->controls();
ControlInfoMap::Map ctrls;
- for (const auto &ctrl : controls) {
+ for (const auto &ctrl : video_->controls()) {
+ uint32_t cid = ctrl.first->id();
const ControlInfo &info = ctrl.second;
- const ControlId *id;
- switch (ctrl.first->id()) {
- case V4L2_CID_BRIGHTNESS:
- id = &controls::Brightness;
- break;
- case V4L2_CID_CONTRAST:
- id = &controls::Contrast;
- break;
- case V4L2_CID_SATURATION:
- id = &controls::Saturation;
- break;
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- id = &controls::ManualExposure;
- break;
- case V4L2_CID_GAIN:
- id = &controls::ManualGain;
+ addControl(cid, info, &ctrls);
+ }
+
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+
+ /*
+	 * Close the device to allow the camera to go into runtime-suspend;
+	 * video_ will be re-opened from acquireDevice() and validate().
+ */
+ video_->close();
+
+ return 0;
+}
+
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+	/* Create a controller ID from the first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+ LOG(UVC, Error) << "Can not find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
break;
- default:
- continue;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+	 * Example: 2.4:1.0
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+	/* Create a device ID from the USB device's vendor and product ID. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
+void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
+ ControlInfoMap::Map *ctrls)
+{
+ const ControlId *id;
+ ControlInfo info;
+
+ /* Map the control ID. */
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS:
+ id = &controls::Brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ id = &controls::Contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ id = &controls::Saturation;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ id = &controls::AeEnable;
+ break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ id = &controls::ExposureTime;
+ break;
+ case V4L2_CID_GAIN:
+ id = &controls::AnalogueGain;
+ break;
+ default:
+ return;
+ }
+
+ /* Map the control info. */
+ int32_t min = v4l2Info.min().get<int32_t>();
+ int32_t max = v4l2Info.max().get<int32_t>();
+ int32_t def = v4l2Info.def().get<int32_t>();
+
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS: {
+ /*
+ * The Brightness control is a float, with 0.0 mapped to the
+ * default value. The control range is [-1.0, 1.0], but the V4L2
+ * default may not be in the middle of the V4L2 range.
+ * Accommodate this by restricting the range of the libcamera
+ * control, but always within the maximum limits.
+ */
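+		/*
+		 * Worked example with hypothetical values min=0, def=128,
+		 * max=255: scale = max(127, 128) = 128, so the reported
+		 * range is [-1.0, 127/128] with 0.0 at the V4L2 default.
+		 */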
+ float scale = std::max(max - def, def - min);
+
+ info = ControlInfo{
+ { static_cast<float>(min - def) / scale },
+ { static_cast<float>(max - def) / scale },
+ { 0.0f }
+ };
+ break;
+ }
+
+ case V4L2_CID_SATURATION:
+ /*
+ * The Saturation control is a float, with 0.0 mapped to the
+ * minimum value (corresponding to a fully desaturated image)
+ * and 1.0 mapped to the default value. Calculate the maximum
+ * value accordingly.
+ */
+ info = ControlInfo{
+ { 0.0f },
+ { static_cast<float>(max - min) / (def - min) },
+ { 1.0f }
+ };
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ info = ControlInfo{ false, true, true };
+ break;
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ /*
+ * ExposureTime is in units of 1 µs, and UVC expects
+ * V4L2_CID_EXPOSURE_ABSOLUTE in units of 100 µs.
+ */
+ info = ControlInfo{
+ { min * 100 },
+ { max * 100 },
+ { def * 100 }
+ };
+ break;
+
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_GAIN: {
+ /*
+ * The Contrast and AnalogueGain controls are floats, with 1.0
+ * mapped to the default value. UVC doesn't specify units, and
+ * cameras have been seen to expose very different ranges for
+ * the controls. Arbitrarily assume that the minimum and
+ * maximum values are respectively no lower than 0.5 and no
+ * higher than 4.0.
+ */
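+		/*
+		 * Worked example with hypothetical values min=0, def=128,
+		 * max=255: m = 3/127 and p = 1 - 128 * m, so the V4L2 minimum
+		 * would map below 0.5. The slope is then recomputed from the
+		 * [min, def] segment, giving m = 0.5/128 and p = 0.5, and the
+		 * reported range becomes roughly [0.5, 1.5].
+		 */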
+ float m = (4.0f - 1.0f) / (max - def);
+ float p = 1.0f - m * def;
+
+ if (m * min + p < 0.5f) {
+ m = (1.0f - 0.5f) / (def - min);
+ p = 1.0f - m * def;
}
- ctrls.emplace(id, info);
+ info = ControlInfo{
+ { m * min + p },
+ { m * max + p },
+ { 1.0f }
+ };
+ break;
}
- controlInfo_ = std::move(ctrls);
+ default:
+ info = v4l2Info;
+ break;
+ }
- return 0;
+ ctrls->emplace(id, info);
}
-void UVCCameraData::bufferReady(FrameBuffer *buffer)
+void UVCCameraData::imageBufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
- pipe_->completeBuffer(camera_, request, buffer);
- pipe_->completeRequest(camera_, request);
+ /* \todo Use the UVC metadata to calculate a more precise timestamp */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe()->completeBuffer(request, buffer);
+ pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/meson.build b/src/libcamera/pipeline/vimc/meson.build
index 615ecd20..868e2546 100644
--- a/src/libcamera/pipeline/vimc/meson.build
+++ b/src/libcamera/pipeline/vimc/meson.build
@@ -1,3 +1,5 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'vimc.cpp',
])
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index b04a9726..07273bd2 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,74 +2,84 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
-#include <array>
+#include <cmath>
#include <iomanip>
+#include <map>
#include <tuple>
#include <linux/media-bus-format.h>
+#include <linux/version.h>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "ipa_manager.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/vimc_ipa_interface.h>
+#include <libcamera/ipa/vimc_ipa_proxy.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(VIMC)
-class VimcCameraData : public CameraData
+class VimcCameraData : public Camera::Private
{
public:
- VimcCameraData(PipelineHandler *pipe)
- : CameraData(pipe), sensor_(nullptr), debayer_(nullptr),
- scaler_(nullptr), video_(nullptr), raw_(nullptr)
- {
- }
-
- ~VimcCameraData()
+ VimcCameraData(PipelineHandler *pipe, MediaDevice *media)
+ : Camera::Private(pipe), media_(media)
{
- delete sensor_;
- delete debayer_;
- delete scaler_;
- delete video_;
- delete raw_;
}
- int init(MediaDevice *media);
- void bufferReady(FrameBuffer *buffer);
-
- CameraSensor *sensor_;
- V4L2Subdevice *debayer_;
- V4L2Subdevice *scaler_;
- V4L2VideoDevice *video_;
- V4L2VideoDevice *raw_;
+ int init();
+ int allocateMockIPABuffers();
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
+
+ MediaDevice *media_;
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> debayer_;
+ std::unique_ptr<V4L2Subdevice> scaler_;
+ std::unique_ptr<V4L2VideoDevice> video_;
+ std::unique_ptr<V4L2VideoDevice> raw_;
Stream stream_;
+
+ std::unique_ptr<ipa::vimc::IPAProxyVimc> ipa_;
+ std::vector<std::unique_ptr<FrameBuffer>> mockIPABufs_;
};
class VimcCameraConfiguration : public CameraConfiguration
{
public:
- VimcCameraConfiguration();
+ VimcCameraConfiguration(VimcCameraData *data);
Status validate() override;
+
+private:
+ VimcCameraData *data_;
};
class PipelineHandlerVimc : public PipelineHandler
@@ -77,15 +87,15 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -94,25 +104,26 @@ public:
private:
int processControls(VimcCameraData *data, Request *request);
- VimcCameraData *cameraData(const Camera *camera)
+ VimcCameraData *cameraData(Camera *camera)
{
- return static_cast<VimcCameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<VimcCameraData *>(camera->_d());
}
};
namespace {
-static const std::array<PixelFormat, 3> pixelformats{
- PixelFormat(DRM_FORMAT_RGB888),
- PixelFormat(DRM_FORMAT_BGR888),
- PixelFormat(DRM_FORMAT_BGRA8888),
+static const std::map<PixelFormat, uint32_t> pixelformats{
+ { formats::RGB888, MEDIA_BUS_FMT_BGR888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
};
+static constexpr Size kMinSize{ 16, 16 };
+static constexpr Size kMaxSize{ 4096, 2160 };
+
} /* namespace */
-VimcCameraConfiguration::VimcCameraConfiguration()
- : CameraConfiguration()
+VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data)
+ : CameraConfiguration(), data_(data)
{
}
@@ -123,6 +134,11 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
+ status = Adjusted;
+ }
+
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
@@ -132,30 +148,51 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
StreamConfiguration &cfg = config_[0];
/* Adjust the pixel format. */
- if (std::find(pixelformats.begin(), pixelformats.end(), cfg.pixelFormat) ==
- pixelformats.end()) {
- LOG(VIMC, Debug) << "Adjusting format to RGB24";
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_BGR888);
+ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+ LOG(VIMC, Debug) << "Adjusting format to BGR888";
+ cfg.pixelFormat = formats::BGR888;
status = Adjusted;
}
/* Clamp the size based on the device limits. */
const Size size = cfg.size;
- /* The scaler hardcodes a x3 scale-up ratio. */
- cfg.size.width = std::max(48U, std::min(4096U, cfg.size.width));
- cfg.size.height = std::max(48U, std::min(2160U, cfg.size.height));
- cfg.size.width -= cfg.size.width % 3;
- cfg.size.height -= cfg.size.height % 3;
+ /*
+ * The sensor output size is aligned to two pixels in both directions.
+ * Additionally, prior to v5.16, the scaler hardcodes a x3 scale-up
+ * ratio, requiring the output width and height to be multiples of 6.
+ */
+ Size minSize{ kMinSize };
+ unsigned int alignment = 2;
+
+ if (data_->media_->version() < KERNEL_VERSION(5, 16, 0)) {
+ minSize *= 3;
+ alignment *= 3;
+ }
+
+ cfg.size.expandTo(minSize).boundTo(kMaxSize)
+ .alignDownTo(alignment, alignment);
if (cfg.size != size) {
LOG(VIMC, Debug)
- << "Adjusting size to " << cfg.size.toString();
+ << "Adjusting size to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ int ret = data_->video_->tryFormat(&format);
+ if (ret)
+ return Invalid;
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+
return status;
}
@@ -164,27 +201,45 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
- CameraConfiguration *config = new VimcCameraConfiguration();
+ VimcCameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
std::map<PixelFormat, std::vector<SizeRange>> formats;
- for (PixelFormat pixelformat : pixelformats) {
- /* The scaler hardcodes a x3 scale-up ratio. */
- std::vector<SizeRange> sizes{
- SizeRange{ { 48, 48 }, { 4096, 2160 } }
- };
- formats[pixelformat] = sizes;
+ for (const auto &pixelformat : pixelformats) {
+ /*
+ * Kernels prior to v5.7 incorrectly report support for RGB888,
+ * but it isn't functional within the pipeline.
+ */
+ if (data->media_->version() < KERNEL_VERSION(5, 7, 0)) {
+ if (pixelformat.first != formats::BGR888) {
+ LOG(VIMC, Info)
+ << "Skipping unsupported pixel format "
+ << pixelformat.first;
+ continue;
+ }
+ }
+
+ /* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. */
+ Size minSize{ kMinSize };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ minSize *= 3;
+
+ std::vector<SizeRange> sizes{ { minSize, kMaxSize } };
+ formats[pixelformat.first] = sizes;
}
StreamConfiguration cfg(formats);
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_BGR888);
+ cfg.pixelFormat = formats::BGR888;
cfg.size = { 1920, 1080 };
cfg.bufferCount = 4;
@@ -201,10 +256,18 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- /* The scaler hardcodes a x3 scale-up ratio. */
+ /*
+ * Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. For newer
+ * kernels, use a sensor resolution of 1920x1080 and let the scaler
+ * produce the requested stream size.
+ */
+ Size sensorSize{ 1920, 1080 };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ sensorSize = { cfg.size.width / 3, cfg.size.height / 3 };
+
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
- subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.size = sensorSize;
ret = data->sensor_->setFormat(&subformat);
if (ret)
@@ -214,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = MEDIA_BUS_FMT_RGB888_1X24;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -223,12 +286,19 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
+ if (data->media_->version() >= KERNEL_VERSION(5, 6, 0)) {
+ Rectangle crop{ 0, 0, subformat.size };
+ ret = data->scaler_->setSelection(0, V4L2_SEL_TGT_CROP, &crop);
+ if (ret)
+ return ret;
+ }
+
subformat.size = cfg.size;
ret = data->scaler_->setFormat(1, &subformat);
if (ret)
return ret;
- V4L2DeviceFormat format = {};
+ V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
@@ -245,7 +315,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
* vimc driver will fail pipeline validation.
*/
format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8);
- format.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ format.size = sensorSize;
ret = data->raw_->setFormat(&format);
if (ret)
@@ -253,6 +323,22 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
cfg.setStream(&data->stream_);
+ if (data->ipa_) {
+ /* Inform IPA of stream configuration and sensor controls. */
+ std::map<unsigned int, IPAStream> streamConfig;
+ streamConfig.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(cfg.pixelFormat, cfg.size));
+
+ std::map<unsigned int, ControlInfoMap> entityControls;
+ entityControls.emplace(0, data->sensor_->controls());
+
+ IPACameraSensorInfo sensorInfo;
+ data->sensor_->sensorInfo(&sensorInfo);
+
+ data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ }
+
return 0;
}
@@ -265,7 +351,7 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera, Stream *stream,
return data->video_->exportBuffers(count, buffers);
}
-int PipelineHandlerVimc::start(Camera *camera)
+int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
VimcCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
@@ -274,8 +360,23 @@ int PipelineHandlerVimc::start(Camera *camera)
if (ret < 0)
return ret;
+ /* Map the mock IPA buffers to VIMC IPA to exercise IPC code paths. */
+ std::vector<IPABuffer> ipaBuffers;
+ for (auto [i, buffer] : utils::enumerate(data->mockIPABufs_)) {
+ buffer->setCookie(i + 1);
+ ipaBuffers.emplace_back(buffer->cookie(), buffer->planes());
+ }
+ data->ipa_->mapBuffers(ipaBuffers);
+
+ ret = data->ipa_->start();
+ if (ret) {
+ data->video_->releaseBuffers();
+ return ret;
+ }
+
ret = data->video_->streamOn();
if (ret < 0) {
+ data->ipa_->stop();
data->video_->releaseBuffers();
return ret;
}
@@ -283,10 +384,17 @@ int PipelineHandlerVimc::start(Camera *camera)
return 0;
}
-void PipelineHandlerVimc::stop(Camera *camera)
+void PipelineHandlerVimc::stopDevice(Camera *camera)
{
VimcCameraData *data = cameraData(camera);
data->video_->streamOff();
+
+ std::vector<unsigned int> ids;
+ for (const std::unique_ptr<FrameBuffer> &buffer : data->mockIPABufs_)
+ ids.push_back(buffer->cookie());
+ data->ipa_->unmapBuffers(ids);
+ data->ipa_->stop();
+
data->video_->releaseBuffers();
}
@@ -294,16 +402,26 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
- ControlValue &value = it.second;
-
- if (id == controls::Brightness)
- controls.set(V4L2_CID_BRIGHTNESS, value);
- else if (id == controls::Contrast)
- controls.set(V4L2_CID_CONTRAST, value);
- else if (id == controls::Saturation)
- controls.set(V4L2_CID_SATURATION, value);
+ unsigned int offset;
+ uint32_t cid;
+
+ if (id == controls::Brightness) {
+ cid = V4L2_CID_BRIGHTNESS;
+ offset = 128;
+ } else if (id == controls::Contrast) {
+ cid = V4L2_CID_CONTRAST;
+ offset = 0;
+ } else if (id == controls::Saturation) {
+ cid = V4L2_CID_SATURATION;
+ offset = 0;
+ } else {
+ continue;
+ }
+
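+		/*
+		 * Map the float control range ([-1, 1] for Brightness, [0, 2]
+		 * for Contrast and Saturation) onto the 0-255 V4L2 range. As a
+		 * hypothetical example, Brightness 0.5 maps to
+		 * lround(0.5 * 128 + 128) = 192.
+		 */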
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
+ controls.set(cid, std::clamp(value, 0, 255));
}
for (const auto &ctrl : controls)
@@ -339,6 +457,8 @@ int PipelineHandlerVimc::queueRequestDevice(Camera *camera, Request *request)
if (ret < 0)
return ret;
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+
return 0;
}
@@ -360,36 +480,50 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
if (!media)
return false;
- std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this);
-
- data->ipa_ = IPAManager::instance()->createIPA(this, 0, 0);
- if (data->ipa_ == nullptr)
- LOG(VIMC, Warning) << "no matching IPA found";
- else
- data->ipa_->init();
+ std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this, media);
/* Locate and open the capture video node. */
- if (data->init(media))
+ if (data->init())
return false;
+ data->ipa_ = IPAManager::createIPA<ipa::vimc::IPAProxyVimc>(this, 0, 0);
+ if (!data->ipa_) {
+ LOG(VIMC, Error) << "no matching IPA found";
+ return false;
+ }
+
+ data->ipa_->paramsComputed.connect(data.get(), &VimcCameraData::paramsComputed);
+
+ std::string conf = data->ipa_->configurationFile("vimc.conf");
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
+
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
- std::shared_ptr<Camera> camera = Camera::create(this, "VIMC Sensor B",
- streams);
- registerCamera(std::move(camera), std::move(data));
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
return true;
}
-int VimcCameraData::init(MediaDevice *media)
+int VimcCameraData::init()
{
int ret;
- ret = media->disableLinks();
+ ret = media_->disableLinks();
if (ret < 0)
return ret;
- MediaLink *link = media->link("Debayer B", 1, "Scaler", 0);
+ MediaLink *link = media_->link("Debayer B", 1, "Scaler", 0);
if (!link)
return -ENODEV;
@@ -398,46 +532,54 @@ int VimcCameraData::init(MediaDevice *media)
return ret;
/* Create and open the camera sensor, debayer, scaler and video device. */
- sensor_ = new CameraSensor(media->getEntityByName("Sensor B"));
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor B"));
+ if (!sensor_)
+ return -ENODEV;
- debayer_ = new V4L2Subdevice(media->getEntityByName("Debayer B"));
+ debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
if (debayer_->open())
return -ENODEV;
- scaler_ = new V4L2Subdevice(media->getEntityByName("Scaler"));
+ scaler_ = V4L2Subdevice::fromEntityName(media_, "Scaler");
if (scaler_->open())
return -ENODEV;
- video_ = new V4L2VideoDevice(media->getEntityByName("RGB/YUV Capture"));
+ video_ = V4L2VideoDevice::fromEntityName(media_, "RGB/YUV Capture");
if (video_->open())
return -ENODEV;
- video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
+ video_->bufferReady.connect(this, &VimcCameraData::imageBufferReady);
- raw_ = new V4L2VideoDevice(media->getEntityByName("Raw Capture 1"));
+ raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
if (raw_->open())
return -ENODEV;
+ ret = allocateMockIPABuffers();
+ if (ret < 0) {
+ LOG(VIMC, Warning) << "Cannot allocate mock IPA buffers";
+ return ret;
+ }
+
/* Initialise the supported controls. */
const ControlInfoMap &controls = sensor_->controls();
ControlInfoMap::Map ctrls;
for (const auto &ctrl : controls) {
- const ControlInfo &info = ctrl.second;
const ControlId *id;
+ ControlInfo info;
switch (ctrl.first->id()) {
case V4L2_CID_BRIGHTNESS:
id = &controls::Brightness;
+ info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
break;
case V4L2_CID_CONTRAST:
id = &controls::Contrast;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
case V4L2_CID_SATURATION:
id = &controls::Saturation;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
default:
continue;
@@ -446,7 +588,7 @@ int VimcCameraData::init(MediaDevice *media)
ctrls.emplace(id, info);
}
- controlInfo_ = std::move(ctrls);
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
/* Initialize the camera properties. */
properties_ = sensor_->properties();
@@ -454,14 +596,54 @@ int VimcCameraData::init(MediaDevice *media)
return 0;
}
-void VimcCameraData::bufferReady(FrameBuffer *buffer)
+void VimcCameraData::imageBufferReady(FrameBuffer *buffer)
{
+ PipelineHandlerVimc *pipe =
+ static_cast<PipelineHandlerVimc *>(this->pipe());
Request *request = buffer->request();
- pipe_->completeBuffer(camera_, request, buffer);
- pipe_->completeRequest(camera_, request);
+	/* If the buffer is cancelled, force completion of the whole request. */
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ for (auto it : request->buffers()) {
+ FrameBuffer *b = it.second;
+ b->_d()->cancel();
+ pipe->completeBuffer(request, b);
+ }
+
+ pipe->completeRequest(request);
+ return;
+ }
+
+ /* Record the sensor's timestamp in the request metadata. */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+
+ ipa_->computeParams(request->sequence(), mockIPABufs_[0]->cookie());
+}
+
+int VimcCameraData::allocateMockIPABuffers()
+{
+ constexpr unsigned int kBufCount = 2;
+
+ V4L2DeviceFormat format;
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
+	format.size = Size(160, 120);
+
+ int ret = video_->setFormat(&format);
+ if (ret < 0)
+ return ret;
+
+ return video_->exportBuffers(kBufCount, &mockIPABufs_);
+}
+
+void VimcCameraData::paramsComputed([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
+{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/README.md b/src/libcamera/pipeline/virtual/README.md
new file mode 100644
index 00000000..a9f39c15
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/README.md
@@ -0,0 +1,65 @@
+# Virtual Pipeline Handler
+
+The virtual pipeline handler emulates fake external cameras for testing.
+
+## Parse config file and register cameras
+
+- A sample config file is located at `src/libcamera/pipeline/virtual/data/virtual.yaml`.
+- When libcamera is installed, the config file is installed to
+  `share/libcamera/pipeline/virtual/virtual.yaml`.
+
+### Config File Format
+The config file describes the properties of the cameras to register. It is a
+YAML file whose top level is a dictionary mapping camera IDs to their
+properties. A default value is applied whenever a property is left empty.
+
+Each camera block is a dictionary, containing the following keys:
+- `supported_formats` (list of `VirtualCameraData::Resolution`, optional):
+  List of the supported resolutions and frame rates of the emulated camera
+  - `width` (`unsigned int`, default=1920): Width of the resolution.
+    This needs to be even.
+  - `height` (`unsigned int`, default=1080): Height of the resolution.
+  - `frame_rates` (list of `int`, default=`[30,60]`): Range of the frame
+    rate (frames per second). If the list contains one value, it is used as
+    both the lower and the upper bound. If the list contains two values, the
+    first is the lower bound and the second is the upper bound. No other
+    number of values is allowed.
+- `test_pattern` (`string`): Which test pattern to use for the frames. The
+  options are "bars" (color bars) and "lines" (diagonal lines). Cannot be set
+  together with `frames`.
+- `frames` (dictionary):
+  - `path` (`string`): Path to an image, or path to a directory containing a
+    series of images. Cannot be set together with `test_pattern`.
+    - The path to an image has a ".jpg" extension.
+    - The path to a directory ends with "/". The images in the directory are
+      named "{n}.jpg", where {n} is the sequence number of the image,
+      starting from 0.
+- `location` (`string`, default="CameraLocationFront"): The location of the
+  camera. Supported values are "CameraLocationFront", "CameraLocationBack",
+  and "CameraLocationExternal".
+- `model` (`string`, default="Unknown"): The model name of the camera.
+
+See `data/virtual.yaml` for a complete sample config file.
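+
+As a minimal illustration (the values are hypothetical, following the keys
+documented above), a single-camera config could look like:
+
+```yaml
+"Virtual0":
+  supported_formats:
+    - width: 1920
+      height: 1080
+      frame_rates:
+        - 30
+        - 60
+  test_pattern: "bars"
+  location: "CameraLocationFront"
+  model: "Virtual Camera"
+```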
+
+### Implementation
+
+The `ConfigParser` class provides methods to parse the config file and
+register cameras in the virtual pipeline handler. `parseConfigFile()` is the
+entry point exposed to the pipeline handler.
+
+The `ConfigParser` class proceeds as follows:
+1. `parseConfigFile()` parses the config file into a `YamlObject` using `YamlParser::parse()`.
+   - The top level of the config file holds the camera IDs; each camera's
+     properties are then inspected in turn.
+2. For each camera, `parseCameraConfigData()` returns the camera data with its
+   configuration.
+   - The methods in the next step fill in the camera data.
+   - If the config file contains an invalid configuration, this method returns
+     nullptr and the camera is skipped.
+3. Parse each property and register the data.
+ - `parseSupportedFormats()`: Parses `supported_formats` in the config, which
+ contains resolutions and frame rates.
+ - `parseFrameGenerator()`: Parses `test_pattern` or `frames` in the config.
+ - `parseLocation()`: Parses `location` in the config.
+ - `parseModel()`: Parses `model` in the config.
+4. Control returns to `parseConfigFile()`, which appends the camera
+   configuration to the list.
+5. `parseConfigFile()` returns the list of camera configurations, as sketched
+   below.
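+
+As a sketch (assuming the pipeline handler opens the config file itself; the
+path below is illustrative), the parser is driven roughly as follows:
+
+```cpp
+File file("/path/to/virtual.yaml");
+if (!file.open(File::OpenModeFlag::ReadOnly))
+	return false;
+
+ConfigParser parser;
+std::vector<std::unique_ptr<VirtualCameraData>> configs =
+	parser.parseConfigFile(file, this);
+/* Each entry in configs describes one camera to register. */
+```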
diff --git a/src/libcamera/pipeline/virtual/config_parser.cpp b/src/libcamera/pipeline/virtual/config_parser.cpp
new file mode 100644
index 00000000..0cbfe39b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.cpp
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#include "config_parser.h"
+
+#include <string.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::vector<std::unique_ptr<VirtualCameraData>>
+ConfigParser::parseConfigFile(File &file, PipelineHandler *pipe)
+{
+ std::vector<std::unique_ptr<VirtualCameraData>> configurations;
+
+ std::unique_ptr<YamlObject> cameras = YamlParser::parse(file);
+ if (!cameras) {
+ LOG(Virtual, Error) << "Failed to pass config file.";
+ return configurations;
+ }
+
+ if (!cameras->isDictionary()) {
+ LOG(Virtual, Error) << "Config file is not a dictionary at the top level.";
+ return configurations;
+ }
+
+ /* Look into the configuration of each camera */
+ for (const auto &[cameraId, cameraConfigData] : cameras->asDict()) {
+ std::unique_ptr<VirtualCameraData> data =
+ parseCameraConfigData(cameraConfigData, pipe);
+ /* Parse configData to data */
+ if (!data) {
+ /* Skip the camera if it has invalid config */
+ LOG(Virtual, Error) << "Failed to parse config of the camera: "
+ << cameraId;
+ continue;
+ }
+
+ data->config_.id = cameraId;
+ ControlInfoMap::Map controls;
+		/* \todo Check which resolution's frame rate should be reported */
+ controls[&controls::FrameDurationLimits] =
+ ControlInfo(1000000 / data->config_.resolutions[0].frameRates[1],
+ 1000000 / data->config_.resolutions[0].frameRates[0]);
+
+ std::vector<ControlValue> supportedFaceDetectModes{
+ static_cast<int32_t>(controls::draft::FaceDetectModeOff),
+ };
+ controls[&controls::draft::FaceDetectMode] = ControlInfo(supportedFaceDetectModes);
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+ configurations.push_back(std::move(data));
+ }
+
+ return configurations;
+}
+
+std::unique_ptr<VirtualCameraData>
+ConfigParser::parseCameraConfigData(const YamlObject &cameraConfigData,
+ PipelineHandler *pipe)
+{
+ std::vector<VirtualCameraData::Resolution> resolutions;
+ if (parseSupportedFormats(cameraConfigData, &resolutions))
+ return nullptr;
+
+ std::unique_ptr<VirtualCameraData> data =
+ std::make_unique<VirtualCameraData>(pipe, resolutions);
+
+ if (parseFrameGenerator(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseLocation(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseModel(cameraConfigData, data.get()))
+ return nullptr;
+
+ return data;
+}
+
+int ConfigParser::parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions)
+{
+ if (cameraConfigData.contains("supported_formats")) {
+ const YamlObject &supportedResolutions = cameraConfigData["supported_formats"];
+
+ for (const YamlObject &supportedResolution : supportedResolutions.asList()) {
+ unsigned int width = supportedResolution["width"].get<unsigned int>(1920);
+ unsigned int height = supportedResolution["height"].get<unsigned int>(1080);
+ if (width == 0 || height == 0) {
+ LOG(Virtual, Error) << "Invalid width or/and height";
+ return -EINVAL;
+ }
+ if (width % 2 != 0) {
+ LOG(Virtual, Error) << "Invalid width: width needs to be even";
+ return -EINVAL;
+ }
+
+ std::vector<int64_t> frameRates;
+ if (supportedResolution.contains("frame_rates")) {
+ auto frameRatesList =
+ supportedResolution["frame_rates"].getList<int>();
+ if (!frameRatesList || (frameRatesList->size() != 1 &&
+ frameRatesList->size() != 2)) {
+ LOG(Virtual, Error) << "Invalid frame_rates: either one or two values";
+ return -EINVAL;
+ }
+
+ if (frameRatesList->size() == 2 &&
+ frameRatesList.value()[0] > frameRatesList.value()[1]) {
+ LOG(Virtual, Error) << "frame_rates's first value(lower bound)"
+ << " is higher than the second value(upper bound)";
+ return -EINVAL;
+ }
+ /*
+ * Push the min and max framerates. A
+ * single rate is duplicated.
+ */
+ frameRates.push_back(frameRatesList.value().front());
+ frameRates.push_back(frameRatesList.value().back());
+ } else {
+ frameRates.push_back(30);
+ frameRates.push_back(60);
+ }
+
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ width, height },
+ frameRates });
+ }
+ } else {
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ 1920, 1080 },
+ { 30, 60 } });
+ }
+
+ return 0;
+}
+
+int ConfigParser::parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ const std::string testPatternKey = "test_pattern";
+ const std::string framesKey = "frames";
+ if (cameraConfigData.contains(testPatternKey)) {
+ if (cameraConfigData.contains(framesKey)) {
+ LOG(Virtual, Error) << "A camera should use either "
+ << testPatternKey << " or " << framesKey;
+ return -EINVAL;
+ }
+
+ auto testPattern = cameraConfigData[testPatternKey].get<std::string>("");
+
+ if (testPattern == "bars") {
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (testPattern == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Debug) << "Test pattern: " << testPattern
+ << " is not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ const YamlObject &frames = cameraConfigData[framesKey];
+
+	/* When no frames are provided in the config file, use the color bar test pattern */
+ if (!frames) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ auto path = frames["path"].get<std::string>();
+
+ if (!path) {
+ LOG(Virtual, Error) << "Test pattern or path should be specified.";
+ return -EINVAL;
+ }
+
+ std::vector<std::filesystem::path> files;
+
+ switch (std::filesystem::symlink_status(*path).type()) {
+ case std::filesystem::file_type::regular:
+ files.push_back(*path);
+ break;
+
+ case std::filesystem::file_type::directory:
+ for (const auto &dentry : std::filesystem::directory_iterator{ *path }) {
+ if (dentry.is_regular_file())
+ files.push_back(dentry.path());
+ }
+
+ std::sort(files.begin(), files.end(), [](const auto &a, const auto &b) {
+ return ::strverscmp(a.c_str(), b.c_str()) < 0;
+ });
+
+ if (files.empty()) {
+ LOG(Virtual, Error) << "Directory has no files: " << *path;
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ LOG(Virtual, Error) << "Frame: " << *path << " is not supported";
+ return -EINVAL;
+ }
+
+ data->config_.frame = ImageFrames{ std::move(files) };
+
+ return 0;
+}
+
+int ConfigParser::parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string location = cameraConfigData["location"].get<std::string>("CameraLocationFront");
+
+ /* Default value is properties::CameraLocationFront */
+ auto it = properties::LocationNameValueMap.find(location);
+ if (it == properties::LocationNameValueMap.end()) {
+ LOG(Virtual, Error)
+ << "location: " << location << " is not supported";
+ return -EINVAL;
+ }
+
+ data->properties_.set(properties::Location, it->second);
+
+ return 0;
+}
+
+int ConfigParser::parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string model = cameraConfigData["model"].get<std::string>("Unknown");
+
+ data->properties_.set(properties::Model, model);
+
+ return 0;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/config_parser.h b/src/libcamera/pipeline/virtual/config_parser.h
new file mode 100644
index 00000000..d2000de9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/file.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+class ConfigParser
+{
+public:
+ std::vector<std::unique_ptr<VirtualCameraData>>
+ parseConfigFile(File &file, PipelineHandler *pipe);
+
+private:
+ std::unique_ptr<VirtualCameraData>
+ parseCameraConfigData(const YamlObject &cameraConfigData, PipelineHandler *pipe);
+
+ int parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions);
+ int parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data);
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/data/virtual.yaml b/src/libcamera/pipeline/virtual/data/virtual.yaml
new file mode 100644
index 00000000..20471bb9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/data/virtual.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+"Virtual0":
+ supported_formats:
+ - width: 1920
+ height: 1080
+ frame_rates:
+ - 30
+ - 60
+ - width: 1680
+ height: 1050
+ frame_rates:
+ - 70
+ - 80
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device"
+"Virtual1":
+ supported_formats:
+ - width: 800
+ height: 600
+ frame_rates:
+ - 60
+ test_pattern: "bars"
+ location: "CameraLocationBack"
+ model: "Virtual Video Device1"
+"Virtual2":
+ supported_formats:
+ - width: 400
+ height: 300
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device2"
+"Virtual3":
+ test_pattern: "bars"
diff --git a/src/libcamera/pipeline/virtual/frame_generator.h b/src/libcamera/pipeline/virtual/frame_generator.h
new file mode 100644
index 00000000..a0658c45
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/frame_generator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to generate frames
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
+class FrameGenerator
+{
+public:
+ virtual ~FrameGenerator() = default;
+
+ virtual void configure(const Size &size) = 0;
+
+ virtual int generateFrame(const Size &size,
+ const FrameBuffer *buffer) = 0;
+
+protected:
+ FrameGenerator() {}
+};
+
+} /* namespace libcamera */
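FrameGenerator is the interface the virtual pipeline programs against: configure() prepares any per-resolution state and generateFrame() fills one buffer. As an illustration only (this class is not part of the patch, and assumes <algorithm> and "libcamera/internal/mapped_framebuffer.h" are available), a hypothetical generator that fills every frame with flat grey could look like:

    class SolidColorGenerator : public FrameGenerator
    {
    public:
        void configure(const Size &size) override { size_ = size; }

        int generateFrame([[maybe_unused]] const Size &size,
                          const FrameBuffer *buffer) override
        {
            MappedFrameBuffer mapped(buffer, MappedFrameBuffer::MapFlag::Write);
            if (!mapped.isValid())
                return -EINVAL;

            /* NV12: plane 0 is Y, plane 1 is interleaved UV. */
            std::fill(mapped.planes()[0].begin(), mapped.planes()[0].end(), 0x80);
            std::fill(mapped.planes()[1].begin(), mapped.planes()[1].end(), 0x80);
            return 0;
        }

    private:
        Size size_;
    };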
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
new file mode 100644
index 00000000..d1545b5d
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+/*
+ * Factory function to create an ImageFrameGenerator object.
+ * Read the images, convert them to buffers in NV12 format, and
+ * store pointers to the buffers in a list (imageFrameDatas_).
+ */
+std::unique_ptr<ImageFrameGenerator>
+ImageFrameGenerator::create(ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each file in the directory, load the image,
+ * convert it to NV12, and store the pointer.
+ */
+ for (const auto &path : imageFrames.files) {
+ File file(path);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ auto fileSize = file.size();
+ auto buffer = std::make_unique<uint8_t[]>(fileSize);
+ if (file.read({ buffer.get(), static_cast<size_t>(fileSize) }) != fileSize) {
+ LOG(Virtual, Error) << "Failed to read file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(buffer.get(), fileSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(width * height / 2);
+ int ret = libyuv::MJPGToNV12(buffer.get(), fileSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+
+ ASSERT(!imageFrameGenerator->imageFrameDatas_.empty());
+
+ return imageFrameGenerator;
+}
+
+/*
+ * \var ImageFrameGenerator::frameRepeat
+ * \brief Number of frames to repeat before proceeding to the next frame
+ */
+
+/* Scale the buffers for image frames. */
+void ImageFrameGenerator::configure(const Size &size)
+{
+ /* Reset the scaled frames and counters so configure() can be called again */
+ scaledFrameDatas_.clear();
+ frameIndex_ = 0;
+ parameter_ = 0;
+
+ for (unsigned int i = 0; i < imageFrameDatas_.size(); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ unsigned int halfSizeWidth = (size.width + 1) / 2;
+ unsigned int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+ /*
+ * \todo Some platforms might enforce stride due to GPU.
+ * The width needs to be a multiple of the stride to work
+ * properly for now.
+ */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buffer)
+{
+ ASSERT(!scaledFrameDatas_.empty());
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ /* Wrap frameIndex_ around the number of available images */
+ frameIndex_ %= imageFrameDatas_.size();
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameIndex_].Y.get(), size.width,
+ scaledFrameDatas_[frameIndex_].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+ /* Proceed to the next image every frameRepeat frames */
+ /* \todo Consider setting the frameRepeat in the config file */
+ parameter_++;
+ if (parameter_ % frameRepeat == 0)
+ frameIndex_++;
+
+ return 0;
+}
+
+/*
+ * \var ImageFrameGenerator::imageFrameDatas_
+ * \brief List of pointers to the unscaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::scaledFrameDatas_
+ * \brief List of pointers to the scaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::imageFrames_
+ * \brief Pointer to the imageFrames_ in VirtualCameraData
+ */
+
+/*
+ * \var ImageFrameGenerator::parameter_
+ * \brief Frame counter used to advance to the next image every frameRepeat frames
+ */
+
+} /* namespace libcamera */
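The buffer arithmetic in create() and configure() is plain NV12 layout math: a full-resolution luma plane followed by a half-resolution interleaved chroma plane. In sketch form:

    /* NV12 sizing as used above. */
    size_t lumaSize = width * height;        /* dstY in create() */
    size_t chromaSize = width * height / 2;  /* dstUV in create() */

    /* configure() rounds odd dimensions up before halving. */
    unsigned int halfWidth = (size.width + 1) / 2;
    unsigned int halfHeight = (size.height + 1) / 2;
    size_t scaledChromaSize = halfWidth * halfHeight * 2; /* scaledUV */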
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.h b/src/libcamera/pipeline/virtual/image_frame_generator.h
new file mode 100644
index 00000000..42a077ba
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <memory>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::vector<std::filesystem::path> files;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ static constexpr unsigned int frameRepeat = 4;
+
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ void configure(const Size &size) override;
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+ std::vector<ImageFrameData> imageFrameDatas_;
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ ImageFrames *imageFrames_;
+ unsigned int frameIndex_;
+ unsigned int parameter_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/meson.build b/src/libcamera/pipeline/virtual/meson.build
new file mode 100644
index 00000000..4786fe2e
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'config_parser.cpp',
+ 'image_frame_generator.cpp',
+ 'test_pattern_generator.cpp',
+ 'virtual.cpp',
+])
+
+libjpeg = dependency('libjpeg', required : true)
+
+libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg]
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
new file mode 100644
index 00000000..745be83b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#include "test_pattern_generator.h"
+
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <libyuv/convert_from_argb.h>
+
+namespace {
+
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+ if (size.width < 2)
+ return;
+
+ const size_t stride = size.width * SampleSize;
+ uint8_t first[SampleSize];
+
+ for (size_t i = 0; i < size.height; i++, image += stride) {
+ memcpy(first, &image[0], SampleSize);
+ memmove(&image[0], &image[SampleSize], stride - SampleSize);
+ memcpy(&image[stride - SampleSize], first, SampleSize);
+ }
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+static const unsigned int kARGBSize = 4;
+
+int TestPatternGenerator::generateFrame(const Size &size,
+ const FrameBuffer *buffer)
+{
+ MappedFrameBuffer mappedFrameBuffer(buffer,
+ MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ rotateLeft1Column<kARGBSize>(size, template_.get());
+
+ /* Convert the template_ to the frame buffer */
+ int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
+ planes[0].begin(), size.width,
+ planes[1].begin(), size.width,
+ size.width, size.height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "ARGBToNV12() failed with " << ret;
+
+ return ret;
+}
+
+void ColorBarsGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[8][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0xff, 0xff, 0x00 }, /* Yellow */
+ { 0x00, 0xff, 0xff }, /* Cyan */
+ { 0x00, 0xff, 0x00 }, /* Green */
+ { 0xff, 0x00, 0xff }, /* Magenta */
+ { 0xff, 0x00, 0x00 }, /* Red */
+ { 0x00, 0x00, 0xff }, /* Blue */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int colorBarWidth = size.width / std::size(kColorBar);
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+ /* Repeat the pattern when the width is exceeded */
+ unsigned int index = (w / colorBarWidth) % std::size(kColorBar);
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+void DiagonalLinesGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[2][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int lineWidth = size.width / 10;
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+ /* Repeat the pattern when the width is exceeded */
+ int index = ((w + h) / lineWidth) % 2;
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+} /* namespace libcamera */
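rotateLeft1Column() is what animates the patterns: each generateFrame() call rotates every row of the BGRA template one pixel to the left, with the leftmost pixel wrapping to the right edge, so the pattern scrolls across successive frames. A tiny worked example on a single three-pixel row:

    /* One row of three 4-byte BGRA samples. */
    uint8_t row[] = { 1, 1, 1, 1,  2, 2, 2, 2,  3, 3, 3, 3 };
    rotateLeft1Column<4>(Size(3, 1), row);
    /* row is now { 2, 2, 2, 2,  3, 3, 3, 3,  1, 1, 1, 1 }. */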
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h
new file mode 100644
index 00000000..2a51bd31
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class TestPattern : char {
+ ColorBars = 0,
+ DiagonalLines = 1,
+};
+
+class TestPatternGenerator : public FrameGenerator
+{
+public:
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+protected:
+ /* Buffer of test pattern template */
+ std::unique_ptr<uint8_t[]> template_;
+};
+
+class ColorBarsGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the color bar test pattern. */
+ void configure(const Size &size) override;
+};
+
+class DiagonalLinesGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the diagonal lines test pattern. */
+ void configure(const Size &size) override;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp
new file mode 100644
index 00000000..e692a543
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.cpp
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#include "virtual.h"
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <errno.h>
+#include <map>
+#include <memory>
+#include <ostream>
+#include <set>
+#include <stdint.h>
+#include <string>
+#include <time.h>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "pipeline/virtual/config_parser.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Virtual)
+
+namespace {
+
+uint64_t currentTimestamp()
+{
+ const auto now = std::chrono::steady_clock::now();
+ auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ now.time_since_epoch());
+
+ return nsecs.count();
+}
+
+} /* namespace */
+
+template<class... Ts>
+struct overloaded : Ts... {
+ using Ts::operator()...;
+};
+template<class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
+
+class VirtualCameraConfiguration : public CameraConfiguration
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ VirtualCameraConfiguration(VirtualCameraData *data);
+
+ Status validate() override;
+
+private:
+ const VirtualCameraData *data_;
+};
+
+class PipelineHandlerVirtual : public PipelineHandler
+{
+public:
+ PipelineHandlerVirtual(CameraManager *manager);
+ ~PipelineHandlerVirtual();
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ static bool created_;
+
+ VirtualCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VirtualCameraData *>(camera->_d());
+ }
+
+ bool initFrameGenerator(Camera *camera);
+
+ DmaBufAllocator dmaBufAllocator_;
+
+ bool resetCreated_ = false;
+};
+
+VirtualCameraData::VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions)
+ : Camera::Private(pipe)
+{
+ config_.resolutions = supportedResolutions;
+ for (const auto &resolution : config_.resolutions) {
+ if (config_.minResolutionSize.isNull() || config_.minResolutionSize > resolution.size)
+ config_.minResolutionSize = resolution.size;
+
+ config_.maxResolutionSize = std::max(config_.maxResolutionSize, resolution.size);
+ }
+
+ properties_.set(properties::PixelArrayActiveAreas,
+ { Rectangle(config_.maxResolutionSize) });
+
+ /* \todo Support multiple streams and pass multi_stream_test */
+ streamConfigs_.resize(kMaxStream);
+}
+
+VirtualCameraConfiguration::VirtualCameraConfiguration(VirtualCameraData *data)
+ : CameraConfiguration(), data_(data)
+{
+}
+
+CameraConfiguration::Status VirtualCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty()) {
+ LOG(Virtual, Error) << "Empty config";
+ return Invalid;
+ }
+
+ /* At most kMaxStream streams are supported */
+ if (config_.size() > VirtualCameraData::kMaxStream) {
+ config_.resize(VirtualCameraData::kMaxStream);
+ status = Adjusted;
+ }
+
+ for (StreamConfiguration &cfg : config_) {
+ bool adjusted = false;
+ bool found = false;
+ for (const auto &resolution : data_->config_.resolutions) {
+ if (resolution.size.width == cfg.size.width &&
+ resolution.size.height == cfg.size.height) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * \todo It's a pipeline's decision to choose a
+ * resolution when the exact one is not supported.
+ * Defining the default logic in PipelineHandler to
+ * find the closest resolution would be nice.
+ */
+ cfg.size = data_->config_.maxResolutionSize;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (cfg.pixelFormat != formats::NV12) {
+ cfg.pixelFormat = formats::NV12;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (adjusted)
+ LOG(Virtual, Info)
+ << "Stream configuration adjusted to " << cfg.toString();
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0, 1);
+ cfg.frameSize = info.frameSize(cfg.size, 1);
+
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+ }
+
+ return status;
+}
+
+/* static */
+bool PipelineHandlerVirtual::created_ = false;
+
+PipelineHandlerVirtual::PipelineHandlerVirtual(CameraManager *manager)
+ : PipelineHandler(manager),
+ dmaBufAllocator_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+}
+
+PipelineHandlerVirtual::~PipelineHandlerVirtual()
+{
+ if (resetCreated_)
+ created_ = false;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVirtual::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ VirtualCameraData *data = cameraData(camera);
+ auto config = std::make_unique<VirtualCameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::VideoRecording:
+ case StreamRole::Viewfinder:
+ break;
+
+ case StreamRole::Raw:
+ default:
+ LOG(Virtual, Error)
+ << "Requested stream role not supported: " << role;
+ config.reset();
+ return config;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ PixelFormat pixelFormat = formats::NV12;
+ streamFormats[pixelFormat] = { { data->config_.minResolutionSize,
+ data->config_.maxResolutionSize } };
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = data->config_.maxResolutionSize;
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+
+ config->addConfiguration(cfg);
+ }
+
+ ASSERT(config->validate() != CameraConfiguration::Invalid);
+
+ return config;
+}
+
+int PipelineHandlerVirtual::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ VirtualCameraData *data = cameraData(camera);
+ for (auto [i, c] : utils::enumerate(*config)) {
+ c.setStream(&data->streamConfigs_[i].stream);
+ /* Start reading the images/generating test patterns */
+ data->streamConfigs_[i].frameGenerator->configure(c.size);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera,
+ Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ if (!dmaBufAllocator_.isValid())
+ return -ENOBUFS;
+
+ const StreamConfiguration &config = stream->configuration();
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+
+ std::vector<unsigned int> planeSizes;
+ for (size_t i = 0; i < info.numPlanes(); ++i)
+ planeSizes.push_back(info.planeSize(config.size, i));
+
+ return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers);
+}
+
+int PipelineHandlerVirtual::start([[maybe_unused]] Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ return 0;
+}
+
+void PipelineHandlerVirtual::stopDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+int PipelineHandlerVirtual::queueRequestDevice(Camera *camera,
+ Request *request)
+{
+ VirtualCameraData *data = cameraData(camera);
+
+ for (auto const &[stream, buffer] : request->buffers()) {
+ bool found = false;
+ /* Map the buffer and fill it with a generated frame */
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (stream == &streamConfig.stream) {
+ found = true;
+ if (streamConfig.frameGenerator->generateFrame(
+ stream->configuration().size, buffer))
+ buffer->_d()->cancel();
+
+ completeBuffer(request, buffer);
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+
+ request->metadata().set(controls::SensorTimestamp, currentTimestamp());
+ completeRequest(request);
+
+ return 0;
+}
+
+bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator)
+{
+ if (created_)
+ return false;
+
+ created_ = true;
+
+ File file(configurationFile("virtual", "virtual.yaml"));
+ bool isOpen = file.open(File::OpenModeFlag::ReadOnly);
+ if (!isOpen) {
+ LOG(Virtual, Error) << "Failed to open config file: " << file.fileName();
+ return false;
+ }
+
+ ConfigParser parser;
+ auto configData = parser.parseConfigFile(file, this);
+ if (configData.empty()) {
+ LOG(Virtual, Error) << "Failed to parse any cameras from the config file: "
+ << file.fileName();
+ return false;
+ }
+
+ /* Configure and register cameras with configData */
+ for (auto &data : configData) {
+ std::set<Stream *> streams;
+ for (auto &streamConfig : data->streamConfigs_)
+ streams.insert(&streamConfig.stream);
+ std::string id = data->config_.id;
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+
+ if (!initFrameGenerator(camera.get())) {
+ LOG(Virtual, Error) << "Failed to initialize frame "
+ << "generator for camera: " << id;
+ continue;
+ }
+
+ registerCamera(std::move(camera));
+ }
+
+ resetCreated_ = true;
+
+ return true;
+}
+
+bool PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
+{
+ auto data = cameraData(camera);
+ auto &frame = data->config_.frame;
+ std::visit(overloaded{
+ [&](TestPattern &testPattern) {
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (testPattern == TestPattern::DiagonalLines)
+ streamConfig.frameGenerator = std::make_unique<DiagonalLinesGenerator>();
+ else
+ streamConfig.frameGenerator = std::make_unique<ColorBarsGenerator>();
+ }
+ },
+ [&](ImageFrames &imageFrames) {
+ for (auto &streamConfig : data->streamConfigs_)
+ streamConfig.frameGenerator = ImageFrameGenerator::create(imageFrames);
+ } },
+ frame);
+
+ for (auto &streamConfig : data->streamConfigs_)
+ if (!streamConfig.frameGenerator)
+ return false;
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
+
+} /* namespace libcamera */
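Nothing in the public API is virtual-specific; assuming camera ids follow the top-level keys of virtual.yaml (the id passed to Camera::create() is the parsed config id), an application can capture from a virtual camera exactly as it would from real hardware. A condensed sketch (error handling elided):

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    int main()
    {
        auto cm = std::make_unique<CameraManager>();
        cm->start();

        std::shared_ptr<Camera> camera = cm->get("Virtual0");
        camera->acquire();

        auto config = camera->generateConfiguration({ StreamRole::Viewfinder });
        config->validate();
        camera->configure(config.get());

        /* Allocate buffers, start the camera, then queue requests. */
    }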
diff --git a/src/libcamera/pipeline/virtual/virtual.h b/src/libcamera/pipeline/virtual/virtual.h
new file mode 100644
index 00000000..92ad7d4a
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#pragma once
+
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+#include "frame_generator.h"
+#include "image_frame_generator.h"
+#include "test_pattern_generator.h"
+
+namespace libcamera {
+
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
+class VirtualCameraData : public Camera::Private
+{
+public:
+ const static unsigned int kMaxStream = 3;
+
+ struct Resolution {
+ Size size;
+ std::vector<int64_t> frameRates;
+ };
+ struct StreamConfig {
+ Stream stream;
+ std::unique_ptr<FrameGenerator> frameGenerator;
+ };
+ /* The config file is parsed to the Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+
+ Size maxResolutionSize;
+ Size minResolutionSize;
+ };
+
+ VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions);
+
+ ~VirtualCameraData() = default;
+
+ Configuration config_;
+
+ std::vector<StreamConfig> streamConfigs_;
+};
+
+} /* namespace libcamera */
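VirtualFrame being a std::variant means the frame source is fixed per camera at parse time: either a TestPattern enum value or an ImageFrames file list. virtual.cpp dispatches on it in initFrameGenerator() with std::visit and the local overloaded{} helper; stripped to its essence, the idiom is:

    VirtualFrame frame = TestPattern::ColorBars;

    std::visit(overloaded{
            [](TestPattern &) { /* build a TestPatternGenerator */ },
            [](ImageFrames &) { /* build an ImageFrameGenerator */ },
        }, frame);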
diff --git a/src/libcamera/pipeline/vivid/meson.build b/src/libcamera/pipeline/vivid/meson.build
new file mode 100644
index 00000000..513de9af
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vivid.cpp',
+])
diff --git a/src/libcamera/pipeline/vivid/vivid.cpp b/src/libcamera/pipeline/vivid/vivid.cpp
new file mode 100644
index 00000000..0340a500
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/vivid.cpp
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * vivid.cpp - Pipeline handler for the vivid capture device
+ */
+
+#include <math.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/*
+ * Explicitly disable the unused-parameter warning in this pipeline handler.
+ *
+ * Parameters are left unused while they are introduced incrementally, so,
+ * for documentation purposes only, we disable this warning so that each
+ * commit can be compiled independently without breaking the flow of the
+ * development additions.
+ *
+ * This is not recommended practice within libcamera, please listen to your
+ * compiler warnings.
+ */
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
+#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
+#define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0)
+#define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+#define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2)
+#define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3)
+#define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4)
+#define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5)
+#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
+#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
+#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(VIVID)
+
+class VividCameraData : public Camera::Private
+{
+public:
+ VividCameraData(PipelineHandler *pipe, MediaDevice *media)
+ : Camera::Private(pipe), media_(media), video_(nullptr)
+ {
+ }
+
+ ~VividCameraData()
+ {
+ delete video_;
+ }
+
+ int init();
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *media_;
+ V4L2VideoDevice *video_;
+ Stream stream_;
+};
+
+class VividCameraConfiguration : public CameraConfiguration
+{
+public:
+ VividCameraConfiguration();
+
+ Status validate() override;
+};
+
+class PipelineHandlerVivid : public PipelineHandler
+{
+public:
+ PipelineHandlerVivid(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ int processControls(VividCameraData *data, Request *request);
+
+ VividCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VividCameraData *>(camera->_d());
+ }
+};
+
+VividCameraConfiguration::VividCameraConfiguration()
+ : CameraConfiguration()
+{
+}
+
+CameraConfiguration::Status VividCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > 1) {
+ config_.resize(1);
+ status = Adjusted;
+ }
+
+ StreamConfiguration &cfg = config_[0];
+
+ /* Adjust the pixel format. */
+ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+ cfg.pixelFormat = cfg.formats().pixelformats()[0];
+ LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
+ status = Adjusted;
+ }
+
+ cfg.bufferCount = 4;
+
+ return status;
+}
+
+PipelineHandlerVivid::PipelineHandlerVivid(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVivid::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VividCameraConfiguration>();
+ VividCameraData *data = cameraData(camera);
+
+ if (roles.empty())
+ return config;
+
+ std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
+ data->video_->formats();
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ std::transform(v4l2Formats.begin(), v4l2Formats.end(),
+ std::inserter(deviceFormats, deviceFormats.begin()),
+ [&](const decltype(v4l2Formats)::value_type &format) {
+ return decltype(deviceFormats)::value_type{
+ format.first.toPixelFormat(),
+ format.second
+ };
+ });
+
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+
+ cfg.pixelFormat = formats::BGR888;
+ cfg.size = { 1280, 720 };
+ cfg.bufferCount = 4;
+
+ config->addConfiguration(cfg);
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerVivid::configure(Camera *camera, CameraConfiguration *config)
+{
+ VividCameraData *data = cameraData(camera);
+ StreamConfiguration &cfg = config->at(0);
+ int ret;
+
+ V4L2DeviceFormat format = {};
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ ret = data->video_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ if (format.size != cfg.size ||
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat)) {
+ LOG(VIVID, Error)
+ << "Requested " << cfg.toString() << ", got "
+ << format.size.toString() << "-"
+ << format.fourcc.toString();
+ return -EINVAL;
+ }
+
+ /* Set initial controls specific to VIVID */
+ ControlList controls(data->video_->controls());
+ controls.set(VIVID_CID_TEST_PATTERN, 0); /* Vertical Colour Bars */
+ controls.set(VIVID_CID_OSD_TEXT_MODE, 0); /* Display all OSD */
+
+ /* Ensure clear colours configured. */
+ controls.set(V4L2_CID_BRIGHTNESS, 128);
+ controls.set(V4L2_CID_CONTRAST, 128);
+ controls.set(V4L2_CID_SATURATION, 128);
+
+ /* Enable movement to visualise buffer updates. */
+ controls.set(VIVID_CID_HOR_MOVEMENT, 5);
+
+ ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ cfg.setStream(&data->stream_);
+ cfg.stride = format.planes[0].bpl;
+
+ return 0;
+}
+
+int PipelineHandlerVivid::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return data->video_->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerVivid::start(Camera *camera, const ControlList *controls)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = data->stream_.configuration().bufferCount;
+
+ int ret = data->video_->importBuffers(count);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->streamOn();
+ if (ret < 0) {
+ data->video_->releaseBuffers();
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerVivid::stopDevice(Camera *camera)
+{
+ VividCameraData *data = cameraData(camera);
+ data->video_->streamOff();
+ data->video_->releaseBuffers();
+}
+
+int PipelineHandlerVivid::processControls(VividCameraData *data, Request *request)
+{
+ ControlList controls(data->video_->controls());
+
+ for (auto it : request->controls()) {
+ unsigned int id = it.first;
+ unsigned int offset;
+ uint32_t cid;
+
+ if (id == controls::Brightness) {
+ cid = V4L2_CID_BRIGHTNESS;
+ offset = 128;
+ } else if (id == controls::Contrast) {
+ cid = V4L2_CID_CONTRAST;
+ offset = 0;
+ } else if (id == controls::Saturation) {
+ cid = V4L2_CID_SATURATION;
+ offset = 0;
+ } else {
+ continue;
+ }
+
+ int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ controls.set(cid, std::clamp(value, 0, 255));
+ }
+
+ for (const auto &ctrl : controls)
+ LOG(VIVID, Debug)
+ << "Setting control " << utils::hex(ctrl.first)
+ << " to " << ctrl.second.toString();
+
+ int ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ return ret;
+}
+
+int PipelineHandlerVivid::queueRequestDevice(Camera *camera, Request *request)
+{
+ VividCameraData *data = cameraData(camera);
+ FrameBuffer *buffer = request->findBuffer(&data->stream_);
+ if (!buffer) {
+ LOG(VIVID, Error)
+ << "Attempt to queue request with invalid stream";
+
+ return -ENOENT;
+ }
+
+ int ret = processControls(data, request);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->queueBuffer(buffer);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+bool PipelineHandlerVivid::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("vivid");
+ dm.add("vivid-000-vid-cap");
+
+ MediaDevice *media = acquireMediaDevice(enumerator, dm);
+ if (!media)
+ return false;
+
+ std::unique_ptr<VividCameraData> data = std::make_unique<VividCameraData>(this, media);
+
+ /* Locate and open the capture video node. */
+ if (data->init())
+ return false;
+
+ /* Create and register the camera. */
+ std::set<Stream *> streams{ &data->stream_ };
+ const std::string id = data->video_->deviceName();
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+int VividCameraData::init()
+{
+ video_ = new V4L2VideoDevice(media_->getEntityByName("vivid-000-vid-cap"));
+ if (video_->open())
+ return -ENODEV;
+
+ video_->bufferReady.connect(this, &VividCameraData::bufferReady);
+
+ /* Initialise the supported controls and properties. */
+ const ControlInfoMap &controls = video_->controls();
+ ControlInfoMap::Map ctrls;
+
+ for (const auto &ctrl : controls) {
+ const ControlId *id;
+ ControlInfo info;
+
+ switch (ctrl.first->id()) {
+ case V4L2_CID_BRIGHTNESS:
+ id = &controls::Brightness;
+ info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
+ break;
+ case V4L2_CID_CONTRAST:
+ id = &controls::Contrast;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ case V4L2_CID_SATURATION:
+ id = &controls::Saturation;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ default:
+ continue;
+ }
+
+ ctrls.emplace(id, info);
+ }
+
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+
+ properties_.set(properties::Location, properties::CameraLocationExternal);
+ properties_.set(properties::Model, "Virtual Video Device");
+
+ return 0;
+}
+
+void VividCameraData::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe()->completeBuffer(request, buffer);
+ pipe()->completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
+
+} /* namespace libcamera */
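processControls() maps libcamera's float control ranges onto vivid's 0-255 V4L2 ranges with value = lroundf(f * 128 + offset). For Brightness, defined in init() above on [-1.0, 1.0] with offset 128, the endpoints work out as follows (the 256 case is why the clamp is needed):

    /* Brightness: f in [-1.0, 1.0], offset 128. */
    int32_t lowest  = lroundf(-1.0f * 128 + 128); /*   0 */
    int32_t neutral = lroundf( 0.0f * 128 + 128); /* 128, the V4L2 default */
    int32_t highest = lroundf( 1.0f * 128 + 128); /* 256 -> clamped to 255 */

    /* Contrast/Saturation: f in [0.0, 2.0], offset 0, so f = 1.0 -> 128. */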
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 254d341f..caa5c20e 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,21 +2,29 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
-#include "pipeline_handler.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include <chrono>
+#include <sys/stat.h>
#include <sys/sysmacros.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/utils.h>
+
#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_manager.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/request.h"
+#include "libcamera/internal/tracepoints.h"
/**
* \file pipeline_handler.h
@@ -32,89 +40,13 @@
* the REGISTER_PIPELINE_HANDLER() macro.
*/
+using namespace std::chrono_literals;
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Pipeline)
/**
- * \class CameraData
- * \brief Base class for platform-specific data associated with a camera
- *
- * The CameraData base abstract class represents platform specific-data
- * a pipeline handler might want to associate with a Camera to access them
- * at a later time.
- *
- * Pipeline handlers are expected to extend this base class with platform
- * specific implementation, associate instances of the derived classes
- * using the setCameraData() method, and access them at a later time
- * with cameraData().
- */
-
-/**
- * \fn CameraData::CameraData(PipelineHandler *pipe)
- * \brief Construct a CameraData instance for the given pipeline handler
- * \param[in] pipe The pipeline handler
- *
- * The reference to the pipeline handler is stored internally, the caller shall
- * guarantee that the pointer remains valid as long as the CameraData instance
- * exists.
- */
-
-/**
- * \var CameraData::camera_
- * \brief The camera related to this CameraData instance
- *
- * The camera_ pointer provides access to the Camera object that this instance
- * is related to. It is set when the Camera is registered with
- * PipelineHandler::registerCamera() and remains valid until the CameraData
- * instance is destroyed.
- */
-
-/**
- * \var CameraData::pipe_
- * \brief The pipeline handler related to this CameraData instance
- *
- * The pipe_ pointer provides access to the PipelineHandler object that this
- * instance is related to. It is set when the CameraData instance is created
- * and remains valid until the instance is destroyed.
- */
-
-/**
- * \var CameraData::queuedRequests_
- * \brief The list of queued and not yet completed request
- *
- * The list of queued request is used to track requests queued in order to
- * ensure completion of all requests when the pipeline handler is stopped.
- *
- * \sa PipelineHandler::queueRequest(), PipelineHandler::stop(),
- * PipelineHandler::completeRequest()
- */
-
-/**
- * \var CameraData::controlInfo_
- * \brief The set of controls supported by the camera
- *
- * The control information shall be initialised by the pipeline handler when
- * creating the camera, and shall not be modified afterwards.
- */
-
-/**
- * \var CameraData::properties_
- * \brief The list of properties supported by the camera
- *
- * The list of camera properties shall be initialised by the pipeline handler
- * when creating the camera, and shall not be modified afterwards.
- */
-
-/**
- * \var CameraData::ipa_
- * \brief The IPA module used by the camera
- *
- * Reference to the Image Processing Algorithms (IPA) operating on the camera's
- * stream(s). If no IPA exists for the camera, this field is set to nullptr.
- */
-
-/**
* \class PipelineHandler
* \brief Create and manage cameras based on a set of media devices
*
@@ -133,17 +65,16 @@ LOG_DEFINE_CATEGORY(Pipeline)
*
* In order to honour the std::enable_shared_from_this<> contract,
* PipelineHandler instances shall never be constructed manually, but always
- * through the PipelineHandlerFactory::create() method implemented by the
- * respective factories.
+ * through the PipelineHandlerFactoryBase::create() function.
*/
PipelineHandler::PipelineHandler(CameraManager *manager)
- : manager_(manager)
+ : manager_(manager), useCount_(0)
{
}
PipelineHandler::~PipelineHandler()
{
- for (std::shared_ptr<MediaDevice> media : mediaDevices_)
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->release();
}
@@ -181,7 +112,7 @@ PipelineHandler::~PipelineHandler()
*/
/**
- * \brief Search and acquire a MediDevice matching a device pattern
+ * \brief Search and acquire a MediaDevice matching a device pattern
* \param[in] enumerator Enumerator containing all media devices in the system
* \param[in] dm Device match pattern
*
@@ -212,66 +143,122 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
}
/**
- * \brief Lock all media devices acquired by the pipeline
+ * \brief Acquire exclusive access to the pipeline handler for the process
*
- * This method shall not be called from pipeline handler implementation, as the
- * Camera class handles locking directly.
+ * This function locks all the media devices used by the pipeline to ensure
+ * that no other process can access them concurrently.
*
- * \context This function is \threadsafe.
+ * Access to a pipeline handler may be acquired recursively from within the
+ * same process. Every successful acquire() call shall be matched with a
+ * release() call. This allows concurrent access to the same pipeline handler
+ * from different cameras within the same process.
+ *
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
+ *
+ * \context This function is called from the CameraManager thread.
*
- * \return True if the devices could be locked, false otherwise
- * \sa unlock()
- * \sa MediaDevice::lock()
+ * \return True if the pipeline handler was acquired, false if another process
+ * has already acquired it
+ * \sa release()
*/
-bool PipelineHandler::lock()
+bool PipelineHandler::acquire(Camera *camera)
{
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
- if (!media->lock()) {
- unlock();
- return false;
+ if (useCount_ == 0) {
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ if (!media->lock()) {
+ unlockMediaDevices();
+ return false;
+ }
}
}
+ if (!acquireDevice(camera)) {
+ if (useCount_ == 0)
+ unlockMediaDevices();
+
+ return false;
+ }
+
+ ++useCount_;
return true;
}
/**
- * \brief Unlock all media devices acquired by the pipeline
+ * \brief Release exclusive access to the pipeline handler
+ * \param[in] camera The camera for which to release data
*
- * This method shall not be called from pipeline handler implementation, as the
- * Camera class handles locking directly.
+ * This function releases access to the pipeline handler previously acquired by
+ * a call to acquire(). Every release() call shall match a previous successful
+ * acquire() call. Calling this function on a pipeline handler that hasn't been
+ * acquired results in undefined behaviour.
*
- * \context This function is \threadsafe.
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
+ *
+ * \context This function is called from the CameraManager thread.
*
- * \sa lock()
+ * \sa acquire()
*/
-void PipelineHandler::unlock()
+void PipelineHandler::release(Camera *camera)
{
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
- media->unlock();
+ ASSERT(useCount_);
+
+ releaseDevice(camera);
+
+ if (useCount_ == 1)
+ unlockMediaDevices();
+
+ --useCount_;
}
/**
- * \brief Retrieve the list of controls for a camera
- * \param[in] camera The camera
- * \context This function is \threadsafe.
- * \return A ControlInfoMap listing the controls support by \a camera
+ * \brief Acquire resources associated with this camera
+ * \param[in] camera The camera for which to acquire resources
+ *
+ * Pipeline handlers may override this in order to acquire resources, such as
+ * opening devices and allocating buffers, when a camera is acquired.
+ *
+ * This is used by the uvcvideo pipeline handler to delay opening /dev/video#
+ * until the camera is acquired to avoid excess power consumption. The delayed
+ * opening of /dev/video# is a special case because the kernel uvcvideo driver
+ * powers on the USB device as soon as /dev/video# is opened. This behavior
+ * should *not* be copied by other pipeline handlers.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \return True on success, false on failure
+ * \sa releaseDevice()
*/
-const ControlInfoMap &PipelineHandler::controls(Camera *camera)
+bool PipelineHandler::acquireDevice([[maybe_unused]] Camera *camera)
{
- CameraData *data = cameraData(camera);
- return data->controlInfo_;
+ return true;
}
/**
- * \brief Retrieve the list of properties for a camera
- * \param[in] camera The camera
- * \return A ControlList of properties supported by \a camera
+ * \brief Release resources associated with this camera
+ * \param[in] camera The camera for which to release resources
+ *
+ * Pipeline handlers may override this in order to perform cleanup operations
+ * when a camera is released, such as freeing memory.
+ *
+ * This is called once for every camera that is released. If there are resources
+ * shared by multiple cameras then the pipeline handler must take care to not
+ * release them until releaseDevice() has been called for all previously
+ * acquired cameras.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \sa acquireDevice()
*/
-const ControlList &PipelineHandler::properties(Camera *camera)
+void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+void PipelineHandler::unlockMediaDevices()
{
- CameraData *data = cameraData(camera);
- return data->properties_;
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
+ media->unlock();
}
/**
@@ -294,8 +281,7 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* handler.
*
* \return A valid CameraConfiguration if the requested roles can be satisfied,
- * or a null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * or a null pointer otherwise.
*/
/**
@@ -310,13 +296,13 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* application.
*
* The configuration is guaranteed to have been validated with
- * CameraConfiguration::valid(). The pipeline handler implementation shall not
- * perform further validation and may rely on any custom field stored in its
+ * CameraConfiguration::validate(). The pipeline handler implementation shall
+ * not perform further validation and may rely on any custom field stored in its
* custom CameraConfiguration derived class.
*
* When configuring the camera the pipeline handler shall associate a Stream
* instance to each StreamConfiguration entry in the CameraConfiguration using
- * the StreamConfiguration::setStream() method.
+ * the StreamConfiguration::setStream() function.
*
* \context This function is called from the CameraManager thread.
*
@@ -330,13 +316,13 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* \param[in] stream The stream to allocate buffers for
* \param[out] buffers Array of buffers successfully allocated
*
- * This method allocates buffers for the \a stream from the devices associated
+ * This function allocates buffers for the \a stream from the devices associated
* with the stream in the corresponding pipeline handler. Those buffers shall be
* suitable to be added to a Request for the stream, and shall be mappable to
* the CPU through their associated dmabufs with mmap().
*
- * The method may only be called after the Camera has been configured and before
- * it gets started, or after it gets stopped. It shall be called only for
+ * The function may only be called after the Camera has been configured and
+ * before it gets started, or after it gets stopped. It shall be called only for
* streams that are part of the active camera configuration.
*
* The only intended caller is Camera::exportFrameBuffers().
@@ -351,10 +337,11 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* \fn PipelineHandler::start()
* \brief Start capturing from a group of streams
* \param[in] camera The camera to start
+ * \param[in] controls Controls to be applied before starting the Camera
*
* Start the group of streams that have been configured for capture by
- * \a configure(). The intended caller of this method is the Camera class which
- * will in turn be called from the application to indicate that it has
+ * \a configure(). The intended caller of this function is the Camera class
+ * which will in turn be called from the application to indicate that it has
* configured the streams and is ready to capture.
*
* \context This function is called from the CameraManager thread.
@@ -363,44 +350,144 @@ const ControlList &PipelineHandler::properties(Camera *camera)
*/
/**
- * \fn PipelineHandler::stop()
- * \brief Stop capturing from all running streams
+ * \brief Stop capturing from all running streams and cancel pending requests
* \param[in] camera The camera to stop
*
- * This method stops capturing and processing requests immediately. All pending
- * requests are cancelled and complete immediately in an error state.
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete immediately in an error state.
*
* \context This function is called from the CameraManager thread.
*/
+void PipelineHandler::stop(Camera *camera)
+{
+ /* Stop the pipeline handler and let the queued requests complete. */
+ stopDevice(camera);
+
+ /* Cancel and signal as complete all waiting requests. */
+ while (!waitingRequests_.empty()) {
+ Request *request = waitingRequests_.front();
+ waitingRequests_.pop();
+ cancelRequest(request);
+ }
+
+ /* Make sure no requests are pending. */
+ Camera::Private *data = camera->_d();
+ ASSERT(data->queuedRequests_.empty());
+
+ data->requestSequence_ = 0;
+}
+
+/**
+ * \fn PipelineHandler::stopDevice()
+ * \brief Stop capturing from all running streams
+ * \param[in] camera The camera to stop
+ *
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete immediately in an error state.
+ */
+
+/**
+ * \brief Determine if the camera has any requests pending
+ * \param[in] camera The camera to check
+ *
+ * This function determines if there are any requests queued to the pipeline
+ * awaiting processing.
+ *
+ * \return True if there are pending requests, or false otherwise
+ */
+bool PipelineHandler::hasPendingRequests(const Camera *camera) const
+{
+ return !camera->_d()->queuedRequests_.empty();
+}
+
+/**
+ * \fn PipelineHandler::registerRequest()
+ * \brief Register a request for use by the pipeline handler
+ * \param[in] request The request to register
+ *
+ * This function is called when the request is created, and allows the pipeline
+ * handler to perform any one-time initialization it requires for the request.
+ */
+void PipelineHandler::registerRequest(Request *request)
+{
+ /*
+ * Connect the request prepared signal to notify the pipeline handler
+ * when a request is ready to be processed.
+ */
+ request->_d()->prepared.connect(this, &PipelineHandler::doQueueRequests);
+}
/**
* \fn PipelineHandler::queueRequest()
- * \brief Queue a request to the camera
- * \param[in] camera The camera to queue the request to
+ * \brief Queue a request
* \param[in] request The request to queue
*
- * This method queues a capture request to the pipeline handler for processing.
- * The request is first added to the internal list of queued requests, and
- * then passed to the pipeline handler with a call to queueRequestDevice().
+ * This function queues a capture request to the pipeline handler for
+ * processing. The request is first added to the internal list of waiting
+ * requests, which have to be prepared before they can be queued to the
+ * pipeline handler.
+ *
+ * The queue of waiting requests is iterated and all prepared requests are
+ * passed to the pipeline handler in the same order they have been queued by
+ * calling this function.
+ *
+ * If a Request fails during the preparation phase or if the pipeline handler
+ * fails to queue the request to the hardware, the request is cancelled.
*
* Keeping track of queued requests ensures automatic completion of all requests
* when the pipeline handler is stopped with stop(). Request completion shall be
- * signalled by the pipeline handler using the completeRequest() method.
+ * signalled by the pipeline handler using the completeRequest() function.
*
* \context This function is called from the CameraManager thread.
- *
- * \return 0 on success or a negative error code otherwise
*/
-int PipelineHandler::queueRequest(Camera *camera, Request *request)
+void PipelineHandler::queueRequest(Request *request)
{
- CameraData *data = cameraData(camera);
+ LIBCAMERA_TRACEPOINT(request_queue, request);
+
+ waitingRequests_.push(request);
+
+ request->_d()->prepare(300ms);
+}
+
+/**
+ * \brief Queue one request to the device
+ */
+void PipelineHandler::doQueueRequest(Request *request)
+{
+ LIBCAMERA_TRACEPOINT(request_device_queue, request);
+
+ Camera *camera = request->_d()->camera();
+ Camera::Private *data = camera->_d();
data->queuedRequests_.push_back(request);
+ request->_d()->sequence_ = data->requestSequence_++;
+
+ if (request->_d()->cancelled_) {
+ completeRequest(request);
+ return;
+ }
+
int ret = queueRequestDevice(camera, request);
if (ret)
- data->queuedRequests_.remove(request);
+ cancelRequest(request);
+}
+
+/**
+ * \brief Queue prepared requests to the device
+ *
+ * Iterate the list of waiting requests and queue them to the device one
+ * by one if they have been prepared.
+ */
+void PipelineHandler::doQueueRequests()
+{
+ while (!waitingRequests_.empty()) {
+ Request *request = waitingRequests_.front();
+ if (!request->_d()->prepared_)
+ break;
- return ret;
+ doQueueRequest(request);
+ waitingRequests_.pop();
+ }
}
/**
@@ -409,7 +496,7 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
* \param[in] camera The camera to queue the request to
* \param[in] request The request to queue
*
- * This method queues a capture request to the device for processing. The
+ * This function queues a capture request to the device for processing. The
* request contains a set of buffers associated with streams and a set of
* parameters. The pipeline handler shall program the device to ensure that the
* parameters will be applied to the frames captured in the buffers provided in
@@ -422,12 +509,11 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
/**
* \brief Complete a buffer for a request
- * \param[in] camera The camera the request belongs to
* \param[in] request The request the buffer belongs to
* \param[in] buffer The buffer that has completed
*
- * This method shall be called by pipeline handlers to signal completion of the
- * \a buffer part of the \a request. It notifies applications of buffer
+ * This function shall be called by pipeline handlers to signal completion of
+ * the \a buffer part of the \a request. It notifies applications of buffer
* completion and updates the request's internal buffer tracking. The request
* is not completed automatically when the last buffer completes to give
* pipeline handlers a chance to perform any operation that may still be
@@ -438,33 +524,34 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
* \return True if all buffers contained in the request have completed, false
* otherwise
*/
-bool PipelineHandler::completeBuffer(Camera *camera, Request *request,
- FrameBuffer *buffer)
+bool PipelineHandler::completeBuffer(Request *request, FrameBuffer *buffer)
{
+ Camera *camera = request->_d()->camera();
camera->bufferCompleted.emit(request, buffer);
- return request->completeBuffer(buffer);
+ return request->_d()->completeBuffer(buffer);
}
/**
* \brief Signal request completion
- * \param[in] camera The camera that the request belongs to
* \param[in] request The request that has completed
*
- * The pipeline handler shall call this method to notify the \a camera that the
- * request has completed. The request is deleted and shall not be accessed once
- * this method returns.
+ * The pipeline handler shall call this function to notify the \a camera that
+ * the request has completed. The request is no longer managed by the pipeline
+ * handler and shall not be accessed once this function returns.
*
- * This method ensures that requests will be returned to the application in
+ * This function ensures that requests will be returned to the application in
* submission order, the pipeline handler may call it on any complete request
* without any ordering constraint.
*
* \context This function shall be called from the CameraManager thread.
*/
-void PipelineHandler::completeRequest(Camera *camera, Request *request)
+void PipelineHandler::completeRequest(Request *request)
{
- request->complete();
+ Camera *camera = request->_d()->camera();
+
+ request->_d()->complete();
- CameraData *data = cameraData(camera);
+ Camera::Private *data = camera->_d();
while (!data->queuedRequests_.empty()) {
Request *req = data->queuedRequests_.front();
@@ -478,31 +565,120 @@ void PipelineHandler::completeRequest(Camera *camera, Request *request)
}
/**
+ * \brief Cancel request and signal its completion
+ * \param[in] request The request to cancel
+ *
+ * This function cancels and completes the request. The same rules as for
+ * completeRequest() apply.
+ */
+void PipelineHandler::cancelRequest(Request *request)
+{
+ request->_d()->cancel();
+ completeRequest(request);
+}
+
+/**
+ * \brief Retrieve the absolute path to a platform configuration file
+ * \param[in] subdir The pipeline handler specific subdirectory name
+ * \param[in] name The configuration file name
+ *
+ * This function locates a named platform configuration file and returns
+ * its absolute path to the pipeline handler. It searches the following
+ * directories, in order:
+ *
+ * - If libcamera is not installed, the src/libcamera/pipeline/\<subdir\>/data/
+ *   directory within the source tree; otherwise
+ * - The system data (share/libcamera/pipeline/\<subdir\>) directory.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * \return The full path to the pipeline handler configuration file, or an empty
+ * string if no configuration file can be found
+ */
+std::string PipelineHandler::configurationFile(const std::string &subdir,
+ const std::string &name) const
+{
+ std::string confPath;
+ struct stat statbuf;
+ int ret;
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding pipeline handler.
+ */
+ std::string confDir = root + "src/libcamera/pipeline/";
+ confPath = confDir + subdir + "/data/" + name;
+
+ LOG(Pipeline, Info)
+ << "libcamera is not installed. Loading platform configuration file from '"
+ << confPath << "'";
+ } else {
+ /* Else look in the system locations. */
+ confPath = std::string(LIBCAMERA_DATA_DIR)
+ + "/pipeline/" + subdir + '/' + name;
+ }
+
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+
+ return std::string();
+}
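
For illustration, a pipeline handler could locate its platform configuration as follows; the "example" subdirectory and "sensor.yaml" file name here are hypothetical:

    std::string path = configurationFile("example", "sensor.yaml");
    if (path.empty()) {
        /* No configuration found; fall back to built-in defaults. */
    } else {
        /* Parse the configuration file at 'path'. */
    }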
+
+/**
* \brief Register a camera to the camera manager and pipeline handler
* \param[in] camera The camera to be added
- * \param[in] data Pipeline-specific data for the camera
- * \param[in] devnum Device number of the camera (optional)
*
- * This method is called by pipeline handlers to register the cameras they
- * handle with the camera manager. It associates the pipeline-specific \a data
- * with the camera, for later retrieval with cameraData(). Ownership of \a data
- * is transferred to the PipelineHandler.
- *
- * \a devnum is the device number (as returned by makedev) that the \a camera
- * is to be associated with. This is for the V4L2 compatibility layer to map
- * device nodes to Camera instances based on the device number
- * registered by this method in \a devnum.
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager.
*
* \context This function shall be called from the CameraManager thread.
*/
-void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera,
- std::unique_ptr<CameraData> data,
- dev_t devnum)
+void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
{
- data->camera_ = camera.get();
- cameraData_[camera.get()] = std::move(data);
cameras_.push_back(camera);
- manager_->addCamera(std::move(camera), devnum);
+
+ if (mediaDevices_.empty()) {
+ /*
+ * For virtual devices with no MediaDevice, there are no system
+ * devices to register.
+ */
+ manager_->_d()->addCamera(std::move(camera));
+ return;
+ }
+
+ /*
+ * Walk the entity list and map the devnums of all capture video nodes
+ * to the camera.
+ */
+ std::vector<int64_t> devnums;
+ for (const std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ for (const MediaEntity *entity : media->entities()) {
+ if (entity->pads().size() == 1 &&
+ (entity->pads()[0]->flags() & MEDIA_PAD_FL_SINK) &&
+ entity->function() == MEDIA_ENT_F_IO_V4L) {
+ devnums.push_back(makedev(entity->deviceMajor(),
+ entity->deviceMinor()));
+ }
+ }
+ }
+
+ /*
+ * Store the associated devices as a property of the camera to allow
+ * systems to identify which devices are managed by libcamera.
+ */
+ Camera::Private *data = camera->_d();
+ data->properties_.set(properties::SystemDevices, devnums);
+
+ manager_->_d()->addCamera(std::move(camera));
}
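
Applications can then discover which system devices a camera uses by reading the property. A sketch, assuming the std::optional-returning ControlList::get() accessor:

    #include <iostream>
    #include <sys/sysmacros.h>

    #include <libcamera/camera.h>
    #include <libcamera/property_ids.h>

    void printDevices(const libcamera::Camera *camera)
    {
        const auto devices =
            camera->properties().get(libcamera::properties::SystemDevices);
        if (!devices)
            return;

        /* Each value is a dev_t packing major and minor numbers. */
        for (int64_t dev : *devices)
            std::cout << major(dev) << ":" << minor(dev) << std::endl;
    }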
/**
@@ -519,7 +695,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera,
*/
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
- media->disconnected.connect(this, &PipelineHandler::mediaDeviceDisconnected);
+ media->disconnected.connect(this, [this, media] { mediaDeviceDisconnected(media); });
}
/**
@@ -549,30 +725,28 @@ void PipelineHandler::mediaDeviceDisconnected(MediaDevice *media)
*/
void PipelineHandler::disconnect()
{
- for (std::weak_ptr<Camera> ptr : cameras_) {
+ /*
+ * Each camera holds a reference to its associated pipeline handler
+ * instance. Hence, when the last camera is dropped, the pipeline
+ * handler will get destroyed by the last manager_->removeCamera(camera)
+ * call in the loop below.
+ *
+ * This is acceptable as long as we make sure that the code path does not
+ * access any member of the (already destroyed) pipeline handler instance
+ * afterwards. Therefore, we move the cameras_ vector to a local temporary
+ * container, rather than explicitly running cameras_.clear() at the end,
+ * to avoid accessing freed memory.
+ */
+ std::vector<std::weak_ptr<Camera>> cameras{ std::move(cameras_) };
+
+ for (const std::weak_ptr<Camera> &ptr : cameras) {
std::shared_ptr<Camera> camera = ptr.lock();
if (!camera)
continue;
camera->disconnect();
- manager_->removeCamera(camera.get());
+ manager_->_d()->removeCamera(camera);
}
-
- cameras_.clear();
-}
-
-/**
- * \brief Retrieve the pipeline-specific data associated with a Camera
- * \param[in] camera The camera whose data to retrieve
- * \return A pointer to the pipeline-specific data passed to registerCamera().
- * The returned pointer is a borrowed reference and is guaranteed to remain
- * valid until the pipeline handler is destroyed. It shall not be deleted
- * manually by the caller.
- */
-CameraData *PipelineHandler::cameraData(const Camera *camera)
-{
- ASSERT(cameraData_.count(camera));
- return cameraData_[camera].get();
}
/**
@@ -592,27 +766,32 @@ CameraData *PipelineHandler::cameraData(const Camera *camera)
*/
/**
- * \class PipelineHandlerFactory
- * \brief Registration of PipelineHandler classes and creation of instances
+ * \fn PipelineHandler::cameraManager() const
+ * \brief Retrieve the CameraManager that this pipeline handler belongs to
+ * \context This function is \threadsafe.
+ * \return The CameraManager for this pipeline handler
+ */
+
+/**
+ * \class PipelineHandlerFactoryBase
+ * \brief Base class for pipeline handler factories
*
- * To facilitate discovery and instantiation of PipelineHandler classes, the
- * PipelineHandlerFactory class maintains a registry of pipeline handler
- * classes. Each PipelineHandler subclass shall register itself using the
- * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
- * instance of a PipelineHandlerFactory subclass and register it with the
- * static list of factories.
+ * The PipelineHandlerFactoryBase class is the base of all specializations of
+ * the PipelineHandlerFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a pipeline handler factory
+ * \brief Construct a pipeline handler factory base
* \param[in] name Name of the pipeline handler class
*
- * Creating an instance of the factory registers is with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
* The factory \a name is used for debug purpose and shall be unique.
*/
-PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+PipelineHandlerFactoryBase::PipelineHandlerFactoryBase(const char *name)
: name_(name)
{
registerType(this);
@@ -625,15 +804,15 @@ PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
* \return A shared pointer to a new instance of the PipelineHandler subclass
* corresponding to the factory
*/
-std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
+std::shared_ptr<PipelineHandler> PipelineHandlerFactoryBase::create(CameraManager *manager) const
{
- PipelineHandler *handler = createInstance(manager);
+ std::unique_ptr<PipelineHandler> handler = createInstance(manager);
handler->name_ = name_.c_str();
- return std::shared_ptr<PipelineHandler>(handler);
+ return std::shared_ptr<PipelineHandler>(std::move(handler));
}
/**
- * \fn PipelineHandlerFactory::name()
+ * \fn PipelineHandlerFactoryBase::name()
* \brief Retrieve the factory name
* \return The factory name
*/
@@ -645,47 +824,89 @@ std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *m
* The caller is responsible to guarantee the uniqueness of the pipeline handler
* name.
*/
-void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
+void PipelineHandlerFactoryBase::registerType(PipelineHandlerFactoryBase *factory)
{
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
+ std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
factories.push_back(factory);
-
- LOG(Pipeline, Debug)
- << "Registered pipeline handler \"" << factory->name() << "\"";
}
/**
* \brief Retrieve the list of all pipeline handler factories
- *
- * The static factories map is defined inside the function to ensures it gets
- * initialized on first use, without any dependency on link order.
- *
* \return the list of pipeline handler factories
*/
-std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
+std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories()
{
- static std::vector<PipelineHandlerFactory *> factories;
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on
+ * link order.
+ */
+ static std::vector<PipelineHandlerFactoryBase *> factories;
return factories;
}
/**
- * \fn PipelineHandlerFactory::createInstance()
- * \brief Create an instance of the PipelineHandler corresponding to the factory
- * \param[in] manager The camera manager
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
+ * \return The factory of the pipeline with name \a name, or nullptr if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
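
This allows, for instance, instantiating a specific pipeline handler by name; an illustrative sketch, with manager an assumed CameraManager pointer:

    const libcamera::PipelineHandlerFactoryBase *factory =
        libcamera::PipelineHandlerFactoryBase::getFactoryByName("uvcvideo");
    if (factory) {
        std::shared_ptr<libcamera::PipelineHandler> handler =
            factory->create(manager);
        /* ... match devices with the handler ... */
    }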
+
+/**
+ * \class PipelineHandlerFactory
+ * \brief Registration of PipelineHandler classes and creation of instances
+ * \tparam _PipelineHandler The pipeline handler class type for this factory
+ *
+ * To facilitate discovery and instantiation of PipelineHandler classes, the
+ * PipelineHandlerFactory class implements auto-registration of pipeline
+ * handlers. Each PipelineHandler subclass shall register itself using the
+ * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
+ * instance of a PipelineHandlerFactory and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+ * \brief Construct a pipeline handler factory
+ * \param[in] name Name of the pipeline handler class
*
- * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
- * macro. It creates a pipeline handler instance associated with the camera
- * \a manager.
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the factories() function.
*
- * \return a pointer to a newly constructed instance of the PipelineHandler
- * subclass corresponding to the factory
+ * The factory \a name is used for debug purpose and shall be unique.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::createInstance() const
+ * \brief Create an instance of the PipelineHandler corresponding to the factory
+ * \param[in] manager The camera manager
+ * \return A unique pointer to a newly constructed instance of the
+ * PipelineHandler subclass corresponding to the factory
*/
/**
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
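
A typical registration, using a hypothetical handler class, looks like:

    class PipelineHandlerExample : public libcamera::PipelineHandler
    {
        /* ... implementation of the required virtual functions ... */
    };

    REGISTER_PIPELINE_HANDLER(PipelineHandlerExample, "example")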
diff --git a/src/libcamera/pixelformats.cpp b/src/libcamera/pixel_format.cpp
index 87557d98..314179a8 100644
--- a/src/libcamera/pixelformats.cpp
+++ b/src/libcamera/pixel_format.cpp
@@ -2,14 +2,17 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixelformats.cpp - libcamera pixel formats
+ * libcamera Pixel Format
*/
-#include <libcamera/pixelformats.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
/**
- * \file pixelformats.h
- * \brief libcamera pixel formats
+ * \file pixel_format.h
+ * \brief libcamera pixel format
*/
namespace libcamera {
@@ -19,31 +22,26 @@ namespace libcamera {
* \brief libcamera image pixel format
*
* The PixelFormat type describes the format of images in the public libcamera
- * API. It stores a FourCC value as a 32-bit unsigned integer and a set of
- * modifiers. The FourCC and modifiers values are defined in the Linux kernel
- * DRM/KMS API (see linux/drm_fourcc.h).
+ * API. It stores a FourCC value as a 32-bit unsigned integer and a modifier.
+ * The FourCC and modifier values are defined in the Linux kernel DRM/KMS API
+ * (see linux/drm_fourcc.h). Constant expressions for all pixel formats
+ * supported by libcamera are available in libcamera/formats.h.
*/
/**
+ * \fn PixelFormat::PixelFormat()
* \brief Construct a PixelFormat with an invalid format
*
* PixelFormat instances constructed with the default constructor are
* invalid, calling the isValid() function returns false.
*/
-PixelFormat::PixelFormat()
- : fourcc_(0)
-{
-}
/**
- * \brief Construct a PixelFormat from a DRM FourCC and a set of modifiers
+ * \fn PixelFormat::PixelFormat(uint32_t fourcc, uint64_t modifier)
+ * \brief Construct a PixelFormat from a DRM FourCC and a modifier
* \param[in] fourcc A DRM FourCC
- * \param[in] modifiers A set of DRM FourCC modifiers
+ * \param[in] modifier A DRM FourCC modifier
*/
-PixelFormat::PixelFormat(uint32_t fourcc, const std::set<uint64_t> &modifiers)
- : fourcc_(fourcc), modifiers_(modifiers)
-{
-}
/**
* \brief Compare pixel formats for equality
@@ -51,7 +49,7 @@ PixelFormat::PixelFormat(uint32_t fourcc, const std::set<uint64_t> &modifiers)
*/
bool PixelFormat::operator==(const PixelFormat &other) const
{
- return fourcc_ == other.fourcc() && modifiers_ == other.modifiers_;
+ return fourcc_ == other.fourcc() && modifier_ == other.modifier_;
}
/**
@@ -70,7 +68,7 @@ bool PixelFormat::operator<(const PixelFormat &other) const
return true;
if (fourcc_ > other.fourcc_)
return false;
- return modifiers_ < modifiers_;
+ return modifier_ < other.modifier_;
}
/**
@@ -97,9 +95,9 @@ bool PixelFormat::operator<(const PixelFormat &other) const
*/
/**
- * \fn PixelFormat::modifiers() const
- * \brief Retrieve the pixel format modifiers
- * \return Set of DRM modifiers
+ * \fn PixelFormat::modifier() const
+ * \brief Retrieve the pixel format modifier
+ * \return DRM modifier
*/
/**
@@ -108,9 +106,50 @@ bool PixelFormat::operator<(const PixelFormat &other) const
*/
std::string PixelFormat::toString() const
{
- char str[11];
- snprintf(str, 11, "0x%08x", fourcc_);
- return str;
+ const PixelFormatInfo &info = PixelFormatInfo::info(*this);
+
+ if (!info.isValid()) {
+ if (*this == PixelFormat())
+ return "<INVALID>";
+
+ char fourcc[7] = { '<',
+ static_cast<char>(fourcc_),
+ static_cast<char>(fourcc_ >> 8),
+ static_cast<char>(fourcc_ >> 16),
+ static_cast<char>(fourcc_ >> 24),
+ '>' };
+
+ for (unsigned int i = 1; i < 5; i++) {
+ if (!isprint(fourcc[i]))
+ fourcc[i] = '.';
+ }
+
+ return fourcc;
+ }
+
+ return info.name;
+}
+
+/**
+ * \brief Create a PixelFormat from a string
+ * \param[in] name The name of the format
+ * \return The PixelFormat represented by the \a name if known, or an
+ * invalid pixel format otherwise.
+ */
+PixelFormat PixelFormat::fromString(const std::string &name)
+{
+ return PixelFormatInfo::info(name).format;
+}
+
+/**
+ * \brief Insert a text representation of a PixelFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const PixelFormat &f)
+{
+ out << f.toString();
+ return out;
}
} /* namespace libcamera */
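
The string conversions round-trip for formats known to libcamera. For example:

    #include <iostream>

    #include <libcamera/formats.h>
    #include <libcamera/pixel_format.h>

    int main()
    {
        libcamera::PixelFormat fmt = libcamera::formats::NV12;
        std::cout << fmt << std::endl;  /* Prints "NV12". */

        /* fromString() returns an invalid PixelFormat for unknown names. */
        libcamera::PixelFormat parsed =
            libcamera::PixelFormat::fromString("NV12");

        return parsed == fmt ? 0 : 1;
    }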
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 3b4d0f10..bc9833f4 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
-#include "process.h"
+#include "libcamera/internal/process.h"
#include <algorithm>
#include <dirent.h>
#include <fcntl.h>
-#include <iostream>
#include <list>
#include <signal.h>
#include <string.h>
@@ -20,10 +19,9 @@
#include <unistd.h>
#include <vector>
-#include <libcamera/event_notifier.h>
-
-#include "log.h"
-#include "utils.h"
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
/**
* \file process.h
@@ -41,28 +39,6 @@ LOG_DEFINE_CATEGORY(Process)
* The ProcessManager singleton keeps track of all created Process instances,
* and manages the signal handling involved in terminating processes.
*/
-class ProcessManager
-{
-public:
- void registerProcess(Process *proc);
-
- static ProcessManager *instance();
-
- int writePipe() const;
-
- const struct sigaction &oldsa() const;
-
-private:
- void sighandler(EventNotifier *notifier);
- ProcessManager();
- ~ProcessManager();
-
- std::list<Process *> processes_;
-
- struct sigaction oldsa_;
- EventNotifier *sigEvent_;
- int pipe_[2];
-};
namespace {
@@ -89,10 +65,10 @@ void sigact(int signal, siginfo_t *info, void *ucontext)
} /* namespace */
-void ProcessManager::sighandler(EventNotifier *notifier)
+void ProcessManager::sighandler()
{
char data;
- ssize_t ret = read(pipe_[0], &data, sizeof(data));
+ ssize_t ret = read(pipe_[0].get(), &data, sizeof(data));
if (ret < 0) {
LOG(Process, Error)
<< "Failed to read byte from signal handler pipe";
@@ -118,7 +94,7 @@ void ProcessManager::sighandler(EventNotifier *notifier)
* \brief Register process with process manager
* \param[in] proc Process to register
*
- * This method registers the \a proc with the process manager. It
+ * This function registers the \a proc with the process manager. It
* shall be called by the parent process after successfully forking, in
* order to let the parent signal process termination.
*/
@@ -127,8 +103,20 @@ void ProcessManager::registerProcess(Process *proc)
processes_.push_back(proc);
}
+ProcessManager *ProcessManager::self_ = nullptr;
+
+/**
+ * \brief Construct a ProcessManager instance
+ *
+ * The ProcessManager class is meant to only be instantiated once, by the
+ * CameraManager.
+ */
ProcessManager::ProcessManager()
{
+ if (self_)
+ LOG(Process, Fatal)
+ << "Multiple ProcessManager objects are not allowed";
+
sigaction(SIGCHLD, NULL, &oldsa_);
struct sigaction sa;
@@ -140,52 +128,58 @@ ProcessManager::ProcessManager()
sigaction(SIGCHLD, &sa, NULL);
- if (pipe2(pipe_, O_CLOEXEC | O_DIRECT | O_NONBLOCK))
+ int pipe[2];
+ if (pipe2(pipe, O_CLOEXEC | O_DIRECT | O_NONBLOCK))
LOG(Process, Fatal)
<< "Failed to initialize pipe for signal handling";
- sigEvent_ = new EventNotifier(pipe_[0], EventNotifier::Read);
+
+ pipe_[0] = UniqueFD(pipe[0]);
+ pipe_[1] = UniqueFD(pipe[1]);
+
+ sigEvent_ = new EventNotifier(pipe_[0].get(), EventNotifier::Read);
sigEvent_->activated.connect(this, &ProcessManager::sighandler);
+
+ self_ = this;
}
ProcessManager::~ProcessManager()
{
sigaction(SIGCHLD, &oldsa_, NULL);
+
delete sigEvent_;
- close(pipe_[0]);
- close(pipe_[1]);
+
+ self_ = nullptr;
}
/**
* \brief Retrieve the Process manager instance
*
- * The ProcessManager is a singleton and can't be constructed manually. This
- * method shall instead be used to retrieve the single global instance of the
- * manager.
+ * The ProcessManager is constructed by the CameraManager. This function shall
+ * be used to retrieve the single instance of the manager.
*
* \return The Process manager instance
*/
ProcessManager *ProcessManager::instance()
{
- static ProcessManager processManager;
- return &processManager;
+ return self_;
}
/**
* \brief Retrieve the Process manager's write pipe
*
- * This method is meant only to be used by the static signal handler.
+ * This function is meant only to be used by the static signal handler.
*
* \return Pipe for writing
*/
int ProcessManager::writePipe() const
{
- return pipe_[1];
+ return pipe_[1].get();
}
/**
 * \brief Retrieve the old signal action data
*
- * This method is meant only to be used by the static signal handler.
+ * This function is meant only to be used by the static signal handler.
*
* \return The old signal action data
*/
@@ -194,7 +188,6 @@ const struct sigaction &ProcessManager::oldsa() const
return oldsa_;
}
-
/**
* \class Process
* \brief Process object
@@ -268,14 +261,16 @@ int Process::start(const std::string &path,
closeAllFdsExcept(fds);
- unsetenv("LIBCAMERA_LOG_FILE");
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (file && strcmp(file, "syslog"))
+ unsetenv("LIBCAMERA_LOG_FILE");
const char **argv = new const char *[args.size() + 2];
unsigned int len = args.size();
argv[0] = path.c_str();
for (unsigned int i = 0; i < len; i++)
- argv[i+1] = args[i].c_str();
- argv[len+1] = nullptr;
+ argv[i + 1] = args[i].c_str();
+ argv[len + 1] = nullptr;
execv(path.c_str(), (char **)argv);
@@ -326,7 +321,7 @@ int Process::isolate()
* \brief SIGCHLD handler
* \param[in] wstatus The status as output by waitpid()
*
- * This method is called when the process associated with Process terminates.
+ * This function is called when the process associated with Process terminates.
* It emits the Process::finished signal.
*/
void Process::died(int wstatus)
@@ -335,7 +330,7 @@ void Process::died(int wstatus)
exitStatus_ = WIFEXITED(wstatus) ? NormalExit : SignalExit;
exitCode_ = exitStatus_ == NormalExit ? WEXITSTATUS(wstatus) : -1;
- finished.emit(this, exitStatus_, exitCode_);
+ finished.emit(exitStatus_, exitCode_);
}
/**
@@ -355,7 +350,7 @@ void Process::died(int wstatus)
* \fn Process::exitCode()
* \brief Retrieve the exit code of the process
*
- * This method is only valid if exitStatus() returned NormalExit.
+ * This function is only valid if exitStatus() returned NormalExit.
*
* \return Exit code
*/
@@ -373,7 +368,8 @@ void Process::died(int wstatus)
*/
void Process::kill()
{
- ::kill(pid_, SIGKILL);
+ if (pid_ > 0)
+ ::kill(pid_, SIGKILL);
}
} /* namespace libcamera */
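
Taken together, users of Process now connect a two-argument finished slot and can call kill() safely even when start() failed. An illustrative sketch, assuming a running CameraManager so that the ProcessManager singleton exists:

    #include "libcamera/internal/process.h"

    void onFinished(libcamera::Process::ExitStatus status, int code)
    {
        /* code is only meaningful when status is Process::NormalExit. */
    }

    void runHelper()
    {
        libcamera::Process proc;
        proc.finished.connect(&onFinished);

        if (proc.start("/usr/bin/true", {}))
            proc.kill();  /* Now a no-op, as no child was spawned. */
    }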
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
deleted file mode 100644
index bfdd823f..00000000
--- a/src/libcamera/property_ids.cpp.in
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * property_ids.cpp : Property ID list
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-/**
- * \file property_ids.h
- * \brief Camera property identifiers
- */
-
-namespace libcamera {
-
-/**
- * \brief Namespace for libcamera properties
- */
-namespace properties {
-
-${controls_doc}
-
-#ifndef __DOXYGEN__
-/*
- * Keep the properties definitions hidden from doxygen as it incorrectly parses
- * them as functions.
- */
-${controls_def}
-#endif
-
-/**
- * \brief List of all supported libcamera properties
- */
-extern const ControlIdMap properties {
-${controls_map}
-};
-
-} /* namespace properties */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids_core.yaml
index ce627fa0..834454a4 100644
--- a/src/libcamera/property_ids.yaml
+++ b/src/libcamera/property_ids_core.yaml
@@ -2,8 +2,9 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
+vendor: libcamera
controls:
- Location:
type: int32_t
@@ -29,10 +30,10 @@ controls:
- Rotation:
type: int32_t
description: |
- The camera rotation is expressed as the angular difference in degrees
- between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected
- on the image sensor pixel array.
+ The camera physical mounting rotation. It is expressed as the angular
+ difference in degrees between two reference systems, one relative to the
+ camera module, and one defined on the external world scene to be
+ captured when projected on the image sensor pixel array.
A camera sensor has a 2-dimensional reference system 'Rc' defined by
its pixel array read-out order. The origin is set to the first pixel
@@ -386,4 +387,318 @@ controls:
| |
| |
+--------------------+
+
+ - Model:
+ type: string
+ description: |
+      The model name shall, to the extent possible, describe the sensor. For
+      most devices this is the model name of the sensor. For some devices,
+      however, the sensor model is unavailable, as the sensor or the entire
+      camera is part of a larger unit and exposed as a black-box to the
+      system. In such cases the model name of the smallest device that
+      contains the camera sensor shall be used.
+
+ The model name is not meant to be a camera name displayed to the
+ end-user, but may be combined with other camera information to create a
+ camera name.
+
+ The model name is not guaranteed to be unique in the system nor is
+ it guaranteed to be stable or have any other properties required to make
+ it a good candidate to be used as a permanent identifier of a camera.
+
+ The model name shall describe the camera in a human readable format and
+ shall be encoded in ASCII.
+
+ Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'.
+
+ - UnitCellSize:
+ type: Size
+ description: |
+ The pixel unit cell physical size, in nanometers.
+
+      The UnitCellSize property defines the horizontal and vertical sizes of
+ a single pixel unit, including its active and non-active parts. In
+ other words, it expresses the horizontal and vertical distance between
+ the top-left corners of adjacent pixels.
+
+ The property can be used to calculate the physical size of the sensor's
+ pixel array area and for calibration purposes.
+
+ - PixelArraySize:
+ type: Size
+ description: |
+ The camera sensor pixel array readable area vertical and horizontal
+ sizes, in pixels.
+
+ The PixelArraySize property defines the size in pixel units of the
+      readable part of the full pixel array matrix, including optical black
+ pixels used for calibration, pixels which are not considered valid for
+ capture and active pixels containing valid image data.
+
+ The property describes the maximum size of the raw data captured by the
+ camera, which might not correspond to the physical size of the sensor
+ pixel array matrix, as some portions of the physical pixel array matrix
+ are not accessible and cannot be transmitted out.
+
+ For example, let's consider a pixel array matrix assembled as follows
+
+ +--------------------------------------------------+
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ ... ... ... ... ...
+
+ ... ... ... ... ...
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ +--------------------------------------------------+
+
+ starting with two lines of non-readable pixels (x), followed by N lines
+ of readable data (D) surrounded by two columns of non-readable pixels on
+ each side, and ending with two more lines of non-readable pixels. Only
+ the readable portion is transmitted to the receiving side, defining the
+ sizes of the largest possible buffer of raw data that can be presented
+ to applications.
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ +----------------------------------------------+ /
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height
+ ... ... ... ... ...
+ ... ... ... ... ...
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ +----------------------------------------------+ /
+
+ This defines a rectangle whose top-left corner is placed in position (0,
+ 0) and whose vertical and horizontal sizes are defined by this property.
+ All other rectangles that describe portions of the pixel array, such as
+ the optical black pixels rectangles and active pixel areas, are defined
+ relatively to this rectangle.
+
+ All the coordinates are expressed relative to the default sensor readout
+ direction, without any transformation (such as horizontal and vertical
+ flipping) applied. When mapping them to the raw pixel buffer,
+ applications shall take any configured transformation into account.
+
+      \todo Rename this property to Size once we have property
+ categories (i.e. Properties::PixelArray::Size)
+
+ - PixelArrayOpticalBlackRectangles:
+ type: Rectangle
+ size: [n]
+ description: |
+ The pixel array region(s) which contain optical black pixels
+ considered valid for calibration purposes.
+
+ This property describes the position and size of optical black pixel
+ regions in the raw data buffer as stored in memory, which might differ
+ from their actual physical location in the pixel array matrix.
+
+ It is important to note, in fact, that camera sensors might
+      automatically reorder or skip portions of their pixel array matrix when
+ transmitting data to the receiver. For instance, a sensor may merge the
+ top and bottom optical black rectangles into a single rectangle,
+ transmitted at the beginning of the frame.
+
+ The pixel array contains several areas with different purposes,
+      interleaved by lines and columns which are not valid for capturing
+      purposes. Lines and columns are considered invalid because they could
+      be positioned too close to the chip margins or to the optical
+ black shielding placed on top of optical black pixels.
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ x1 x2
+ +--o---------------------------------------o---+ /
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ y1 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ y2 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ y3 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | PixelArraySize.height
+ |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ ... ... ... ... ...
+ ... ... ... ... ...
+ y4 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ +----------------------------------------------+ /
+
+ The readable pixel array matrix is composed by
+ 2 invalid lines (I)
+ 4 lines of valid optical black pixels (O)
+ 2 invalid lines (I)
+ n lines of valid pixel data (P)
+ 2 invalid lines (I)
+
+ And the position of the optical black pixel rectangles is defined by
+
+ PixelArrayOpticalBlackRectangles = {
+ { x1, y1, x2 - x1 + 1, y2 - y1 + 1 },
+ { x1, y3, 2, y4 - y3 + 1 },
+ { x2, y3, 2, y4 - y3 + 1 },
+ };
+
+ If the camera, when capturing the full pixel array matrix, automatically
+ skips the invalid lines and columns, producing the following data
+      buffer when captured to memory
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ x1
+ +--------------------------------------------o-+ /
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ y1 oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | PixelArraySize.height
+ ... ... ... ... ... |
+ ... ... ... ... ... |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ +----------------------------------------------+ /
+
+ then the invalid lines and columns should not be reported as part of the
+      PixelArraySize property in the first place.
+
+ In this case, the position of the black pixel rectangles will be
+
+ PixelArrayOpticalBlackRectangles = {
+ { 0, 0, y1 + 1, PixelArraySize[0] },
+ { 0, y1, 2, PixelArraySize[1] - y1 + 1 },
+ { x1, y1, 2, PixelArraySize[1] - y1 + 1 },
+ };
+
+      \todo Rename this property to OpticalBlackRectangles once we have
+      property categories (i.e. Properties::PixelArray::OpticalBlackRectangles)
+
+ - PixelArrayActiveAreas:
+ type: Rectangle
+ size: [n]
+ description: |
+ The PixelArrayActiveAreas property defines the (possibly multiple and
+ overlapping) portions of the camera sensor readable pixel matrix
+ which are considered valid for image acquisition purposes.
+
+ This property describes an arbitrary number of overlapping rectangles,
+ with each rectangle representing the maximum image size that the camera
+ sensor can produce for a particular aspect ratio. They are defined
+ relatively to the PixelArraySize rectangle.
+
+ When multiple rectangles are reported, they shall be ordered from the
+ tallest to the shortest.
+
+ Example 1
+ A camera sensor which only produces images in the 4:3 image resolution
+ will report a single PixelArrayActiveAreas rectangle, from which all
+ other image formats are obtained by either cropping the field-of-view
+ and/or applying pixel sub-sampling techniques such as pixel skipping or
+ binning.
+
+ PixelArraySize.width
+ /----------------/
+ x1 x2
+ (0,0)-> +-o------------o-+ /
+ y1 o +------------+ | |
+ | |////////////| | |
+ | |////////////| | | PixelArraySize.height
+ | |////////////| | |
+ y2 o +------------+ | |
+ +----------------+ /
+
+ The property reports a single rectangle
+
+ PixelArrayActiveAreas = (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
+
+ Example 2
+ A camera sensor which can produce images in different native
+ resolutions will report several overlapping rectangles, one for each
+ natively supported resolution.
+
+ PixelArraySize.width
+ /------------------/
+ x1 x2 x3 x4
+ (0,0)-> +o---o------o---o+ /
+ y1 o +------+ | |
+ | |//////| | |
+ y2 o+---+------+---+| |
+ ||///|//////|///|| | PixelArraySize.height
+ y3 o+---+------+---+| |
+ | |//////| | |
+ y4 o +------+ | |
+ +----+------+----+ /
+
+ The property reports two rectangles
+
+ PixelArrayActiveAreas = ((x2, y1, x3 - x2 + 1, y4 - y1 + 1),
+ (x1, y2, x4 - x1 + 1, y3 - y2 + 1))
+
+ The first rectangle describes the maximum field-of-view of all image
+ formats in the 4:3 resolutions, while the second one describes the
+ maximum field of view for all image formats in the 16:9 resolutions.
+
+ Multiple rectangles shall only be reported when the sensor can't capture
+ the pixels in the corner regions. If all the pixels in the (x1,y1) -
+ (x4,y4) area can be captured, the PixelArrayActiveAreas property shall
+      contain the single rectangle (x1,y1) - (x4,y4).
+
+      \todo Rename this property to ActiveAreas once we have property
+ categories (i.e. Properties::PixelArray::ActiveAreas)
+
+ - ScalerCropMaximum:
+ type: Rectangle
+ description: |
+ The maximum valid rectangle for the controls::ScalerCrop control. This
+ reflects the minimum mandatory cropping applied in the camera sensor and
+ the rest of the pipeline. Just as the ScalerCrop control, it defines a
+ rectangle taken from the sensor's active pixel array.
+
+ This property is valid only after the camera has been successfully
+ configured and its value may change whenever a new configuration is
+ applied.
+
+ \todo Turn this property into a "maximum control value" for the
+ ScalerCrop control once "dynamic" controls have been implemented.
+
+ - SensorSensitivity:
+ type: float
+ description: |
+ The relative sensitivity of the chosen sensor mode.
+
+ Some sensors have readout modes with different sensitivities. For example,
+ a binned camera mode might, with the same exposure and gains, produce
+ twice the signal level of the full resolution readout. This would be
+ signalled by the binned mode, when it is chosen, indicating a value here
+ that is twice that of the full resolution mode. This value will be valid
+ after the configure method has returned successfully.
+
+ - SystemDevices:
+ type: int64_t
+ size: [n]
+ description: |
+ A list of integer values of type dev_t denoting the major and minor
+ device numbers of the underlying devices used in the operation of this
+ camera.
+
+ Different cameras may report identical devices.
+
...
diff --git a/src/libcamera/property_ids_draft.yaml b/src/libcamera/property_ids_draft.yaml
new file mode 100644
index 00000000..62f0e242
--- /dev/null
+++ b/src/libcamera/property_ids_draft.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: draft
+controls:
+ - ColorFilterArrangement:
+ type: int32_t
+ vendor: draft
+ description: |
+ The arrangement of color filters on sensor; represents the colors in the
+ top-left 2x2 section of the sensor, in reading order. Currently
+ identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+ enum:
+ - name: RGGB
+ value: 0
+ description: RGGB Bayer pattern
+ - name: GRBG
+ value: 1
+ description: GRBG Bayer pattern
+ - name: GBRG
+ value: 2
+ description: GBRG Bayer pattern
+ - name: BGGR
+ value: 3
+ description: BGGR Bayer pattern
+ - name: RGB
+ value: 4
+ description: |
+ Sensor is not Bayer; output has 3 16-bit values for each pixel,
+ instead of just 1 16-bit value per pixel.
+ - name: MONO
+ value: 5
+ description: |
+ Sensor is not Bayer; output consists of a single colour channel.
+
+...
diff --git a/src/libcamera/proxy/ipa_proxy_linux.cpp b/src/libcamera/proxy/ipa_proxy_linux.cpp
deleted file mode 100644
index c7218fb4..00000000
--- a/src/libcamera/proxy/ipa_proxy_linux.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy_linux.cpp - Default Image Processing Algorithm proxy for Linux
- */
-
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "ipa_module.h"
-#include "ipa_proxy.h"
-#include "ipc_unixsocket.h"
-#include "log.h"
-#include "process.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(IPAProxy)
-
-class IPAProxyLinux : public IPAProxy
-{
-public:
- IPAProxyLinux(IPAModule *ipam);
- ~IPAProxyLinux();
-
- int init() override { return 0; }
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override {}
- void mapBuffers(const std::vector<IPABuffer> &buffers) override {}
- void unmapBuffers(const std::vector<unsigned int> &ids) override {}
- void processEvent(const IPAOperationData &event) override {}
-
-private:
- void readyRead(IPCUnixSocket *ipc);
-
- Process *proc_;
-
- IPCUnixSocket *socket_;
-};
-
-IPAProxyLinux::IPAProxyLinux(IPAModule *ipam)
- : proc_(nullptr), socket_(nullptr)
-{
- LOG(IPAProxy, Debug)
- << "initializing dummy proxy: loading IPA from "
- << ipam->path();
-
- std::vector<int> fds;
- std::vector<std::string> args;
- args.push_back(ipam->path());
- const std::string path = resolvePath("ipa_proxy_linux");
- if (path.empty()) {
- LOG(IPAProxy, Error)
- << "Failed to get proxy worker path";
- return;
- }
-
- socket_ = new IPCUnixSocket();
- int fd = socket_->create();
- if (fd < 0) {
- LOG(IPAProxy, Error)
- << "Failed to create socket";
- return;
- }
- socket_->readyRead.connect(this, &IPAProxyLinux::readyRead);
- args.push_back(std::to_string(fd));
- fds.push_back(fd);
-
- proc_ = new Process();
- int ret = proc_->start(path, args, fds);
- if (ret) {
- LOG(IPAProxy, Error)
- << "Failed to start proxy worker process";
- return;
- }
-
- valid_ = true;
-}
-
-IPAProxyLinux::~IPAProxyLinux()
-{
- delete proc_;
- delete socket_;
-}
-
-void IPAProxyLinux::readyRead(IPCUnixSocket *ipc)
-{
-}
-
-REGISTER_IPA_PROXY(IPAProxyLinux)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/proxy/meson.build b/src/libcamera/proxy/meson.build
index efc11323..8bd1b135 100644
--- a/src/libcamera/proxy/meson.build
+++ b/src/libcamera/proxy/meson.build
@@ -1,3 +1,20 @@
-libcamera_sources += files([
- 'ipa_proxy_linux.cpp',
-])
+# SPDX-License-Identifier: CC0-1.0
+
+# generate {pipeline}_ipa_proxy.cpp
+foreach mojom : ipa_mojoms
+ proxy = custom_target(mojom['name'] + '_proxy_cpp',
+ input : mojom['mojom'],
+ output : mojom['name'] + '_ipa_proxy.cpp',
+ depends : mojom_templates,
+ command : [
+ mojom_generator, 'generate',
+ '-g', 'libcamera',
+ '--bytecode_path', mojom_templates_dir,
+ '--libcamera_generate_proxy_cpp',
+ '--libcamera_output_path=@OUTPUT@',
+ './' + '@INPUT@'
+ ],
+ env : py_build_env)
+
+ libcamera_internal_sources += proxy
+endforeach
diff --git a/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp b/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp
deleted file mode 100644
index 7d6287c7..00000000
--- a/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy_linux_worker.cpp - Default Image Processing Algorithm proxy worker for Linux
- */
-
-#include <iostream>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <ipa/ipa_interface.h>
-#include <libcamera/event_dispatcher.h>
-#include <libcamera/logging.h>
-
-#include "ipa_module.h"
-#include "ipc_unixsocket.h"
-#include "log.h"
-#include "thread.h"
-
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(IPAProxyLinuxWorker)
-
-void readyRead(IPCUnixSocket *ipc)
-{
- IPCUnixSocket::Payload message;
- int ret;
-
- ret = ipc->receive(&message);
- if (ret) {
- LOG(IPAProxyLinuxWorker, Error)
- << "Receive message failed: " << ret;
- return;
- }
-
- LOG(IPAProxyLinuxWorker, Debug) << "Received a message!";
-}
-
-int main(int argc, char **argv)
-{
- /* Uncomment this for debugging. */
-#if 0
- std::string logPath = "/tmp/libcamera.worker." +
- std::to_string(getpid()) + ".log";
- logSetFile(logPath.c_str());
-#endif
-
- if (argc < 3) {
- LOG(IPAProxyLinuxWorker, Debug)
- << "Tried to start worker with no args";
- return EXIT_FAILURE;
- }
-
- int fd = std::stoi(argv[2]);
- LOG(IPAProxyLinuxWorker, Debug)
- << "Starting worker for IPA module " << argv[1]
- << " with IPC fd = " << fd;
-
- std::unique_ptr<IPAModule> ipam = std::make_unique<IPAModule>(argv[1]);
- if (!ipam->isValid() || !ipam->load()) {
- LOG(IPAProxyLinuxWorker, Error)
- << "IPAModule " << argv[1] << " should be valid but isn't";
- return EXIT_FAILURE;
- }
-
- IPCUnixSocket socket;
- if (socket.bind(fd) < 0) {
- LOG(IPAProxyLinuxWorker, Error) << "IPC socket binding failed";
- return EXIT_FAILURE;
- }
- socket.readyRead.connect(&readyRead);
-
- struct ipa_context *ipac = ipam->createContext();
- if (!ipac) {
- LOG(IPAProxyLinuxWorker, Error) << "Failed to create IPA context";
- return EXIT_FAILURE;
- }
-
- LOG(IPAProxyLinuxWorker, Debug) << "Proxy worker successfully started";
-
- /* \todo upgrade listening loop */
- EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
- while (1)
- dispatcher->processEvents();
-
- ipac->ops->destroy(ipac);
-
- return 0;
-}
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index 839156f7..8c54a2e2 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -1,16 +1,32 @@
-ipa_proxy_sources = [
- ['ipa_proxy_linux', 'ipa_proxy_linux_worker.cpp']
-]
+# SPDX-License-Identifier: CC0-1.0
-proxy_install_dir = join_paths(get_option('libexecdir'), 'libcamera')
+proxy_install_dir = libcamera_libexecdir
-foreach t : ipa_proxy_sources
- proxy = executable(t[0], t[1],
- include_directories : libcamera_internal_includes,
+# generate {pipeline}_ipa_proxy_worker.cpp
+foreach mojom : ipa_mojoms
+ worker = custom_target(mojom['name'] + '_proxy_worker',
+ input : mojom['mojom'],
+ output : mojom['name'] + '_ipa_proxy_worker.cpp',
+ depends : mojom_templates,
+ command : [
+ mojom_generator, 'generate',
+ '-g', 'libcamera',
+ '--bytecode_path', mojom_templates_dir,
+ '--libcamera_generate_proxy_worker',
+ '--libcamera_output_path=@OUTPUT@',
+ './' + '@INPUT@'
+ ],
+ env : py_build_env)
+
+ proxy = executable(mojom['name'] + '_ipa_proxy', worker,
install : true,
install_dir : proxy_install_dir,
- dependencies : libcamera_dep)
+ dependencies : libcamera_private)
endforeach
config_h.set('IPA_PROXY_DIR',
- '"' + join_paths(get_option('prefix'), proxy_install_dir) + '"')
+ '"' + get_option('prefix') / proxy_install_dir + '"')
+
+summary({
+ 'IPA_PROXY_DIR' : config_h.get('IPA_PROXY_DIR'),
+ }, section : 'Paths')
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
new file mode 100644
index 00000000..f1d73a5c
--- /dev/null
+++ b/src/libcamera/pub_key.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Public key signature verification
+ */
+
+#include "libcamera/internal/pub_key.h"
+
+#if HAVE_CRYPTO
+#include <openssl/evp.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+#include <openssl/x509.h>
+#elif HAVE_GNUTLS
+#include <gnutls/abstract.h>
+#endif
+
+/**
+ * \file pub_key.h
+ * \brief Public key signature verification
+ */
+
+namespace libcamera {
+
+/**
+ * \class PubKey
+ * \brief Public key wrapper for signature verification
+ *
+ * The PubKey class wraps a public key and implements signature verification. It
+ * only supports RSA keys and the RSA-SHA256 signature algorithm.
+ */
+
+/**
+ * \brief Construct a PubKey from key data
+ * \param[in] key Key data encoded in DER format
+ */
+PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
+ : valid_(false)
+{
+#if HAVE_CRYPTO
+ const uint8_t *data = key.data();
+ pubkey_ = d2i_PUBKEY(nullptr, &data, key.size());
+ if (!pubkey_)
+ return;
+
+ valid_ = true;
+#elif HAVE_GNUTLS
+ int ret = gnutls_pubkey_init(&pubkey_);
+ if (ret < 0)
+ return;
+
+ const gnutls_datum_t gnuTlsKey{
+ const_cast<unsigned char *>(key.data()),
+ static_cast<unsigned int>(key.size())
+ };
+ ret = gnutls_pubkey_import(pubkey_, &gnuTlsKey, GNUTLS_X509_FMT_DER);
+ if (ret < 0)
+ return;
+
+ valid_ = true;
+#endif
+}
+
+PubKey::~PubKey()
+{
+#if HAVE_CRYPTO
+ EVP_PKEY_free(pubkey_);
+#elif HAVE_GNUTLS
+ gnutls_pubkey_deinit(pubkey_);
+#endif
+}
+
+/**
+ * \fn bool PubKey::isValid() const
+ * \brief Check if the public key is valid
+ * \return True if the public key is valid, false otherwise
+ */
+
+/**
+ * \brief Verify signature on data
+ * \param[in] data The signed data
+ * \param[in] sig The signature
+ *
+ * Verify that the signature \a sig matches the signed \a data for the public
+ * key. The signature algorithm is hardcoded to RSA-SHA256.
+ *
+ * \return True if the signature is valid, false otherwise
+ */
+bool PubKey::verify([[maybe_unused]] Span<const uint8_t> data,
+ [[maybe_unused]] Span<const uint8_t> sig) const
+{
+ if (!valid_)
+ return false;
+
+#if HAVE_CRYPTO
+ /*
+ * Create and initialize a public key algorithm context for signature
+ * verification.
+ */
+ EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pubkey_, nullptr);
+ if (!ctx)
+ return false;
+
+ if (EVP_PKEY_verify_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0 ||
+ EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
+ return false;
+ }
+
+ /* Calculate the SHA256 digest of the data. */
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+ SHA256(data.data(), data.size(), digest);
+
+ /* Decrypt the signature and verify it matches the digest. */
+ int ret = EVP_PKEY_verify(ctx, sig.data(), sig.size(), digest,
+ SHA256_DIGEST_LENGTH);
+ EVP_PKEY_CTX_free(ctx);
+ return ret == 1;
+#elif HAVE_GNUTLS
+ const gnutls_datum_t gnuTlsData{
+ const_cast<unsigned char *>(data.data()),
+ static_cast<unsigned int>(data.size())
+ };
+
+ const gnutls_datum_t gnuTlsSig{
+ const_cast<unsigned char *>(sig.data()),
+ static_cast<unsigned int>(sig.size())
+ };
+
+ int ret = gnutls_pubkey_verify_data2(pubkey_, GNUTLS_SIGN_RSA_SHA256, 0,
+ &gnuTlsData, &gnuTlsSig);
+ return ret >= 0;
+#else
+ return false;
+#endif
+}
+
+} /* namespace libcamera */
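
Verification is a two-step check, first of the key itself and then of the signature. A minimal sketch, with caller-provided DER key, data and signature spans:

    #include <cstdint>

    #include <libcamera/base/span.h>

    #include "libcamera/internal/pub_key.h"

    bool checkSignature(libcamera::Span<const uint8_t> derKey,
                        libcamera::Span<const uint8_t> data,
                        libcamera::Span<const uint8_t> sig)
    {
        /* An invalid key (bad DER data, or no crypto backend) fails early. */
        libcamera::PubKey key(derKey);
        if (!key.isValid())
            return false;

        return key.verify(data, sig);
    }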
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index ea33736f..8c56ed30 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,30 +2,313 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
-#include <libcamera/request.h>
+#include "libcamera/internal/request.h"
#include <map>
+#include <sstream>
+
+#include <libcamera/base/log.h>
-#include <libcamera/buffer.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
+#include <libcamera/fence.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>
-#include "camera_controls.h"
-#include "log.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_controls.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/tracepoints.h"
/**
- * \file request.h
+ * \file libcamera/request.h
* \brief Describes a frame capture request to be processed by a camera
*/
+/**
+ * \internal
+ * \file libcamera/internal/request.h
+ * \brief Internal support for request handling
+ */
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Request)
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class Request::Private
+ * \brief Request private data
+ *
+ * The Request::Private class stores all private data associated with a
+ * request. It implements the d-pointer design pattern to hide core
+ * Request data from the public API, and exposes utility functions to
+ * internal users of the request (namely the PipelineHandler class and its
+ * subclasses).
+ */
+
+/**
+ * \brief Create a Request::Private
+ * \param camera The Camera that creates the request
+ */
+Request::Private::Private(Camera *camera)
+ : camera_(camera), cancelled_(false)
+{
+}
+
+Request::Private::~Private()
+{
+ doCancelRequest();
+}
+
+/**
+ * \fn Request::Private::camera()
+ * \brief Retrieve the camera this request has been queued to
+ * \return The Camera this request has been queued to, or nullptr if the
+ * request hasn't been queued
+ */
+
+/**
+ * \brief Check if a request has buffers yet to be completed
+ *
+ * \return True if the request has buffers pending for completion, false
+ * otherwise
+ */
+bool Request::Private::hasPendingBuffers() const
+{
+ return !pending_.empty();
+}
+
+/**
+ * \brief Complete a buffer for the request
+ * \param[in] buffer The buffer that has completed
+ *
+ * A request tracks the status of all buffers it contains through a set of
+ * pending buffers. This function removes the \a buffer from the set to mark it
+ * as complete. All buffers associated with the request shall be marked as
+ * complete by calling this function once and once only before reporting the
+ * request as complete with the complete() function.
+ *
+ * \return True if all buffers contained in the request have completed, false
+ * otherwise
+ */
+bool Request::Private::completeBuffer(FrameBuffer *buffer)
+{
+ LIBCAMERA_TRACEPOINT(request_complete_buffer, this, buffer);
+
+ int ret = pending_.erase(buffer);
+ ASSERT(ret == 1);
+
+ buffer->_d()->setRequest(nullptr);
+
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled)
+ cancelled_ = true;
+
+ return !hasPendingBuffers();
+}
+
+/**
+ * \brief Complete a queued request
+ *
+ * Mark the request as complete by updating its status to RequestComplete,
+ * unless buffers have been cancelled in which case the status is set to
+ * RequestCancelled.
+ */
+void Request::Private::complete()
+{
+ Request *request = _o<Request>();
+
+ ASSERT(request->status() == RequestPending);
+ ASSERT(!hasPendingBuffers());
+
+ request->status_ = cancelled_ ? RequestCancelled : RequestComplete;
+
+ LOG(Request, Debug) << request->toString();
+
+ LIBCAMERA_TRACEPOINT(request_complete, this);
+}
+
+void Request::Private::doCancelRequest()
+{
+ Request *request = _o<Request>();
+
+ for (FrameBuffer *buffer : pending_) {
+ buffer->_d()->cancel();
+ camera_->bufferCompleted.emit(request, buffer);
+ }
+
+ cancelled_ = true;
+ pending_.clear();
+ notifiers_.clear();
+ timer_.reset();
+}
+
+/**
+ * \brief Cancel a queued request
+ *
+ * Mark the request and its associated buffers as cancelled and complete it.
+ *
+ * Set each pending buffer in error state and emit the buffer completion signal
+ * before completing the Request.
+ */
+void Request::Private::cancel()
+{
+ LIBCAMERA_TRACEPOINT(request_cancel, this);
+
+ Request *request = _o<Request>();
+ ASSERT(request->status() == RequestPending);
+
+ doCancelRequest();
+}
+
+/**
+ * \brief Reset the request internal data to default values
+ *
+ * After calling this function, all request internal data will have default
+ * values as if the Request::Private instance had just been constructed.
+ */
+void Request::Private::reset()
+{
+ sequence_ = 0;
+ cancelled_ = false;
+ prepared_ = false;
+ pending_.clear();
+ notifiers_.clear();
+ timer_.reset();
+}
+
+/*
+ * Helper function to save some lines of code and make sure prepared_ is set
+ * to true before emitting the signal.
+ */
+void Request::Private::emitPrepareCompleted()
+{
+ prepared_ = true;
+ prepared.emit();
+}
+
+/**
+ * \brief Prepare the Request to be queued to the device
+ * \param[in] timeout Optional expiration timeout
+ *
+ * Prepare a Request to be queued to the hardware device by ensuring it is
+ * ready for the incoming memory transfers.
+ *
+ * This currently means waiting on each frame buffer acquire fence to be
+ * signalled. An optional expiration timeout can be specified. If not all the
+ * fences have been signalled correctly before the timeout expires, the Request
+ * is cancelled.
+ *
+ * The function immediately emits the prepared signal if all the prepare
+ * operations have been completed synchronously. If instead the prepare
+ * operations need to wait for the completion of asynchronous events, such as
+ * fence notifications or timer expiration, the prepared signal is emitted upon
+ * completion of the asynchronous events.
+ *
+ * As we currently only handle fences, the function emits the prepared signal
+ * immediately if there are no fences to wait on. Otherwise the prepared signal
+ * is emitted when all fences have been signalled or the optional timeout has
+ * expired.
+ *
+ * If not all the fences have been correctly signalled or the optional timeout
+ * has expired, the Request will be cancelled and the Request::prepared signal
+ * emitted.
+ *
+ * The intended user of this function is the PipelineHandler base class, which
+ * 'prepares' a Request before queuing it to the hardware device.
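+ *
+ * A minimal sketch of the intended call site (connection details may differ
+ * in the actual pipeline handler code):
+ * \code{.cpp}
+ * using namespace std::chrono_literals;
+ *
+ * request->_d()->prepared.connect(this, [this]() { doQueueRequests(); });
+ * request->_d()->prepare(300ms);
+ * \endcode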
+ */
+void Request::Private::prepare(std::chrono::milliseconds timeout)
+{
+ /* Create and connect one notifier for each synchronization fence. */
+ for (FrameBuffer *buffer : pending_) {
+ const Fence *fence = buffer->_d()->fence();
+ if (!fence)
+ continue;
+
+ std::unique_ptr<EventNotifier> notifier =
+ std::make_unique<EventNotifier>(fence->fd().get(),
+ EventNotifier::Read);
+
+ notifier->activated.connect(this, [this, buffer] {
+ notifierActivated(buffer);
+ });
+
+ notifiers_[buffer] = std::move(notifier);
+ }
+
+ if (notifiers_.empty()) {
+ emitPrepareCompleted();
+ return;
+ }
+
+ /*
+ * In case a timeout is specified, create a timer and set it up.
+ *
+ * The timer must be created here instead of in the Request constructor,
+ * in order to be bound to the pipeline handler thread.
+ */
+ if (timeout != 0ms) {
+ timer_ = std::make_unique<Timer>();
+ timer_->timeout.connect(this, &Request::Private::timeout);
+ timer_->start(timeout);
+ }
+}
+
+/**
+ * \var Request::Private::prepared
+ * \brief Request preparation completed signal
+ *
+ * The signal is emitted once the request preparation has completed and the
+ * request is ready to be queued. The Request might complete with errors, in
+ * which case it is cancelled.
+ *
+ * The intended slot for this signal is the PipelineHandler::doQueueRequests()
+ * function, which queues Requests after they have been prepared or cancels
+ * them if they have failed to prepare.
+ */
+
+void Request::Private::notifierActivated(FrameBuffer *buffer)
+{
+ /* Close the fence if successfully signalled. */
+ ASSERT(buffer);
+ buffer->releaseFence();
+
+ /* Remove the entry from the map and check if other fences are pending. */
+ auto it = notifiers_.find(buffer);
+ ASSERT(it != notifiers_.end());
+ notifiers_.erase(it);
+
+ Request *request = _o<Request>();
+ LOG(Request, Debug)
+ << "Request " << request->cookie() << " buffer " << buffer
+ << " fence signalled";
+
+ if (!notifiers_.empty())
+ return;
+
+ /* All fences completed, delete the timer and emit the prepared signal. */
+ timer_.reset();
+ emitPrepareCompleted();
+}
+
+void Request::Private::timeout()
+{
+ /* A timeout can only happen if there are fences not yet signalled. */
+ ASSERT(!notifiers_.empty());
+ notifiers_.clear();
+
+ Request *request = _o<Request>();
+ LOG(Request, Debug) << "Request prepare timeout: " << request->cookie();
+
+ cancel();
+
+ emitPrepareCompleted();
+}
+#endif /* __DOXYGEN_PUBLIC__ */
+
/**
* \enum Request::Status
* Request completion status
@@ -38,6 +321,20 @@ LOG_DEFINE_CATEGORY(Request)
*/
/**
+ * \enum Request::ReuseFlag
+ * Flags to control the behavior of Request::reuse()
+ * \var Request::Default
+ * Don't reuse buffers
+ * \var Request::ReuseBuffers
+ * Reuse the buffers that were previously added by addBuffer()
+ */
+
+/**
+ * \typedef Request::BufferMap
+ * \brief A map of Stream to FrameBuffer pointers
+ */
+
+/**
* \class Request
* \brief A frame capture request
*
@@ -51,33 +348,65 @@ LOG_DEFINE_CATEGORY(Request)
* \param[in] cookie Opaque cookie for application use
*
* The \a cookie is stored in the request and is accessible through the
- * cookie() method at any time. It is typically used by applications to map the
- * request to an external resource in the request completion handler, and is
+ * cookie() function at any time. It is typically used by applications to map
+ * the request to an external resource in the request completion handler, and is
* completely opaque to libcamera.
- *
*/
Request::Request(Camera *camera, uint64_t cookie)
- : camera_(camera), cookie_(cookie), status_(RequestPending),
- cancelled_(false)
+ : Extensible(std::make_unique<Private>(camera)),
+ cookie_(cookie), status_(RequestPending)
{
- /**
- * \todo Should the Camera expose a validator instance, to avoid
- * creating a new instance for each request?
- */
- validator_ = new CameraControlValidator(camera);
- controls_ = new ControlList(controls::controls, validator_);
+ controls_ = new ControlList(controls::controls,
+ camera->_d()->validator());
/**
- * \todo: Add a validator for metadata controls.
+ * \todo Add a validator for metadata controls.
*/
metadata_ = new ControlList(controls::controls);
+
+ LIBCAMERA_TRACEPOINT(request_construct, this);
+
+ LOG(Request, Debug) << "Created request - cookie: " << cookie_;
}
Request::~Request()
{
+ LIBCAMERA_TRACEPOINT(request_destroy, this);
+
delete metadata_;
delete controls_;
- delete validator_;
+}
+
+/**
+ * \brief Reset the request for reuse
+ * \param[in] flags Indicate whether or not to reuse the buffers
+ *
+ * Reset the status and controls associated with the request, to allow it to
+ * be reused and requeued without destruction. This function shall be called
+ * prior to queueing the request to the camera, in lieu of constructing a new
+ * request. The application can reuse the buffers that were previously added
+ * to the request via addBuffer() by setting \a flags to ReuseBuffers.
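+ *
+ * A minimal usage sketch from a request completion handler (the handler and
+ * the camera variable are illustrative assumptions):
+ * \code{.cpp}
+ * void onRequestCompleted(Request *request)
+ * {
+ *	if (request->status() == Request::RequestCancelled)
+ *		return;
+ *
+ *	// Recycle the request with the same buffers and queue it again.
+ *	request->reuse(Request::ReuseBuffers);
+ *	camera->queueRequest(request);
+ * }
+ * \endcode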
+ */
+void Request::reuse(ReuseFlag flags)
+{
+ LIBCAMERA_TRACEPOINT(request_reuse, this);
+
+ _d()->reset();
+
+ if (flags & ReuseBuffers) {
+ for (auto pair : bufferMap_) {
+ FrameBuffer *buffer = pair.second;
+ buffer->_d()->setRequest(this);
+ _d()->pending_.insert(buffer);
+ }
+ } else {
+ bufferMap_.clear();
+ }
+
+ status_ = RequestPending;
+
+ controls_->clear();
+ metadata_->clear();
}
/**
@@ -86,8 +415,8 @@ Request::~Request()
*
* Requests store a list of controls to be applied to all frames captured for
* the request. They are created with an empty list of controls that can be
- * accessed through this method and updated with ControlList::operator[]() or
- * ControlList::update().
+ * accessed through this function. Control values can be retrieved using
+ * ControlList::get() and updated using ControlList::set().
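+ *
+ * A short usage sketch (the control values are illustrative):
+ * \code{.cpp}
+ * ControlList &ctrls = request->controls();
+ * ctrls.set(controls::ExposureTime, 10000);
+ * std::optional<float> gain = ctrls.get(controls::AnalogueGain);
+ * \endcode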
*
* Only controls supported by the camera to which this request will be
* submitted shall be included in the controls list. Attempting to add an
@@ -110,19 +439,36 @@ Request::~Request()
* \brief Add a FrameBuffer with its associated Stream to the Request
* \param[in] stream The stream the buffer belongs to
* \param[in] buffer The FrameBuffer to add to the request
+ * \param[in] fence The optional fence
*
* A reference to the buffer is stored in the request. The caller is responsible
* for ensuring that the buffer will remain valid until the request complete
* callback is called.
*
* A request can only contain one buffer per stream. If a buffer has already
- * been added to the request for the same stream, this method returns -EEXIST.
+ * been added to the request for the same stream, this function returns -EEXIST.
+ *
+ * A Fence can be optionally associated with the \a buffer.
+ *
+ * When a valid Fence is provided to this function, \a fence is moved to \a
+ * buffer and this Request will only be queued to the device once the
+ * fences of all its buffers have been correctly signalled.
+ *
+ * If the \a fence associated with \a buffer isn't signalled, the request will
+ * fail after a timeout. The buffer will still contain the fence, which
+ * applications must retrieve with FrameBuffer::releaseFence() before the buffer
+ * can be reused in another request. Attempting to add a buffer that still
+ * contains a fence to a request will result in this function returning -EEXIST.
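+ *
+ * A sketch of attaching a fence when adding a buffer (how the fence file
+ * descriptor is acquired is an illustrative assumption):
+ * \code{.cpp}
+ * UniqueFD acquireFd = getAcquireFenceFromElsewhere();
+ * auto fence = std::make_unique<Fence>(std::move(acquireFd));
+ * int ret = request->addBuffer(stream, buffer, std::move(fence));
+ * \endcode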
+ *
+ * \sa FrameBuffer::releaseFence()
*
* \return 0 on success or a negative error code otherwise
* \retval -EEXIST The request already contains a buffer for the stream
+ * or the buffer still references a fence
* \retval -EINVAL The buffer does not reference a valid Stream
*/
-int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
+int Request::addBuffer(const Stream *stream, FrameBuffer *buffer,
+ std::unique_ptr<Fence> fence)
{
if (!stream) {
LOG(Request, Error) << "Invalid stream reference";
@@ -135,10 +481,22 @@ int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
return -EEXIST;
}
- buffer->request_ = this;
- pending_.insert(buffer);
+	/*
+	 * Make sure the fence has been extracted from the buffer
+	 * to avoid waiting on a stale fence.
+	 */
+	if (buffer->_d()->fence()) {
+		LOG(Request, Error) << "Can't add buffer that still references a fence";
+		return -EEXIST;
+	}
+
+	buffer->_d()->setRequest(this);
+	_d()->pending_.insert(buffer);
 	bufferMap_[stream] = buffer;
+
+	if (fence && fence->isValid())
+		buffer->_d()->setFence(std::move(fence));
+
return 0;
}
@@ -157,9 +515,9 @@ int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
* \return The buffer associated with the stream, or nullptr if the stream is
* not part of this request
*/
-FrameBuffer *Request::findBuffer(Stream *stream) const
+FrameBuffer *Request::findBuffer(const Stream *stream) const
{
- auto it = bufferMap_.find(stream);
+ const auto it = bufferMap_.find(stream);
if (it == bufferMap_.end())
return nullptr;
@@ -175,6 +533,26 @@ FrameBuffer *Request::findBuffer(Stream *stream) const
*/
/**
+ * \brief Retrieve the sequence number for the request
+ *
+ * When requests are queued, they are given a sequential number to track the
+ * order in which requests are queued to a camera. This number counts all
+ * requests given to a camera and is reset to zero between camera stop/start
+ * sequences.
+ *
+ * It can be used to support debugging and identifying the flow of requests
+ * through a pipeline, but is not guaranteed to represent the sequence number
+ * of any images in the stream. The sequence number is stored as an unsigned
+ * integer and will wrap when overflowed.
+ *
+ * \return The request sequence number
+ */
+uint32_t Request::sequence() const
+{
+ return _d()->sequence_;
+}
+
+/**
* \fn Request::cookie()
* \brief Retrieve the cookie set when the request was created
* \return The request cookie
@@ -194,50 +572,49 @@ FrameBuffer *Request::findBuffer(Stream *stream) const
*/
/**
- * \fn Request::hasPendingBuffers()
* \brief Check if a request has buffers yet to be completed
*
* \return True if the request has buffers pending for completion, false
* otherwise
*/
+bool Request::hasPendingBuffers() const
+{
+ return !_d()->pending_.empty();
+}
/**
- * \brief Complete a queued request
+ * \brief Generate a string representation of the Request internals
*
- * Mark the request as complete by updating its status to RequestComplete,
- * unless buffers have been cancelled in which case the status is set to
- * RequestCancelled.
+ * This function facilitates debugging of Request state while it is used
+ * internally within libcamera.
+ *
+ * \return A string representing the current state of the request
*/
-void Request::complete()
+std::string Request::toString() const
{
- ASSERT(!hasPendingBuffers());
- status_ = cancelled_ ? RequestCancelled : RequestComplete;
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
}
/**
- * \brief Complete a buffer for the request
- * \param[in] buffer The buffer that has completed
- *
- * A request tracks the status of all buffers it contains through a set of
- * pending buffers. This function removes the \a buffer from the set to mark it
- * as complete. All buffers associate with the request shall be marked as
- * complete by calling this function once and once only before reporting the
- * request as complete with the complete() method.
- *
- * \return True if all buffers contained in the request have completed, false
- * otherwise
+ * \brief Insert a text representation of a Request into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The Request
+ * \return The output stream \a out
*/
-bool Request::completeBuffer(FrameBuffer *buffer)
+std::ostream &operator<<(std::ostream &out, const Request &r)
{
- int ret = pending_.erase(buffer);
- ASSERT(ret == 1);
-
- buffer->request_ = nullptr;
+ /* Pending, Completed, Cancelled(X). */
+ static const char *statuses = "PCX";
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- cancelled_ = true;
+ /* Example Output: Request(55:P:1/2:6523524) */
+ out << "Request(" << r.sequence() << ":" << statuses[r.status()] << ":"
+ << r._d()->pending_.size() << "/" << r.buffers().size() << ":"
+ << r.cookie() << ")";
- return !hasPendingBuffers();
+ return out;
}
} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
new file mode 100644
index 00000000..d19b5e2e
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * A camera sensor
+ */
+
+#include "libcamera/internal/camera_sensor.h"
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_object.h"
+
+/**
+ * \file camera_sensor.h
+ * \brief A camera sensor
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensor)
+
+/**
+ * \class CameraSensor
+ * \brief An abstract camera sensor
+ *
+ * The CameraSensor class eases handling of sensors for pipeline handlers by
+ * hiding the details of the kernel API and caching sensor information.
+ */
+
+/**
+ * \brief Destroy a CameraSensor
+ */
+CameraSensor::~CameraSensor() = default;
+
+/**
+ * \fn CameraSensor::model()
+ * \brief Retrieve the sensor model name
+ *
+ * The sensor model name is a free-formed string that uniquely identifies the
+ * sensor model.
+ *
+ * \return The sensor model name
+ */
+
+/**
+ * \fn CameraSensor::id()
+ * \brief Retrieve the sensor ID
+ *
+ * The sensor ID is a free-form string that uniquely identifies the sensor in
+ * the system. The ID satisfies the requirements to be used as a camera ID.
+ *
+ * \return The sensor ID
+ */
+
+/**
+ * \fn CameraSensor::entity()
+ * \brief Retrieve the sensor media entity
+ * \return The sensor media entity
+ */
+
+/**
+ * \fn CameraSensor::device()
+ * \brief Retrieve the camera sensor device
+ * \todo Remove this function by integrating DelayedControl with CameraSensor
+ * \return The camera sensor device
+ */
+
+/**
+ * \fn CameraSensor::focusLens()
+ * \brief Retrieve the focus lens controller
+ *
+ * \return The focus lens controller, or nullptr if no focus lens controller is
+ * connected to the sensor
+ */
+
+/**
+ * \fn CameraSensor::mbusCodes()
+ * \brief Retrieve the media bus codes supported by the camera sensor
+ *
+ * Any Bayer formats are listed using the sensor's native Bayer order,
+ * that is, with the effect of V4L2_CID_HFLIP and V4L2_CID_VFLIP undone
+ * (where these controls exist).
+ *
+ * \return The supported media bus codes sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::sizes()
+ * \brief Retrieve the supported frame sizes for a media bus code
+ * \param[in] mbusCode The media bus code for which sizes are requested
+ *
+ * \return The supported frame sizes for \a mbusCode sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::resolution()
+ * \brief Retrieve the camera sensor resolution
+ *
+ * The camera sensor resolution is the active pixel area size, clamped to the
+ * maximum frame size the sensor can produce if it is smaller than the active
+ * pixel area.
+ *
+ * \todo Consider whether it is desirable to distinguish between the maximum
+ * resolution
+ * the sensor can produce (also including upscaled ones) and the actual pixel
+ * array size by splitting this function in two.
+ *
+ * \return The camera sensor resolution in pixels
+ */
+
+/**
+ * \fn CameraSensor::getFormat()
+ * \brief Retrieve the best sensor format for a desired output
+ * \param[in] mbusCodes The list of acceptable media bus codes
+ * \param[in] size The desired size
+ * \param[in] maxSize The maximum size
+ *
+ * Media bus codes are selected from \a mbusCodes, which lists all acceptable
+ * codes in decreasing order of preference. Media bus codes supported by the
+ * sensor but not listed in \a mbusCodes are ignored. If none of the desired
+ * codes is supported, an empty format is returned.
+ *
+ * \a size indicates the desired size at the output of the sensor. This function
+ * selects the best media bus code and size supported by the sensor according
+ * to the following criteria.
+ *
+ * - The desired \a size shall fit in the sensor output size to avoid the need
+ * to up-scale.
+ * - The sensor output size shall match the desired aspect ratio to avoid the
+ * need to crop the field of view.
+ * - The sensor output size shall be as small as possible to lower the required
+ * bandwidth.
+ * - The desired \a size shall be supported by one of the media bus codes listed
+ * in \a mbusCodes.
+ * - The desired \a size shall fit into the maximum size \a maxSize if it is not
+ * null.
+ *
+ * When multiple media bus codes can produce the same size, the code at the
+ * lowest position in \a mbusCodes is selected.
+ *
+ * The use of this function is optional, as the above criteria may not match the
+ * needs of all pipeline handlers. Pipeline handlers may implement custom
+ * sensor format selection when needed.
+ *
+ * The returned sensor output format is guaranteed to be acceptable by the
+ * setFormat() function without any modification.
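+ *
+ * A usage sketch (the media bus codes and sizes are illustrative):
+ * \code{.cpp}
+ * V4L2SubdeviceFormat format = sensor->getFormat(
+ *	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ *	Size(1920, 1080), Size());
+ * if (format.code)
+ *	sensor->setFormat(&format);
+ * \endcode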
+ *
+ * \return The best sensor output format matching the desired media bus codes
+ * and size on success, or an empty format otherwise.
+ */
+
+/**
+ * \fn CameraSensor::setFormat()
+ * \brief Set the sensor output format
+ * \param[in] format The desired sensor output format
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity.
+ *
+ * If flips are writable they are configured according to the desired Transform.
+ * Transform::Identity always corresponds to H/V flip being disabled if the
+ * controls are writable. Flips are set before the new format is applied as
+ * they can effectively change the Bayer pattern ordering.
+ *
+ * The ranges of any controls associated with the sensor are also updated.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::tryFormat()
+ * \brief Try the sensor output format
+ * \param[in] format The desired sensor output format
+ *
+ * The ranges of any controls associated with the sensor are not updated.
+ *
+ * \todo Add support for Transform by changing the format's Bayer ordering
+ * before calling subdev_->setFormat().
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::applyConfiguration()
+ * \brief Apply a sensor configuration to the camera sensor
+ * \param[in] config The sensor configuration
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity
+ * \param[out] sensorFormat Format applied to the sensor (optional)
+ *
+ * Apply to the camera sensor the configuration \a config.
+ *
+ * \todo The configuration shall be fully populated and if any of the fields
+ * specified cannot be applied exactly, an error code is returned.
+ *
+ * \return 0 if \a config is applied correctly to the camera sensor, a negative
+ * error code otherwise
+ */
+
+/**
+ * \brief Retrieve the image source stream
+ *
+ * Sensors that produce multiple streams do not guarantee that the image stream
+ * is always assigned number 0. This function allows callers to retrieve the
+ * image stream on the sensor's source pad, in order to configure the receiving
+ * side accordingly.
+ *
+ * \return The image source stream
+ */
+V4L2Subdevice::Stream CameraSensor::imageStream() const
+{
+ return { 0, 0 };
+}
+
+/**
+ * \brief Retrieve the embedded data source stream
+ *
+ * Some sensors produce embedded data in a stream separate from the image
+ * stream. This function indicates if the sensor supports this feature by
+ * returning the embedded data stream on the sensor's source pad if available,
+ * or a std::optional<> without a value otherwise.
+ *
+ * \return The embedded data source stream
+ */
+std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const
+{
+ return {};
+}
+
+/**
+ * \brief Retrieve the format on the embedded data stream
+ *
+ * When an embedded data stream is available, this function returns the
+ * corresponding format on the sensor's source pad. The format may vary with
+ * the image stream format, and should therefore be retrieved after configuring
+ * the image stream.
+ *
+ * If the sensor doesn't support embedded data, this function returns a
+ * default-constructed format.
+ *
+ * \return The format on the embedded data stream
+ */
+V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const
+{
+ return {};
+}
+
+/**
+ * \brief Enable or disable the embedded data stream
+ * \param[in] enable True to enable the embedded data stream, false to disable it
+ *
+ * For sensors that support embedded data, this function enables or disables
+ * generation of embedded data. Some such sensors always produce embedded
+ * data, in which case this function returns -EISCONN if the caller attempts to
+ * disable embedded data.
+ *
+ * If the sensor doesn't support embedded data, this function returns 0 when \a
+ * enable is false, and -ENOSTR otherwise.
+ *
+ * \return 0 on success, or a negative error code otherwise
+ */
+int CameraSensor::setEmbeddedDataEnabled(bool enable)
+{
+ return enable ? -ENOSTR : 0;
+}
+
+/**
+ * \fn CameraSensor::properties()
+ * \brief Retrieve the camera sensor properties
+ * \return The list of camera sensor properties
+ */
+
+/**
+ * \fn CameraSensor::sensorInfo()
+ * \brief Assemble and return the camera sensor info
+ * \param[out] info The camera sensor info
+ *
+ * This function fills \a info with information that describes the camera sensor
+ * and its current configuration. The information combines static data (such as
+ * the sensor model or active pixel array size) and data specific to the
+ * current sensor configuration (such as the line length and pixel rate).
+ *
+ * Sensor information is only available for raw sensors. When called for a YUV
+ * sensor, this function returns -EINVAL.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::computeTransform()
+ * \brief Compute the Transform that gives the requested \a orientation
+ * \param[inout] orientation The desired image orientation
+ *
+ * This function computes the Transform that the pipeline handler should apply
+ * to the CameraSensor to obtain the requested \a orientation.
+ *
+ * The intended caller of this function is the validate() implementation of
+ * pipeline handlers, which pass in the application-requested
+ * CameraConfiguration::orientation and obtain a Transform to apply to the
+ * camera sensor, likely at configure() time.
+ *
+ * If the requested \a orientation cannot be obtained, the \a orientation
+ * parameter is adjusted to report the current image orientation and
+ * Transform::Identity is returned.
+ *
+ * If the requested \a orientation can be obtained, the function computes a
+ * Transform and does not adjust \a orientation.
+ *
+ * Pipeline handlers are expected to verify if \a orientation has been
+ * adjusted by this function and set the CameraConfiguration::status to
+ * Adjusted accordingly.
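+ *
+ * A sketch of the expected validate()-time usage (variable names are
+ * illustrative):
+ * \code{.cpp}
+ * Orientation requested = config->orientation;
+ * Transform transform = sensor->computeTransform(&config->orientation);
+ * if (config->orientation != requested)
+ *	status = CameraConfiguration::Adjusted;
+ * \endcode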
+ *
+ * \return A Transform instance that applied to the CameraSensor produces images
+ * with \a orientation
+ */
+
+/**
+ * \fn CameraSensor::bayerOrder()
+ * \brief Compute the Bayer order that results from the given Transform
+ * \param[in] t The Transform to apply to the sensor
+ *
+ * Some sensors change their Bayer order when they are h-flipped or v-flipped.
+ * This function computes and returns the Bayer order that would result from the
+ * given transform applied to the sensor.
+ *
+ * This function is valid only when the sensor produces raw Bayer formats.
+ *
+ * \return The Bayer order produced by the sensor when the Transform is applied
+ */
+
+/**
+ * \fn CameraSensor::controls()
+ * \brief Retrieve the supported V4L2 controls and their information
+ *
+ * Control information is updated automatically to reflect the current sensor
+ * configuration when the setFormat() function is called, without invalidating
+ * any iterator on the ControlInfoMap.
+ *
+ * \return A map of the V4L2 controls supported by the sensor
+ */
+
+/**
+ * \fn CameraSensor::getControls()
+ * \brief Read V4L2 controls from the sensor
+ * \param[in] ids The list of controls to read, specified by their ID
+ *
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList. The control identifiers are defined by
+ * the V4L2 specification (V4L2_CID_*).
+ *
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
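+ *
+ * For example (the requested control IDs are illustrative):
+ * \code{.cpp}
+ * ControlList ctrls = sensor->getControls(
+ *	{ V4L2_CID_EXPOSURE, V4L2_CID_ANALOGUE_GAIN });
+ * if (!ctrls.empty()) {
+ *	int32_t exposure = ctrls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ *	// ... use the values ...
+ * }
+ * \endcode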
+ *
+ * \sa V4L2Device::getControls()
+ *
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
+ */
+
+/**
+ * \fn CameraSensor::setControls()
+ * \brief Write V4L2 controls to the sensor
+ * \param[in] ctrls The list of controls to write
+ *
+ * This function writes the value of all controls contained in \a ctrls, and
+ * stores the values actually applied to the device in the corresponding \a
+ * ctrls entry. The control identifiers are defined by the V4L2 specification
+ * (V4L2_CID_*).
+ *
+ * If any control in \a ctrls is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
+ *
+ * If an error occurs while writing the controls, the index of the first
+ * control that couldn't be written is returned. All controls below that index
+ * are written and their values are updated in \a ctrls, while all other
+ * controls are not written and their values are not changed.
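+ *
+ * For example (the values are illustrative):
+ * \code{.cpp}
+ * ControlList ctrls(sensor->controls());
+ * ctrls.set(V4L2_CID_EXPOSURE, 1000);
+ * ctrls.set(V4L2_CID_ANALOGUE_GAIN, 4);
+ * int ret = sensor->setControls(&ctrls);
+ * \endcode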
+ *
+ * \sa V4L2Device::setControls()
+ *
+ * \return 0 on success or an error code otherwise
+ * \retval -EINVAL One of the controls is not supported or not accessible
+ * \retval i The index of the control that failed
+ */
+
+/**
+ * \fn CameraSensor::testPatternModes()
+ * \brief Retrieve all the supported test pattern modes of the camera sensor
+ *
+ * The test pattern mode values correspond to the controls::TestPattern control.
+ *
+ * \return The list of test pattern modes
+ */
+
+/**
+ * \fn CameraSensor::setTestPatternMode()
+ * \brief Set the test pattern mode for the camera sensor
+ * \param[in] mode The test pattern mode
+ *
+ * The new \a mode is applied to the sensor if it differs from the active test
+ * pattern mode. Otherwise, this function is a no-op. Setting the same test
+ * pattern mode for every frame thus incurs no performance penalty.
+ */
+
+/**
+ * \fn CameraSensor::sensorDelays()
+ * \brief Fetch the sensor delay values
+ *
+ * This function retrieves the delays that the sensor applies to controls. If
+ * the static properties database doesn't specify control delay values for the
+ * sensor, default delays that may be suitable are returned and a warning is
+ * logged.
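+ *
+ * A sketch of feeding the delays to DelayedControls (the selected control
+ * set is illustrative):
+ * \code{.cpp}
+ * const CameraSensorProperties::SensorDelays &delays = sensor->sensorDelays();
+ *
+ * std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ *	{ V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ *	{ V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ * };
+ * delayedCtrls = std::make_unique<DelayedControls>(sensor->device(), params);
+ * \endcode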
+ *
+ * \return The sensor delay values
+ */
+
+/**
+ * \class CameraSensorFactoryBase
+ * \brief Base class for camera sensor factories
+ *
+ * The CameraSensorFactoryBase class is the base of all specializations of
+ * the CameraSensorFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
+
+/**
+ * \brief Construct a camera sensor factory base
+ * \param[in] name The camera sensor factory name
+ * \param[in] priority Priority order for factory selection
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ */
+CameraSensorFactoryBase::CameraSensorFactoryBase(const char *name, int priority)
+ : name_(name), priority_(priority)
+{
+ registerFactory(this);
+}
+
+/**
+ * \brief Create an instance of the CameraSensor corresponding to a media entity
+ * \param[in] entity The media entity on the source end of the sensor
+ *
+ * When multiple factories match the same \a entity, this function selects the
+ * matching factory with the highest priority as specified to the
+ * REGISTER_CAMERA_SENSOR() macro at factory registration time. If multiple
+ * matching factories have the same highest priority value, which factory gets
+ * selected is undefined and may vary between runs.
+ *
+ * \return A unique pointer to a new instance of the CameraSensor subclass
+ * matching the entity, or a null pointer if no such factory exists
+ */
+std::unique_ptr<CameraSensor> CameraSensorFactoryBase::create(MediaEntity *entity)
+{
+ const std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ for (const CameraSensorFactoryBase *factory : factories) {
+ std::variant<std::unique_ptr<CameraSensor>, int> result =
+ factory->match(entity);
+
+ if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result)) {
+ LOG(CameraSensor, Debug)
+ << "Entity '" << entity->name() << "' matched by "
+ << factory->name();
+ return std::get<std::unique_ptr<CameraSensor>>(std::move(result));
+ }
+
+ if (std::get<int>(result)) {
+ LOG(CameraSensor, Error)
+ << "Failed to create sensor for '"
+				<< entity->name() << "': " << std::get<int>(result);
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+/**
+ * \fn CameraSensorFactoryBase::name()
+ * \brief Retrieve the camera sensor factory name
+ * \return The name of the factory
+ */
+
+/**
+ * \fn CameraSensorFactoryBase::priority()
+ * \brief Retrieve the priority value for the factory
+ * \return The priority value for the factory
+ */
+
+/**
+ * \brief Retrieve the list of all camera sensor factories
+ *
+ * The factories are sorted in decreasing priority order.
+ *
+ * \return The list of camera sensor factories
+ */
+std::vector<CameraSensorFactoryBase *> &CameraSensorFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \brief Add a camera sensor class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor
+ */
+void CameraSensorFactoryBase::registerFactory(CameraSensorFactoryBase *factory)
+{
+ std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ auto pos = std::upper_bound(factories.begin(), factories.end(), factory,
+ [](const CameraSensorFactoryBase *value,
+ const CameraSensorFactoryBase *elem) {
+ return value->priority() > elem->priority();
+ });
+ factories.insert(pos, factory);
+}
+
+/**
+ * \class CameraSensorFactory
+ * \brief Registration of CameraSensorFactory classes and creation of instances
+ * \tparam _CameraSensor The camera sensor class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensor classes, the
+ * CameraSensorFactory class implements auto-registration of camera sensors.
+ * Each CameraSensor subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR() macro, which will create a corresponding instance
+ * of a CameraSensorFactory subclass and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn CameraSensorFactory::CameraSensorFactory()
+ * \brief Construct a camera sensor factory
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorFactoryBase::factories()
+ * function.
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR(sensor, priority)
+ * \brief Register a camera sensor type to the sensor factory
+ * \param[in] sensor Class name of the CameraSensor derived class to register
+ * \param[in] priority Priority order for factory selection
+ *
+ * Register a CameraSensor subclass with the factory and make it available to
+ * try and match sensors. The subclass needs to implement a static match
+ * function:
+ *
+ * \code{.cpp}
+ * static std::variant<std::unique_ptr<CameraSensor>, int> match(MediaEntity *entity);
+ * \endcode
+ *
+ * The function tests if the sensor class supports the camera sensor identified
+ * by a MediaEntity. If so, it creates a new instance of the sensor class. The
+ * return value is a variant that contains
+ *
+ * - A new instance of the camera sensor class if the entity matched and
+ *   creation succeeded;
+ * - A non-zero error code if the entity matched and the creation failed; or
+ * - A zero error code if the entity didn't match.
+ *
+ * When multiple factories can support the same MediaEntity (as in the match()
+ * function of multiple factories succeeding for the same entity), the \a
+ * priority argument selects which factory will be used. See
+ * CameraSensorFactoryBase::create() for more information.
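+ *
+ * Registration is typically a single statement at the end of the sensor
+ * source file, for example:
+ * \code{.cpp}
+ * REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100)
+ * \endcode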
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_legacy.cpp b/src/libcamera/sensor/camera_sensor_legacy.cpp
new file mode 100644
index 00000000..32989c19
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_legacy.cpp
@@ -0,0 +1,1045 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * A V4L2-backed camera sensor
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorLegacy : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorLegacy(const MediaEntity *entity);
+ ~CameraSensorLegacy();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorLegacy)
+
+ int init();
+ int generateId();
+ int validateSensorDriver();
+ void initVimcDefaultProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int initProperties();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+ int discoverAncillaryDevices();
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+ unsigned int pad_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ const BayerFormat *bayerFormat_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorLegacy
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * The implementation is currently limited to sensors that expose a single V4L2
+ * subdevice with a single pad. It will be extended to support more complex
+ * devices as the needs arise.
+ */
+
+CameraSensorLegacy::CameraSensorLegacy(const MediaEntity *entity)
+ : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorLegacy::~CameraSensorLegacy() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorLegacy::match(MediaEntity *entity)
+{
+ std::unique_ptr<CameraSensorLegacy> sensor =
+ std::make_unique<CameraSensorLegacy>(entity);
+
+ int ret = sensor->init();
+ if (ret)
+ return { ret };
+
+ return { std::move(sensor) };
+}
+
+int CameraSensorLegacy::init()
+{
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ pad_ = pad->index();
+ break;
+ }
+ }
+
+ if (pad_ == UINT_MAX) {
+ LOG(CameraSensor, Error)
+ << "Sensors with more than one pad are not supported";
+ return -EINVAL;
+ }
+
+ switch (entity_->function()) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ case MEDIA_ENT_F_PROC_VIDEO_ISP:
+ break;
+
+ default:
+ LOG(CameraSensor, Error)
+ << "Invalid sensor function "
+ << utils::hex(entity_->function());
+ return -EINVAL;
+ }
+
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clear any flips to be sure we get the "native" Bayer order. This is
+ * harmless for sensors where the flips don't affect the Bayer order.
+ */
+ ControlList ctrls(subdev_->controls());
+ if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_HFLIP, 0);
+ if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_VFLIP, 0);
+ subdev_->setControls(&ctrls);
+
+ /* Enumerate, sort and cache media bus codes and sizes. */
+ formats_ = subdev_->formats(pad_);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return -EINVAL;
+ }
+
+ mbusCodes_ = utils::map_keys(formats_);
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+
+ for (const auto &format : formats_) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /* Remove duplicates. */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+	 * VIMC is a bit special, as it does not yet meet all the mandatory
+ * requirements regular sensors have to respect.
+ *
+ * Do not validate the driver if it's VIMC and initialize the sensor
+ * properties with static information.
+ *
+ * \todo Remove the special case once the VIMC driver has been
+ * updated in all test platforms.
+ */
+ if (entity_->device()->driver() == "vimc") {
+ initVimcDefaultProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
+ }
+
+ /* Get the color filter array pattern (only for RAW sensors). */
+ for (unsigned int mbusCode : mbusCodes_) {
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode);
+ if (bayerFormat.isValid()) {
+ bayerFormat_ = &bayerFormat;
+ break;
+ }
+ }
+
+ ret = validateSensorDriver();
+ if (ret)
+ return ret;
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ ret = discoverAncillaryDevices();
+ if (ret)
+ return ret;
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+}
+
+int CameraSensorLegacy::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+ *
+ * Verify it's a platform device and construct ID from the device path
+ * and model of sensor.
+ */
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
+
+int CameraSensorLegacy::validateSensorDriver()
+{
+ int err = 0;
+
+ /*
+ * Optional controls are used to register optional sensor properties. If
+ * not present, some values will be defaulted.
+ */
+ static constexpr uint32_t optionalControls[] = {
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ };
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+ for (uint32_t ctrl : optionalControls) {
+ if (!controls.count(ctrl))
+ LOG(CameraSensor, Debug)
+ << "Optional V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ }
+
+ /*
+ * Recommended controls are similar to optional controls, but will
+ * become mandatory in the near future. Be loud if they're missing.
+ */
+ static constexpr uint32_t recommendedControls[] = {
+ V4L2_CID_CAMERA_ORIENTATION,
+ };
+
+ for (uint32_t ctrl : recommendedControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Warning)
+ << "Recommended V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ err = -EINVAL;
+ }
+ }
+
+ /*
+ * Verify if sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * Make sure the required selection targets are supported.
+ *
+ * Failures in reading any of the targets are not deemed to be fatal,
+ * but some properties and features, like constructing a
+ * IPACameraSensorInfo for the IPA module, won't be supported.
+ *
+ * \todo Make support for selection targets mandatory as soon as all
+ * test platforms have been updated.
+ */
+ Rectangle rect;
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect);
+ if (ret) {
+ /*
+ * Default the pixel array size to the largest size supported
+ * by the sensor. The sizes_ vector is sorted in ascending
+ * order, the largest size is thus the last element.
+ */
+ pixelArraySize_ = sizes_.back();
+
+ LOG(CameraSensor, Warning)
+ << "The PixelArraySize property has been defaulted to "
+ << pixelArraySize_;
+ err = -EINVAL;
+ } else {
+ pixelArraySize_ = rect.size();
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_);
+ if (ret) {
+ activeArea_ = Rectangle(pixelArraySize_);
+ LOG(CameraSensor, Warning)
+ << "The PixelArrayActiveAreas property has been defaulted to "
+ << activeArea_;
+ err = -EINVAL;
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect);
+ if (ret) {
+ LOG(CameraSensor, Warning)
+ << "Failed to retrieve the sensor crop rectangle";
+ err = -EINVAL;
+ }
+
+ if (err) {
+ LOG(CameraSensor, Warning)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Warning)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ }
+
+ if (!bayerFormat_)
+ return 0;
+
+ /*
+ * For raw sensors, make sure the sensor driver supports the controls
+ * required by the CameraSensor class.
+ */
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ err = 0;
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return err;
+ }
+
+ return 0;
+}
+
+void CameraSensorLegacy::initVimcDefaultProperties()
+{
+ /* Use the largest supported size. */
+ pixelArraySize_ = sizes_.back();
+ activeArea_ = Rectangle(pixelArraySize_);
+}
+
+void CameraSensorLegacy::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorLegacy::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorLegacy::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+ * The camera sensor supports test patterns but we don't know
+		 * how to map them, so this should be fixed.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+int CameraSensorLegacy::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ int ret = generateId();
+ if (ret)
+ return ret;
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern, register only for RAW sensors. */
+ if (bayerFormat_) {
+ int32_t cfa;
+ switch (bayerFormat_->order) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+ }
+
+ return 0;
+}
+
+int CameraSensorLegacy::discoverAncillaryDevices()
+{
+ int ret;
+
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+std::vector<Size> CameraSensorLegacy::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorLegacy::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorLegacy::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorLegacy::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(pad_, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorLegacy::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorLegacy::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			       return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+	 * with two different cropping/subsampling configurations will look
+	 * identical, and only the first one found will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+int CameraSensorLegacy::sensorInfo(IPACameraSensorInfo *info) const
+{
+ if (!bayerFormat_)
+ return -EINVAL;
+
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ /*
+	 * \todo Support for retrieving the crop rectangle is scheduled to
+	 * become mandatory. For the time being, fall back to the default
+	 * value (the active area) if the rectangle cannot be retrieved.
+	 */
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
+ if (ret) {
+ info->analogCrop = activeArea_;
+ LOG(CameraSensor, Warning)
+ << "The analogue crop rectangle has been defaulted to the active area size";
+ }
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+ * are defined relatively to the active pixel area, while V4L2's
+ * TGT_CROP target is defined in respect to the full pixel array.
+ *
+ * Compensate it by subtracting the active area offset.
+ */
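+	/*
+	 * For example (illustrative numbers): with an active area of
+	 * 1920x1080 at offset (8, 8) in the full pixel array, a V4L2 crop
+	 * rectangle of (8, 8)/1920x1080 is reported as an analogCrop of
+	 * (0, 0)/1920x1080.
+	 */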
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(pad_, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
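+	/*
+	 * As a reminder of how these values combine (standard sensor timing
+	 * arithmetic, not used directly below): the frame duration equals
+	 * lineLength * frameLength / pixelRate.
+	 */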
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorLegacy::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
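+	/*
+	 * For instance (an illustrative case): a sensor mounted upside-down
+	 * (mountingOrientation_ == Rotate180) asked to produce Rotate0 yields
+	 * transform = Rotate0 / Rotate180 = Rot180, which the flip controls
+	 * can implement as HFlip * VFlip.
+	 */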
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorLegacy::bayerOrder(Transform t) const
+{
+	/* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
+
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+	 * Apply the transform to the native (i.e. untransformed) Bayer order
+	 * to obtain the resulting order.
+ */
+ return bayerFormat_->transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorLegacy::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorLegacy::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorLegacy::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorLegacy::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorLegacy::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorLegacy::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
new file mode 100644
index 00000000..e2f518f9
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Database of camera sensor properties
+ */
+
+#include "libcamera/internal/camera_sensor_properties.h"
+
+#include <map>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file camera_sensor_properties.h
+ * \brief Database of camera sensor properties
+ *
+ * The database of camera sensor properties collects static information about
+ * camera sensors that is not possible or desirable to retrieve from the device
+ * at run time.
+ *
+ * The database is indexed using the camera sensor model, as reported by the
+ * properties::Model property, and for each supported sensor it contains a
+ * list of properties.
+ */
+
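+/*
+ * A minimal usage sketch (for illustration only, mirroring how the sensor
+ * classes use the database):
+ *
+ *     const CameraSensorProperties *props =
+ *             CameraSensorProperties::get("imx219");
+ *     if (props)
+ *             properties.set(properties::UnitCellSize, props->unitCellSize);
+ */
+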
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensorProperties)
+
+/**
+ * \struct CameraSensorProperties
+ * \brief Database of camera sensor properties
+ *
+ * \var CameraSensorProperties::unitCellSize
+ * \brief The physical size of a pixel, including pixel edges, in nanometers.
+ *
+ * \var CameraSensorProperties::testPatternModes
+ * \brief Map that associates the TestPattern control value with the indexes of
+ * the corresponding sensor test pattern modes as returned by
+ * V4L2_CID_TEST_PATTERN.
+ *
+ * \var CameraSensorProperties::sensorDelays
+ * \brief Sensor control application delays
+ *
+ * This structure may be defined as empty if the actual sensor delays are not
+ * available or have not been measured.
+ */
+
+/**
+ * \struct CameraSensorProperties::SensorDelays
+ * \brief Sensor control application delay values
+ *
+ * This structure holds delay values, expressed in number of frames, between the
+ * time a control value is applied to the sensor and the time that value is
+ * reflected in the output. For example "2 frames delay" means that parameters
+ * set during frame N will take effect for frame N+2 (and by extension a delay
+ * of 0 would mean the parameter is applied immediately to the current frame).
+ *
+ * \var CameraSensorProperties::SensorDelays::exposureDelay
+ * \brief Number of frames between application of exposure control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::gainDelay
+ * \brief Number of frames between application of analogue gain control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::vblankDelay
+ * \brief Number of frames between application of vblank control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::hblankDelay
+ * \brief Number of frames between application of hblank control and effect
+ */
+
+/**
+ * \brief Retrieve the properties associated with a sensor
+ * \param sensor The sensor model name as reported by properties::Model
+ * \return A pointer to the CameraSensorProperties instance associated with a sensor
+ * or nullptr if the sensor is not supported
+ */
+const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
+{
+ static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0144", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ar0521", {
+ .unitCellSize = { 2200, 2200 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = { },
+ } },
+ { "gc05a2", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "gc08a3", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "hi846", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * No corresponding test pattern mode for:
+ * 5: "Gradient Horizontal"
+ * 6: "Gradient Vertical"
+ * 7: "Check Board"
+ * 8: "Slant Pattern"
+ * 9: "Resolution Pattern"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "imx214", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx219", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx258", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx290", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx296", {
+ .unitCellSize = { 3450, 3450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx327", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx462", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx477", {
+ .unitCellSize = { 1550, 1550 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx519", {
+ .unitCellSize = { 1220, 1220 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * The driver reports ColorBars and ColorBarsFadeToGray as well but
+ * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx708", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "ov2685", {
+ .unitCellSize = { 1750, 1750 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+			{ controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 3: "Random Data"
+ * 4: "Black White Square"
+ * 5: "Color Square"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov2740", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+			{ controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov4689", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+			{ controls::draft::TestPatternModeColorBars, 1 },
+			{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test patterns in
+ * MIPI CCS specification for sensor's
+ * colorBarType2 and colorBarType3.
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5640", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5647", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {},
+ /*
+ * We run this sensor in a mode where the gain delay is
+ * bumped up to 2. It seems to be the only way to make
+ * the delays "predictable".
+ *
+ * \todo Verify these delays properly, as the upstream
+ * driver appears to configure _no_ delay.
+ */
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov5670", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5675", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5693", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for
+ * 1: "Random data" and 3: "Colour Bars with
+ * Rolling Bar".
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov64a40", {
+ .unitCellSize = { 1008, 1008 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+				 * 3: "Vertical Color Bar Type 3"
+				 * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov7251", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov8858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+				 * 3: "Vertical Color Bar Type 3"
+				 * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov8865", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 1: "Random data"
+ * 3: "Color bars with rolling bar"
+ * 4: "Color squares"
+ * 5: "Color squares with rolling bar"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov9281", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov13858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ };
+
+ const auto it = sensorProps.find(sensor);
+ if (it == sensorProps.end()) {
+ LOG(CameraSensorProperties, Warning)
+ << "No static properties available for '" << sensor << "'";
+ LOG(CameraSensorProperties, Warning)
+ << "Please consider updating the camera sensor properties database";
+ return nullptr;
+ }
+
+ return &it->second;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp
new file mode 100644
index 00000000..ab75b1f8
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_raw.cpp
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy.
+ *
+ * A raw camera sensor using the V4L2 streams API
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorRaw : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorRaw(const MediaEntity *entity);
+ ~CameraSensorRaw();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ V4L2Subdevice::Stream imageStream() const override;
+ std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override;
+ V4L2SubdeviceFormat embeddedDataFormat() const override;
+ int setEmbeddedDataEnabled(bool enable) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorRaw)
+
+ std::optional<int> init();
+ int initProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+
+ struct Streams {
+ V4L2Subdevice::Stream sink;
+ V4L2Subdevice::Stream source;
+ };
+
+ struct {
+ Streams image;
+ std::optional<Streams> edata;
+ } streams_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ BayerFormat::Order cfaPattern_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorRaw
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * This class supports single-subdev sensors with a single source pad and one
+ * or two internal sink pads (for the image and embedded data streams).
+ */
+
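+/*
+ * An illustrative topology (pad and stream numbers are arbitrary): the image
+ * stream is routed from an internal sink pad to the source pad, and sensors
+ * producing embedded data expose a second route to another stream on the same
+ * source pad:
+ *
+ *     sink pad 1 / stream 0 (image)         -> source pad 0 / stream 0
+ *     sink pad 2 / stream 0 (embedded data) -> source pad 0 / stream 1
+ */
+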
+CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity)
+ : entity_(entity), staticProps_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorRaw::~CameraSensorRaw() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorRaw::match(MediaEntity *entity)
+{
+ /* Check the entity type. */
+ if (entity->type() != MediaEntity::Type::V4L2Subdevice ||
+ entity->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported entity type ("
+ << utils::to_underlying(entity->type())
+ << ") or function (" << utils::hex(entity->function()) << ")";
+ return { 0 };
+ }
+
+ /* Count and check the number of pads. */
+ static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_INTERNAL;
+ unsigned int numSinks = 0;
+ unsigned int numSources = 0;
+
+ for (const MediaPad *pad : entity->pads()) {
+ switch (pad->flags() & kPadFlagsMask) {
+ case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL:
+ numSinks++;
+ break;
+
+ case MEDIA_PAD_FL_SOURCE:
+ numSources++;
+ break;
+
+ default:
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported pad " << pad->index()
+ << " type " << utils::hex(pad->flags());
+ return { 0 };
+ }
+ }
+
+ if (numSinks < 1 || numSinks > 2 || numSources != 1) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported number of sinks ("
+ << numSinks << ") or sources (" << numSources << ")";
+ return { 0 };
+ }
+
+ /*
+ * The entity matches. Create the camera sensor and initialize it. The
+ * init() function will perform further match checks.
+ */
+ std::unique_ptr<CameraSensorRaw> sensor =
+ std::make_unique<CameraSensorRaw>(entity);
+
+ std::optional<int> err = sensor->init();
+ if (err)
+ return { *err };
+
+ return { std::move(sensor) };
+}
+
+std::optional<int> CameraSensorRaw::init()
+{
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret)
+ return { ret };
+
+ /*
+ * 1. Identify the pads.
+ */
+
+ /*
+ * First locate the source pad. The match() function guarantees there
+ * is one and only one source pad.
+ */
+ unsigned int sourcePad = UINT_MAX;
+
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ sourcePad = pad->index();
+ break;
+ }
+ }
+
+ /*
+ * Iterate over the routes to identify the streams on the source pad,
+ * and the internal sink pads.
+ */
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return { ret };
+
+ bool imageStreamFound = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source.pad != sourcePad) {
+ LOG(CameraSensor, Error) << "Invalid route " << route;
+ return { -EINVAL };
+ }
+
+ /* Identify the stream type based on the supported formats. */
+ V4L2Subdevice::Formats formats = subdev_->formats(route.source);
+
+ std::optional<MediaBusFormatInfo::Type> type;
+
+ for (const auto &[code, sizes] : formats) {
+ const MediaBusFormatInfo &info =
+ MediaBusFormatInfo::info(code);
+ if (info.isValid()) {
+ type = info.type;
+ break;
+ }
+ }
+
+ if (!type) {
+ LOG(CameraSensor, Warning)
+ << "No known format on pad " << route.source;
+ continue;
+ }
+
+ switch (*type) {
+ case MediaBusFormatInfo::Type::Image:
+ if (imageStreamFound) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal image streams ("
+ << streams_.image.sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ imageStreamFound = true;
+ streams_.image.sink = route.sink;
+ streams_.image.source = route.source;
+ break;
+
+ case MediaBusFormatInfo::Type::Metadata:
+ /*
+ * Skip metadata streams that are not sensor embedded
+ * data. The source stream reports a generic metadata
+ * format, check the sink stream for the exact format.
+ */
+ formats = subdev_->formats(route.sink);
+ if (formats.size() != 1)
+ continue;
+
+ if (MediaBusFormatInfo::info(formats.cbegin()->first).type !=
+ MediaBusFormatInfo::Type::EmbeddedData)
+ continue;
+
+ if (streams_.edata) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal embedded data streams ("
+ << streams_.edata->sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ streams_.edata = { route.sink, route.source };
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!imageStreamFound) {
+ LOG(CameraSensor, Error) << "No image stream found";
+ return { -EINVAL };
+ }
+
+ LOG(CameraSensor, Debug)
+ << "Found image stream " << streams_.image.sink
+ << " -> " << streams_.image.source;
+
+ if (streams_.edata)
+ LOG(CameraSensor, Debug)
+ << "Found embedded data stream " << streams_.edata->sink
+ << " -> " << streams_.edata->source;
+
+ /*
+ * 2. Enumerate and cache the media bus codes, sizes and colour filter
+ * array order for the image stream.
+ */
+
+ /*
+ * Get the native sensor CFA pattern. It is simpler to retrieve it from
+ * the internal image sink pad as it is guaranteed to expose a single
+ * format, and is not affected by flips.
+ */
+ V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink);
+ if (formats.size() != 1) {
+ LOG(CameraSensor, Error)
+ << "Image pad has " << formats.size()
+ << " formats, expected 1";
+ return { -EINVAL };
+ }
+
+ uint32_t nativeFormat = formats.cbegin()->first;
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat);
+ if (!bayerFormat.isValid()) {
+ LOG(CameraSensor, Error)
+ << "Invalid native format " << nativeFormat;
+ return { 0 };
+ }
+
+ cfaPattern_ = bayerFormat.order;
+
+ /*
+ * Retrieve and cache the media bus codes and sizes on the source image
+ * stream.
+ */
+ formats_ = subdev_->formats(streams_.image.source);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return { -EINVAL };
+ }
+
+ /* Populate and sort the media bus codes and the sizes. */
+ for (const auto &[code, ranges] : formats_) {
+ /* Drop non-raw formats (in case we have a hybrid sensor). */
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ mbusCodes_.push_back(code);
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ if (mbusCodes_.empty()) {
+ LOG(CameraSensor, Debug) << "No raw image formats found";
+ return { 0 };
+ }
+
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /*
+ * Remove duplicate sizes. There are no duplicate media bus codes as
+ * they are the keys in the formats map.
+ */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+ * 3. Query selection rectangles. Retrieve properties, and verify that
+ * all the expected selection rectangles are supported.
+ */
+
+ Rectangle rect;
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop bounds";
+ return { ret };
+ }
+
+ pixelArraySize_ = rect.size();
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT,
+ &activeArea_);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop default";
+ return { ret };
+ }
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop rectangle";
+ return { ret };
+ }
+
+ /*
+ * 4. Verify that all required controls are present.
+ */
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ ret = 0;
+
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return { ret };
+ }
+
+ /*
+	 * Verify whether the sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * 5. Discover ancillary devices.
+ *
+ * \todo This code may be shared by different V4L2 sensor classes.
+ */
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ /*
+ * 6. Initialize properties.
+ */
+
+ ret = initProperties();
+ if (ret)
+ return { ret };
+
+ /*
+ * 7. Initialize controls.
+ */
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+			return { ret };
+ }
+
+ ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return { ret };
+
+ return {};
+}
+
+int CameraSensorRaw::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ id_ = sysfs::firmwareNodePath(subdev_->devicePath());
+ if (id_.empty()) {
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+ }
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern. */
+ uint32_t cfa;
+
+ switch (cfaPattern_) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ default:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+
+ return 0;
+}
+
+void CameraSensorRaw::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorRaw::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+		 * The camera sensor supports test patterns but we don't know
+		 * how to map them; the static properties database should be
+		 * extended to fix this.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorRaw::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(streams_.image.source, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(streams_.image.source, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+ if (bayer.bitDepth == config.bitDepth)
+ return true;
+ return false;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+	 * with two different cropping/subsampling configurations will look
+	 * identical, and only the first one found will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+V4L2Subdevice::Stream CameraSensorRaw::imageStream() const
+{
+ return streams_.image.source;
+}
+
+std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const
+{
+ if (!streams_.edata)
+ return {};
+
+ return { streams_.edata->source };
+}
+
+V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const
+{
+ if (!streams_.edata)
+ return {};
+
+ V4L2SubdeviceFormat format;
+ int ret = subdev_->getFormat(streams_.edata->source, &format);
+ if (ret)
+ return {};
+
+ return format;
+}
+
+int CameraSensorRaw::setEmbeddedDataEnabled(bool enable)
+{
+ if (!streams_.edata)
+ return enable ? -ENOSTR : 0;
+
+ V4L2Subdevice::Routing routing{ 2 };
+
+ routing[0].sink = streams_.image.sink;
+ routing[0].source = streams_.image.source;
+ routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+
+ routing[1].sink = streams_.edata->sink;
+ routing[1].source = streams_.edata->source;
+ routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0;
+
+ int ret = subdev_->setRouting(&routing);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the embedded data stream has been enabled or disabled
+ * correctly. Assume at least one route will match the embedded data
+ * source stream, as there would be something seriously wrong
+ * otherwise.
+ */
+ bool enabled = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source != streams_.edata->source)
+ continue;
+
+ enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+ break;
+ }
+
+ if (enabled != enable)
+ return enabled ? -EISCONN : -ENOSTR;
+
+ return 0;
+}
+
+int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const
+{
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &info->analogCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+ * are defined relatively to the active pixel area, while V4L2's
+ * TGT_CROP target is defined in respect to the full pixel array.
+ *
+ * Compensate it by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(streams_.image.source, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorRaw::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const
+{
+ if (!flipsAlterBayerOrder_)
+ return cfaPattern_;
+
+ /*
+	 * Apply the transform to the native (i.e. untransformed) Bayer order.
+	 * The bit depth and packing of the temporary BayerFormat below are
+	 * arbitrary, as only the order is used.
+ */
+ BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None };
+ return format.transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorRaw::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorRaw::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorRaw::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
new file mode 100644
index 00000000..dce74ed6
--- /dev/null
+++ b/src/libcamera/sensor/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'camera_sensor.cpp',
+ 'camera_sensor_legacy.cpp',
+ 'camera_sensor_properties.cpp',
+ 'camera_sensor_raw.cpp',
+])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..d9b61d37
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <libcamera/base/memfd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can also be checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together to share the memory allocated by a single SharedMem object whenever
+ * possible. This helps minimize the number of memfds created.
+ */
+
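+/*
+ * A minimal usage sketch (for illustration only; the name and size are
+ * arbitrary):
+ *
+ *     SharedMem shm("ipa-stats", 4096);
+ *     if (!shm)
+ *             return;
+ *
+ *     Span<uint8_t> mem = shm.mem();
+ *     // Pass shm.fd() to another process, which can mmap() it.
+ */
+
+/**
+ * \brief Construct an empty SharedMem with no backing memory
+ */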
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+ UniqueFD memfd = MemFd::create(name.c_str(), size,
+ MemFd::Seal::Shrink | MemFd::Seal::Grow);
+ if (!memfd.isValid())
+ return;
+
+ fd_ = SharedFD(std::move(memfd));
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+	/* Unmap any previously mapped memory before adopting rhs's. */
+	if (!mem_.empty())
+		munmap(mem_.data(), mem_.size_bytes());
+
+	this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the size of the
+ * object \a T. The bool() operator should be used to check that the allocation
+ * was successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
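+ *
+ * A minimal usage sketch, with a hypothetical trivially-copyable type:
+ *
+ * \code
+ * struct Reading {
+ *         uint32_t counter;
+ * };
+ *
+ * SharedMemObject<Reading> reading("reading");
+ * if (!reading)
+ *         return;
+ *
+ * reading->counter++;
+ * \endcode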
+ */
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMemObject
+ * instances can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ *
+ * \return A reference to this SharedMemObject
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..a50db668
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,208 @@
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretical problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
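+
+A rough sketch of the ringbuffer idea (the names, the buffer count and the
+SwIspStats statistics type are illustrative only; wrapping the ring in a
+single SharedMemObject keeps the number of memfds down):
+
+  static constexpr unsigned int kStatsBufferCount = 4;
+
+  struct SwIspStatsRing {
+          std::array<SwIspStats, kStatsBufferCount> buffers;
+  };
+
+  SharedMemObject<SwIspStatsRing> sharedStats_;
+
+  void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId)
+  {
+          sharedStats_->buffers[bufferId] = stats_;
+          statsReady.emit(frame, bufferId);
+  }
+
+with bufferId cycling through the ring, e.g. frame % kStatsBufferCount.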
+
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+Better still, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal ?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't make it a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now; I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me think we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more than debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering, assuming
+> that it is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under "let's wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where".
+
+---
+
+9. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+10. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+13. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..f0b83261
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters
+ */
+
+/**
+ * \var DebayerParams::kRGBLookupSize
+ * \brief Size of a color lookup table
+ */
+
+/**
+ * \typedef DebayerParams::ColorLookupTable
+ * \brief Type of the lookup tables for red, green, blue values
+ */
+
+/**
+ * \var DebayerParams::red
+ * \brief Lookup table for red color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::green
+ * \brief Lookup table for green color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::blue
+ * \brief Lookup table for blue color, mapping input values to output values
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ *
+ * \return 0 on success, a negative errno on failure
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats
+ * \param[in] inputFormat The input format
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported input formats
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats
+ * \param[in] inputFormat The input format
+ *
+ * \return All supported output formats or an empty vector if there are none
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config
+ */
+
+/**
+ * \fn void Debayer::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format
+ * \param[in] frame The frame number
+ * \param[in] input The input buffer
+ * \param[in] output The output buffer
+ * \param[in] params The parameters to be used in debayering
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input size
+ *
+ * \return The valid size ranges or an empty range if there are none
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..d7ca060d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..31ab96ab
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+
+#include <linux/dma-buf.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats))
+{
+ /*
+ * Reading from uncached buffers may be very slow.
+ * In such a case, it's better to copy input buffer data to normal memory.
+ * But in the case of cached buffers, copying the data is unnecessary overhead.
+ * enableInputMemcpy_ makes this behavior configurable. At the moment, we
+ * always set it to true as the safer choice, but this should be changed in
+ * the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize color lookup tables */
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++)
+ red_[i] = green_[i] = blue_[i] = i;
+}
+
+DebayerCpu::~DebayerCpu() = default;
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+ * For the first pixel getting a pixel from the previous column uses
+ * x - 2 to skip the 5th byte with least-significant bits for 4 pixels.
+ * Same for last pixel (uses x + 2) and looking at the next column.
+ */
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+ /* Odd pixel GBRG -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Set up the Debayer object according to the passed in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 ||
+ outputFormat == formats::XBGR8888 || outputFormat == formats::ABGR8888) {
+ config.bpp = 32;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+ bool addAlphaByte = false;
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input/output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::XRGB8888:
+ case formats::ARGB8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::RGB888:
+ break;
+ case formats::XBGR8888:
+ case formats::ABGR8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer8_BGBG_BGR888<true> : &DebayerCpu::debayer8_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer8_GRGR_BGR888<true> : &DebayerCpu::debayer8_GRGR_BGR888<false>;
+ break;
+ case 10:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10_BGBG_BGR888<true> : &DebayerCpu::debayer10_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10_GRGR_BGR888<true> : &DebayerCpu::debayer10_GRGR_BGR888<false>;
+ break;
+ case 12:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer12_BGBG_BGR888<true> : &DebayerCpu::debayer12_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer12_GRGR_BGR888<true> : &DebayerCpu::debayer12_GRGR_BGR888<false>;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
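+ /*
+ * Center the window in the input frame, rounding x and y down to a
+ * multiple of the pattern size so that the window starts on the same
+ * colour filter as the full frame. For example, a 1920x1080 input with
+ * a 1280x720 output and a 2x2 pattern gives a window at (320, 180).
+ */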
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+ /* Pad with patternSize.width pixels on both the left and right side */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+
+ if (enableInputMemcpy_) {
+ for (unsigned int i = 0; i <= inputConfig_.patternSize.height; i++)
+ lineBuffers_[i].resize(lineBufferLength_);
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+ /* Round up to a multiple of 8 for 64-bit alignment */
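+ /*
+ * For example, RGB888 output (bpp = 24) at an illustrative width of
+ * 1281 pixels gives 1281 * 24 / 8 + 7 = 3850, masked down to a
+ * 3848 byte stride.
+ */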
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i].data(),
+ linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i].data() + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_].data(),
+ linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_].data() + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as prev line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* next line may point outside of src, use prev. */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+namespace {
+
+inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+} /* namespace */
+
+void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ std::vector<DmaSyncer> dmaSyncers;
+ for (const FrameBuffer::Plane &plane : input->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read);
+
+ for (const FrameBuffer::Plane &plane : output->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write);
+
+ green_ = params.green;
+ red_ = swapRedBlueGains_ ? params.blue : params.red;
+ blue_ = swapRedBlueGains_ ? params.red : params.blue;
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ dmaSyncers.clear();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ /*
+ * Buffer IDs are currently not used, so pass zero for the bufferId parameter.
+ *
+ * \todo Pass real bufferId once stats buffer passing is changed.
+ */
+ stats_->finishFrame(frame, 0);
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+ * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..2c47e7c6
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer one line of Bayer input data to the output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (inputConfig_.patternSize.height + 1) src
+ * pointers, each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+ * colors. To allow copying each src line only once, 3 temporary buffers,
+ * each holding a single line, are used, re-using the oldest buffer for
+ * the next line, and the pointers are swizzled so that:
+ * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src, avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly, for bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up,
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ template<bool addAlphaByte>
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ DebayerParams::ColorLookupTable red_;
+ DebayerParams::ColorLookupTable green_;
+ DebayerParams::ColorLookupTable blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ std::vector<uint8_t> lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames for things to stabilize, then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..aac7eda7
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_internal_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..2bea64d9
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <cmath>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
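+ *
+ * A rough sketch of how a pipeline handler is expected to drive this class,
+ * simplified and with error handling omitted (pipe, sensor, ipaControls,
+ * inputCfg, outputCfg, configInfo, stream, input and output are
+ * placeholders):
+ *
+ * \code
+ * SoftwareIsp isp(pipe, sensor, &ipaControls);
+ * if (!isp.isValid())
+ *         return;
+ *
+ * isp.configure(inputCfg, { outputCfg }, configInfo);
+ * isp.start();
+ * isp.queueBuffers(frame, input, { { &stream, output } });
+ * ...
+ * isp.stop();
+ * \endcode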
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Constructs a SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ * \param[out] ipaControls The IPA controls to update
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
+ ControlInfoMap *ipaControls)
+ : dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+ /*
+ * debayerParams_ must be initialized because the initial value is used for
+ * the first two frames, i.e. until stats processing starts providing its
+ * own parameters.
+ *
+ * \todo This should be handled in the same place as the related
+ * operations, in the IPA module.
+ */
+ std::array<uint8_t, 256> gammaTable;
+ for (unsigned int i = 0; i < 256; i++)
+ gammaTable[i] = UINT8_MAX * std::pow(i / 256.0, 0.5);
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ debayerParams_.red[i] = gammaTable[i];
+ debayerParams_.green[i] = gammaTable[i];
+ debayerParams_.blue[i] = gammaTable[i];
+ }
+
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaBufAllocator object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+ * The IPA tuning file name is derived from the sensor model name. If the
+ * tuning file isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls(),
+ ipaControls);
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* Make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns success.
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(frame, bufferId, sensorControls);
+}
+
+/**
+ * \brief Check the validity of the SoftwareIsp object
+ * \return True if the SoftwareIsp object is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given
+ * output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ipa::soft::IPAConfigInfo &configInfo)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(configInfo);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] stream Output stream exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+ /* single output for now */
+ if (stream == nullptr)
+ return -EINVAL;
+
+ return dmaHeap_.exportBuffers(count, { debayer_->frameSize() }, buffers);
+}
+
+/**
+ * \brief Queue a request and process the control list from the application
+ * \param[in] frame The number of the frame which will be processed next
+ * \param[in] controls The controls for the \a frame
+ */
+void SoftwareIsp::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ ipa_->queueRequest(frame, controls);
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
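+ *
+ * A minimal usage sketch, assuming a caller holding a single output stream
+ * and hypothetical frame number, input and output frame buffers:
+ *
+ * \code{.cpp}
+ * std::map<const Stream *, FrameBuffer *> outputs;
+ * outputs[&outputStream] = outputBuffer;
+ * int ret = swIsp->queueBuffers(frame, inputBuffer, outputs);
+ * \endcode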
+ */
+int SoftwareIsp::queueBuffers(uint32_t frame, FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ /*
+ * Validate the outputs as a sanity check: exactly one output is
+ * supported for now, and it must have a valid frame buffer.
+ */
+ if (outputs.size() != 1)
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+ }
+
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++)
+ process(frame, input, iter->second);
+
+ return 0;
+}
+
+/**
+ * \brief Start the Software ISP streaming operation
+ * \return 0 on success, any non-zero value on error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stop the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
+
+/**
+ * \brief Pass the input framebuffer to the ISP worker for processing
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output)
+{
+ ipa_->computeParams(frame);
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, frame, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady(uint32_t frame, uint32_t bufferId)
+{
+ ispStatsReady.emit(frame, bufferId);
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..c520c806
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function + functions to gather statistics on a
+ * line by line basis. This allows CPU based software debayering to interleave
+ * debayering and statistics gathering on a line by line basis while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
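+ *
+ * A rough usage sketch for a 2x2 Bayer pattern, assuming the caller prepares
+ * the line pointer array as described in statsProcessFn (inputCfg,
+ * linePointers, frame and bufferId are hypothetical):
+ *
+ * \code{.cpp}
+ * SwStatsCpu stats;
+ * if (!stats.isValid() || stats.configure(inputCfg) < 0)
+ *     return;
+ *
+ * stats.setWindow(Rectangle(inputCfg.size));
+ * stats.startFrame();
+ *
+ * for (unsigned int y = 0; y < inputCfg.size.height; y += 2)
+ *     stats.processLine0(y, linePointers);
+ *
+ * stats.finishFrame(frame, bufferId);
+ * \endcode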
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Check whether the statistics object is valid
+ *
+ * \return True if the object is valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done on multiple
+ * lines and/or columns at a time. This function returns the width and height
+ * at which the (Bayer) pattern repeats. Window values are rounded down to a
+ * multiple of this size, and the height also indicates whether processLine2()
+ * needs to be called.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with a patternSize height
+ * of 1, and lines 0 and 1 for input formats with a patternSize height of 2
+ * or more.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process lines 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with a patternSize
+ * height of 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
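+/*
+ * The macros below are expanded inside the per-format line handlers that
+ * follow. They accumulate per-line R, G and B sums and build an 8 bpp Y
+ * histogram using the Rec. 601 luma weights defined above; the div argument
+ * scales larger bit depths back to an 8 bpp range (e.g. 4 for 10 bpp and 16
+ * for 12 bpp input).
+ */
+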
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block. */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block. */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 to sample every other 2x2 block. */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 to sample every other 2x2 block. */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 to sample every other 2x2 block. */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame()
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit(frame, bufferId);
+}
+
+/**
+ * \brief Set up the SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if \a order is a standard Bayer order and set up xShift_ and
+ * swapLines_ so that a single BGGR stats function can be used for all 4
+ * standard orders.
+ *
+ * \return 0 on success, -EINVAL for an unsupported Bayer order
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+ ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
+
+/**
+ * \brief Configure the statistics object for the passed-in input format
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+ /* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window over which to gather statistics
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
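+ /*
+ * Round the window to multiples of the pattern size; patternSize_
+ * dimensions are powers of two, so the bitmasks below are valid.
+ */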
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+ /* Reduce the width by xShift_ to make sure the window still fits. */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..26a2f462
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame(uint32_t frame, uint32_t bufferId);
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<uint32_t, uint32_t> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
new file mode 100644
index 00000000..1af5386a
--- /dev/null
+++ b/src/libcamera/source_paths.cpp
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Identify libcamera source and build paths
+ */
+
+#include "libcamera/internal/source_paths.h"
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <link.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <libcamera/base/utils.h>
+
+/**
+ * \file source_paths.h
+ * \brief Identify the build and source path of a not-yet-installed library
+ */
+
+/* musl doesn't declare _DYNAMIC in link.h, declare it manually. */
+extern ElfW(Dyn) _DYNAMIC[];
+
+namespace libcamera {
+
+namespace {
+
+/**
+ * \brief Check if libcamera is installed or not
+ *
+ * Utilise the build_rpath dynamic tag, which meson strips out at install
+ * time, to determine at runtime whether the library currently executing has
+ * been installed or not.
+ *
+ * \return True if libcamera is installed, false otherwise
+ */
+bool isLibcameraInstalled()
+{
+ /*
+ * DT_RUNPATH (DT_RPATH when the linker uses old dtags) is removed on
+ * install.
+ */
+ for (const ElfW(Dyn) *dyn = _DYNAMIC; dyn->d_tag != DT_NULL; ++dyn) {
+ if (dyn->d_tag == DT_RUNPATH || dyn->d_tag == DT_RPATH)
+ return false;
+ }
+
+ return true;
+}
+
+} /* namespace */
+
+namespace utils {
+
+/**
+ * \brief Retrieve the path to the build directory
+ *
+ * During development, it is useful to run libcamera binaries directly from the
+ * build directory without installing them. This function helps components that
+ * need to locate resources in the build tree, such as IPA modules or IPA proxy
+ * workers, by providing them with the path to the root of the build directory.
+ * Callers can then use it to complement or override searches in system-wide
+ * directories.
+ *
+ * If libcamera has been installed, the build directory path is not available
+ * and this function returns an empty string.
+ *
+ * \return The path to the build directory if running from a build, or an empty
+ * string otherwise
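+ *
+ * A minimal usage sketch (the build-tree subdirectory shown is illustrative
+ * only):
+ *
+ * \code{.cpp}
+ * std::string dir = utils::libcameraBuildPath();
+ * if (!dir.empty())
+ *     dir += "src/ipa";  // hypothetical build-tree subdirectory
+ * \endcode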
+ */
+std::string libcameraBuildPath()
+{
+ if (isLibcameraInstalled())
+ return std::string();
+
+ Dl_info info;
+
+ /* Look up our own symbol. */
+ int ret = dladdr(reinterpret_cast<void *>(libcameraBuildPath), &info);
+ if (ret == 0)
+ return std::string();
+
+ std::string path = dirname(info.dli_fname) + "/../../";
+
+ char *real = realpath(path.c_str(), nullptr);
+ if (!real)
+ return std::string();
+
+ path = real;
+ free(real);
+
+ return path + "/";
+}
+
+/**
+ * \brief Retrieve the path to the source directory
+ *
+ * During development, it is useful to run libcamera binaries directly from the
+ * build directory without installing them. This function helps components that
+ * need to locate resources in the source tree, such as IPA configuration
+ * files, by providing them with the path to the root of the source directory.
+ * Callers can then use it to complement or override searches in system-wide
+ * directories.
+ *
+ * If libcamera has been installed, the source directory path is not available
+ * and this function returns an empty string.
+ *
+ * \return The path to the source directory if running from a build directory,
+ * or an empty string otherwise
+ */
+std::string libcameraSourcePath()
+{
+ std::string path = libcameraBuildPath();
+ if (path.empty())
+ return std::string();
+
+ path += "source";
+
+ char *real = realpath(path.c_str(), nullptr);
+ if (!real)
+ return std::string();
+
+ path = real;
+ free(real);
+
+ struct stat statbuf;
+ int ret = stat(path.c_str(), &statbuf);
+ if (ret < 0 || (statbuf.st_mode & S_IFMT) != S_IFDIR)
+ return std::string();
+
+ return path + "/";
+}
+
+} /* namespace utils */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index ef16aaa1..978d7275 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,21 +2,22 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
#include <algorithm>
#include <array>
-#include <iomanip>
#include <limits.h>
-#include <sstream>
+#include <ostream>
+#include <string>
+#include <vector>
-#include <libcamera/request.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include "log.h"
-#include "utils.h"
+#include <libcamera/request.h>
/**
* \file stream.h
@@ -275,11 +276,12 @@ SizeRange StreamFormats::range(const PixelFormat &pixelformat) const
*/
/**
- * \todo This method is deprecated and should be removed once all pipeline
- * handlers provied StreamFormats.
+ * \todo This function is deprecated and should be removed once all pipeline
+ * handlers provide StreamFormats.
*/
StreamConfiguration::StreamConfiguration()
- : pixelFormat(0), stream_(nullptr)
+ : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+ stream_(nullptr)
{
}
@@ -287,7 +289,8 @@ StreamConfiguration::StreamConfiguration()
* \brief Construct a configuration with stream formats
*/
StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
- : pixelFormat(0), stream_(nullptr), formats_(formats)
+ : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+ stream_(nullptr), formats_(formats)
{
}
@@ -302,16 +305,57 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
/**
+ * \var StreamConfiguration::stride
+ * \brief Image stride for the stream, in bytes
+ *
+ * The stride value reports the number of bytes between the beginning of
+ * successive lines in an image buffer for this stream. The value is
+ * valid after successfully validating the configuration with a call to
+ * CameraConfiguration::validate(). For compressed formats (such as MJPEG),
+ * this value will be zero.
+ */
+
+/**
+ * \var StreamConfiguration::frameSize
+ * \brief Frame size for the stream, in bytes
+ *
+ * The frameSize value reports the number of bytes necessary to contain one
+ * frame of an image buffer for this stream. This total includes the bytes
+ * required for all image planes. The value is valid after successfully
+ * validating the configuration with a call to CameraConfiguration::validate().
+ */
+
+/**
* \var StreamConfiguration::bufferCount
* \brief Requested number of buffers to allocate for the stream
*/
/**
+ * \var StreamConfiguration::colorSpace
+ * \brief The ColorSpace for this stream
+ *
+ * This field allows a ColorSpace to be selected for this Stream.
+ *
+ * The field is optional and an application can choose to leave it unset.
+ * Platforms that support the use of color spaces may provide default
+ * values through the generateConfiguration() method. An application can
+ * override these when necessary.
+ *
+ * If a specific ColorSpace is requested but the Camera cannot deliver it,
+ * then the StreamConfiguration will be adjusted to a value that can be
+ * delivered. In this case the validate() method will indicate via its
+ * return value that the CameraConfiguration has been adjusted.
+ *
+ * Note that platforms will typically have different constraints on what
+ * color spaces can be supported and in what combinations.
+ */
+
+/**
* \fn StreamConfiguration::stream()
* \brief Retrieve the stream associated with the configuration
*
* When a camera is configured with Camera::configure() Stream instances are
- * associated with each stream configuration entry. This method retrieves the
+ * associated with each stream configuration entry. This function retrieves the
* associated Stream, which remains valid until the next call to
* Camera::configure() or Camera::release().
*
@@ -322,8 +366,8 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* \fn StreamConfiguration::setStream()
* \brief Associate a stream with a configuration
*
- * This method is meant for the PipelineHandler::configure() method and shall
- * not be called by applications.
+ * This function is meant for the PipelineHandler::configure() function and
+ * shall not be called by applications.
*
* \param[in] stream The stream
*/
@@ -332,10 +376,11 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* \fn StreamConfiguration::formats()
* \brief Retrieve advisory stream format information
*
- * This method retrieves information about the pixel formats and sizes supported
- * by the stream configuration. The sizes are advisory and not all of them are
- * guaranteed to be supported by the stream. Users shall always inspect the size
- * in the stream configuration after calling CameraConfiguration::validate().
+ * This function retrieves information about the pixel formats and sizes
+ * supported by the stream configuration. The sizes are advisory and not all of
+ * them are guaranteed to be supported by the stream. Users shall always inspect
+ * the size in the stream configuration after calling
+ * CameraConfiguration::validate().
*
* \return Stream formats information
*/
@@ -347,7 +392,23 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
std::string StreamConfiguration::toString() const
{
- return size.toString() + "-" + pixelFormat.toString();
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a StreamConfiguration into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] cfg The StreamConfiguration
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg)
+{
+ out << cfg.size << "-" << cfg.pixelFormat;
+ return out;
}
/**
@@ -358,12 +419,11 @@ std::string StreamConfiguration::toString() const
* are specified by applications and passed to cameras, that then select the
* most appropriate streams and their default configurations.
*
+ * \var Raw
+ * The stream is intended to capture raw frames from the sensor.
* \var StillCapture
* The stream is intended to capture high-resolution, high-quality still images
* with low frame rate. The captured frames may be exposed with flash.
- * \var StillCaptureRaw
- * The stream is intended to capture high-resolution, raw still images with low
- * frame rate.
* \var VideoRecording
* The stream is intended to capture video for the purpose of recording or
* streaming. The video stream may produce a high frame rate and may be
@@ -375,9 +435,23 @@ std::string StreamConfiguration::toString() const
*/
/**
- * \typedef StreamRoles
- * \brief A vector of StreamRole
+ * \brief Insert a text representation of a StreamRole into an output stream
+ * \param[in] out The output stream
+ * \param[in] role The StreamRole
+ * \return The output stream \a out
*/
+std::ostream &operator<<(std::ostream &out, StreamRole role)
+{
+ static constexpr std::array<const char *, 4> names{
+ "Raw",
+ "StillCapture",
+ "VideoRecording",
+ "Viewfinder",
+ };
+
+ out << names[utils::to_underlying(role)];
+ return out;
+}
/**
* \class Stream
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
new file mode 100644
index 00000000..3d9885b0
--- /dev/null
+++ b/src/libcamera/sysfs.cpp
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Miscellaneous utility functions to access sysfs
+ */
+
+#include "libcamera/internal/sysfs.h"
+
+#include <fstream>
+#include <sstream>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+/**
+ * \file sysfs.h
+ * \brief Miscellaneous utility functions to access sysfs
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SysFs)
+
+namespace sysfs {
+
+/**
+ * \brief Retrieve the sysfs path for a character device
+ * \param[in] deviceNode Path to character device node
+ * \return The sysfs path on success or an empty string on failure
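+ *
+ * A usage sketch; the device node and the returned major:minor numbers are
+ * hypothetical and system-dependent:
+ *
+ * \code{.cpp}
+ * std::string path = sysfs::charDevPath("/dev/media0");
+ * // e.g. "/sys/dev/char/246:0"
+ * \endcode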
+ */
+std::string charDevPath(const std::string &deviceNode)
+{
+ struct stat st;
+ int ret = stat(deviceNode.c_str(), &st);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(SysFs, Error)
+ << "Unable to stat '" << deviceNode << "': "
+ << strerror(-ret);
+ return {};
+ }
+
+ std::ostringstream dev("/sys/dev/char/", std::ios_base::ate);
+ dev << major(st.st_rdev) << ":" << minor(st.st_rdev);
+
+ return dev.str();
+}
+
+/**
+ * \brief Retrieve the path of the firmware node for a device
+ * \param[in] device Path in sysfs to search
+ *
+ * Physical devices in a system are described by the system firmware. Depending
+ * on the type of platform, devices are identified using different naming
+ * schemes. The Linux kernel abstracts those differences with "firmware nodes".
+ * This function retrieves the firmware node path corresponding to the
+ * \a device.
+ *
+ * For DT-based systems, the path is the full name of the DT node that
+ * represents the device. For ACPI-based systems, the path is the absolute
+ * namespace path to the ACPI object that represents the device. In both cases,
+ * the path is guaranteed to be unique and persistent as long as the system
+ * firmware is not modified.
+ *
+ * \return The firmware node path on success or an empty string on failure
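+ *
+ * A hypothetical example for a DT-based system; the sysfs device path and
+ * the returned node are illustrative only:
+ *
+ * \code{.cpp}
+ * std::string fwNode =
+ *     sysfs::firmwareNodePath("/sys/class/video4linux/video0/device");
+ * // e.g. "/base/soc/i2c@7e804000/imx219@10"
+ * \endcode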
+ */
+std::string firmwareNodePath(const std::string &device)
+{
+ std::string fwPath, node;
+ struct stat st;
+
+ /* Lookup for DT-based systems */
+ node = device + "/of_node";
+ if (!stat(node.c_str(), &st)) {
+ char *ofPath = realpath(node.c_str(), nullptr);
+ if (!ofPath)
+ return {};
+
+ static const char prefix[] = "/sys/firmware/devicetree";
+ if (strncmp(ofPath, prefix, strlen(prefix)) == 0)
+ fwPath = ofPath + strlen(prefix);
+ else
+ fwPath = ofPath;
+
+ free(ofPath);
+
+ return fwPath;
+ }
+
+ /* Lookup for ACPI-based systems */
+ node = device + "/firmware_node/path";
+ if (File::exists(node)) {
+ std::ifstream file(node);
+ if (!file.is_open())
+ return {};
+
+ std::getline(file, fwPath);
+ file.close();
+
+ return fwPath;
+ }
+
+ return {};
+}
+
+} /* namespace sysfs */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
new file mode 100644
index 00000000..90662d12
--- /dev/null
+++ b/src/libcamera/tracepoints.cpp
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Tracepoints with lttng
+ */
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+
+#include "libcamera/internal/tracepoints.h"
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
new file mode 100644
index 00000000..9fe8b562
--- /dev/null
+++ b/src/libcamera/transform.cpp
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * 2D plane transforms.
+ */
+
+#include <libcamera/transform.h>
+
+#include <libcamera/orientation.h>
+
+/**
+ * \file transform.h
+ * \brief Enum to represent and manipulate 2D plane transforms
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Transform
+ * \brief Enum to represent a 2D plane transform
+ *
+ * The Transform can take 8 distinct values, representing the usual 2D plane
+ * transforms listed below. Each of these transforms can be constructed
+ * out of 3 basic operations, namely a horizontal flip (mirror), a vertical
+ * flip, and a transposition (about the main diagonal). The transforms are
+ * encoded such that a single bit indicates the presence of each of the 3
+ * basic operations:
+ *
+ * - bit 0 - presence of a horizontal flip
+ * - bit 1 - presence of a vertical flip
+ * - bit 2 - presence of a transposition.
+ *
+ * We regard these 3 basic operations as being applied in a specific order:
+ * first the two flip operations (actually they commute, so the order between
+ * them is unimportant) and finally any transpose operation.
+ *
+ * Functions are provided to manipulate directly the bits within the transform
+ * encoding, but there are also higher-level functions to invert and compose
+ * transforms. Transforms are composed according to the usual mathematical
+ * convention such that the right transform is applied first, and the left
+ * transform is applied second.
+ *
+ * Finally, we have a total of 8 distinct transformations, as follows (a
+ * couple of them have additional synonyms for convenience). We illustrate each
+ * with its nominal effect on a rectangle with vertices labelled A, B, C and D.
+ *
+ * \sa https://en.wikipedia.org/wiki/Examples_of_groups#dihedral_group_of_order_8
+ *
+ * The set of 2D plane transforms is also known as the symmetry group of a
+ * square, described in the link. Note that the group can be generated by
+ * only 2 elements (the horizontal flip and a 90 degree rotation, for
+ * example), however, the encoding used here makes the presence of the vertical
+ * flip explicit.
+ *
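+ * For example, `Rot90` (numeric value 6) sets the transpose and vertical
+ * flip bits, so it can be built from its basic operations with the bitwise
+ * operators defined below:
+ *
+ * \code{.cpp}
+ * Transform t = Transform::VFlip ^ Transform::Transpose;
+ * // t == Transform::Rot90
+ * \endcode
+ *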
+ * \var Transform::Identity
+ *
+ * Identity transform.
+~~~
+ A-B A-B
+Input image | | goes to output image | |
+ C-D C-D
+~~~
+ * Numeric value: 0 (no bits set).
+ *
+ * \var Transform::Rot0
+ *
+ * Synonym for Transform::Identity (zero degree rotation).
+ *
+ * \var Transform::HFlip
+ *
+ * Horizontal flip.
+~~~
+ A-B B-A
+Input image | | goes to output image | |
+ C-D D-C
+~~~
+ * Numeric value: 1 (horizontal flip bit set only).
+ *
+ * \var Transform::VFlip
+ *
+ * Vertical flip.
+~~~
+ A-B C-D
+Input image | | goes to output image | |
+ C-D A-B
+~~~
+ * Numeric value: 2 (vertical flip bit set only).
+ *
+ * \var Transform::HVFlip
+ *
+ * Horizontal and vertical flip (identical to a 180 degree rotation).
+~~~
+ A-B D-C
+Input image | | goes to output image | |
+ C-D B-A
+~~~
+ * Numeric value: 3 (horizontal and vertical flip bits set).
+ *
+ * \var Transform::Rot180
+ *
+ * Synonym for `HVFlip` (180 degree rotation).
+ *
+ * \var Transform::Transpose
+ *
+ * Transpose (about the main diagonal).
+~~~
+ A-B A-C
+Input image | | goes to output image | |
+ C-D B-D
+~~~
+ * Numeric value: 4 (transpose bit set only).
+ *
+ * \var Transform::Rot270
+ *
+ * Rotation by 270 degrees clockwise (90 degrees anticlockwise).
+~~~
+ A-B B-D
+Input image | | goes to output image | |
+ C-D A-C
+~~~
+ * Numeric value: 5 (transpose and horizontal flip bits set).
+ *
+ * \var Transform::Rot90
+ *
+ * Rotation by 90 degrees clockwise (270 degrees anticlockwise).
+~~~
+ A-B C-A
+Input image | | goes to output image | |
+ C-D D-B
+~~~
+ * Numeric value: 6 (transpose and vertical flip bits set).
+ *
+ * \var Transform::Rot180Transpose
+ *
+ * Rotation by 180 degrees followed by transpose (alternatively, transposition
+ * about the "opposite diagonal").
+~~~
+ A-B D-B
+Input image | | goes to output image | |
+ C-D C-A
+~~~
+ * Numeric value: 7 (all bits set).
+ */
+
+/**
+ * \fn operator &(Transform t0, Transform t1)
+ * \brief Apply bitwise AND operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator |(Transform t0, Transform t1)
+ * \brief Apply bitwise OR operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator ^(Transform t0, Transform t1)
+ * \brief Apply bitwise XOR operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator &=(Transform &t0, Transform t1)
+ * \brief Apply bitwise AND-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator |=(Transform &t0, Transform t1)
+ * \brief Apply bitwise OR-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator ^=(Transform &t0, Transform t1)
+ * \brief Apply bitwise XOR-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \brief Compose two transforms by applying \a t0 first then \a t1
+ * \param[in] t0 The first transform to apply
+ * \param[in] t1 The second transform to apply
+ *
+ * Compose two transforms into a transform that is equivalent to first applying
+ * \a t0 and then applying \a t1. For example, `HFlip * Transpose` performs
+ * `HFlip` first and then the `Transpose` yielding `Rot270`, as shown below.
+~~~
+ A-B B-A B-D
+Input image | | -> HFlip -> | | -> Transpose -> | | = Rot270
+ C-D D-C A-C
+~~~
+ * Note that composition is generally non-commutative for Transforms, and not
+ * the same as XOR-ing the underlying bit representations.
+ *
+ * \return A Transform equivalent to applying \a t0 and then \a t1
+ */
+Transform operator*(Transform t0, Transform t1)
+{
+ /*
+ * Reorder the operations so that we imagine doing t0's transpose
+ * (if any) after t1's flips. The effect is to swap t1's hflips for
+ * vflips and vice versa, after which we can just xor all the bits.
+ */
+ Transform reordered = t1;
+ if (!!(t0 & Transform::Transpose)) {
+ reordered = t1 & Transform::Transpose;
+ if (!!(t1 & Transform::HFlip))
+ reordered |= Transform::VFlip;
+ if (!!(t1 & Transform::VFlip))
+ reordered |= Transform::HFlip;
+ }
+
+ return reordered ^ t0;
+}
+
+/**
+ * \brief Invert a transform
+ * \param[in] t The transform to be inverted
+ *
+ * That is, we return the transform such that `t * (-t)` and `(-t) * t` both
+ * yield the identity transform.
+ *
+ * \return The inverse of \a t
+ */
+Transform operator-(Transform t)
+{
+ /* All are self-inverses, except for Rot270 and Rot90. */
+ static const Transform inverses[] = {
+ Transform::Identity,
+ Transform::HFlip,
+ Transform::VFlip,
+ Transform::HVFlip,
+ Transform::Transpose,
+ Transform::Rot90,
+ Transform::Rot270,
+ Transform::Rot180Transpose
+ };
+
+ return inverses[static_cast<int>(t)];
+}
+
+/**
+ * \fn operator!(Transform t)
+ * \brief Return `true` if the transform is the `Identity`, otherwise `false`
+ * \param[in] t The transform to be tested
+ */
+
+/**
+ * \fn operator~(Transform t)
+ * \brief Return the transform with all the bits inverted individually
+ * \param[in] t The transform of which the bits will be inverted
+ *
+ * This inverts the bits that encode the transform in a bitwise manner. Note
+ * that this is not the proper inverse of transform \a t (for which use \a
+ * operator-).
+ */
+
+/**
+ * \brief Return the transform representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The transform corresponding to the rotation if \a success was set to
+ * `true`, otherwise the `Identity` transform
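+ *
+ * For example, a 90 degree anticlockwise rotation, expressed as a negative
+ * angle, maps to `Rot270`:
+ *
+ * \code{.cpp}
+ * bool success;
+ * Transform t = transformFromRotation(-90, &success);
+ * // success == true, t == Transform::Rot270
+ * \endcode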
+ */
+Transform transformFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Transform::Identity;
+ case 90:
+ return Transform::Rot90;
+ case 180:
+ return Transform::Rot180;
+ case 270:
+ return Transform::Rot270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Transform::Identity;
+}
+
+namespace {
+
+/**
+ * \brief Return the transform representing \a orientation
+ * \param[in] orientation The orientation to convert
+ * \return The transform corresponding to \a orientation
+ */
+Transform transformFromOrientation(const Orientation &orientation)
+{
+ switch (orientation) {
+ case Orientation::Rotate0:
+ return Transform::Identity;
+ case Orientation::Rotate0Mirror:
+ return Transform::HFlip;
+ case Orientation::Rotate180:
+ return Transform::Rot180;
+ case Orientation::Rotate180Mirror:
+ return Transform::VFlip;
+ case Orientation::Rotate90Mirror:
+ return Transform::Transpose;
+ case Orientation::Rotate90:
+ return Transform::Rot90;
+ case Orientation::Rotate270Mirror:
+ return Transform::Rot180Transpose;
+ case Orientation::Rotate270:
+ return Transform::Rot270;
+ }
+
+ return Transform::Identity;
+}
+
+} /* namespace */
+
+/**
+ * \brief Return the Transform that applied to \a o2 gives \a o1
+ * \param[in] o1 The Orientation to obtain
+ * \param[in] o2 The base Orientation
+ *
+ * This operation can be used to easily compute the Transform to apply to a
+ * base orientation \a o2 to get the desired orientation \a o1.
+ *
+ * \return A Transform that applied to \a o2 gives \a o1
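+ *
+ * For example, the transform needed to bring a camera mounted upside-down
+ * (Rotate180) back to the upright orientation is a 180 degree rotation:
+ *
+ * \code{.cpp}
+ * Transform t = Orientation::Rotate0 / Orientation::Rotate180;
+ * // t == Transform::Rot180
+ * \endcode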
+ */
+Transform operator/(const Orientation &o1, const Orientation &o2)
+{
+ Transform t1 = transformFromOrientation(o1);
+ Transform t2 = transformFromOrientation(o2);
+
+ return -t2 * t1;
+}
+
+/**
+ * \brief Apply the Transform \a t on the orientation \a o
+ * \param[in] o The orientation
+ * \param[in] t The transform to apply on \a o
+ * \return The Orientation resulting from applying \a t on \a o
+ */
+Orientation operator*(const Orientation &o, const Transform &t)
+{
+ /*
+ * Apply a Transform corresponding to the orientation first and
+ * then apply \a t to it.
+ */
+ switch (transformFromOrientation(o) * t) {
+ case Transform::Identity:
+ return Orientation::Rotate0;
+ case Transform::HFlip:
+ return Orientation::Rotate0Mirror;
+ case Transform::VFlip:
+ return Orientation::Rotate180Mirror;
+ case Transform::Rot180:
+ return Orientation::Rotate180;
+ case Transform::Transpose:
+ return Orientation::Rotate90Mirror;
+ case Transform::Rot270:
+ return Orientation::Rotate270;
+ case Transform::Rot90:
+ return Orientation::Rotate90;
+ case Transform::Rot180Transpose:
+ return Orientation::Rotate270Mirror;
+ }
+
+ return Orientation::Rotate0;
+}
+
+/**
+ * \brief Return a character string describing the transform
+ * \param[in] t The transform to be described
+ * \return A character string describing \a t
+ */
+const char *transformToString(Transform t)
+{
+ static const char *strings[] = {
+ "identity",
+ "hflip",
+ "vflip",
+ "hvflip",
+ "transpose",
+ "rot270",
+ "rot90",
+ "rot180transpose"
+ };
+
+ return strings[static_cast<int>(t)];
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/utils.cpp b/src/libcamera/utils.cpp
deleted file mode 100644
index 58ee7cc1..00000000
--- a/src/libcamera/utils.cpp
+++ /dev/null
@@ -1,374 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * utils.cpp - Miscellaneous utility functions
- */
-
-#include "utils.h"
-
-#include <dlfcn.h>
-#include <elf.h>
-#include <iomanip>
-#include <link.h>
-#include <sstream>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-/**
- * \file utils.h
- * \brief Miscellaneous utility functions
- */
-
-/* musl doesn't declare _DYNAMIC in link.h, declare it manually. */
-extern ElfW(Dyn) _DYNAMIC[];
-
-namespace libcamera {
-
-namespace utils {
-
-/**
- * \def ARRAY_SIZE(array)
- * \brief Determine the number of elements in the static array.
- */
-
-/**
- * \brief Strip the directory prefix from the path
- * \param[in] path The path to process
- *
- * basename is implemented differently across different C libraries. This
- * implementation matches the one provided by the GNU libc, and does not
- * modify its input parameter.
- *
- * \return A pointer within the given path without any leading directory
- * components.
- */
-const char *basename(const char *path)
-{
- const char *base = strrchr(path, '/');
- return base ? base + 1 : path;
-}
-
-/**
- * \brief Get an environment variable
- * \param[in] name The name of the variable to return
- *
- * The environment list is searched to find the variable 'name', and the
- * corresponding string is returned.
- *
- * If 'secure execution' is required then this function always returns NULL to
- * avoid vulnerabilities that could occur if set-user-ID or set-group-ID
- * programs accidentally trust the environment.
- *
- * \return A pointer to the value in the environment or NULL if the requested
- * environment variable doesn't exist or if secure execution is required.
- */
-char *secure_getenv(const char *name)
-{
-#if HAVE_SECURE_GETENV
- return ::secure_getenv(name);
-#else
- if (issetugid())
- return NULL;
-
- return getenv(name);
-#endif
-}
-
-/**
- * \brief Identify the dirname portion of a path
- * \param[in] path The full path to parse
- *
- * This function conforms with the behaviour of the %dirname() function as
- * defined by POSIX.
- *
- * \return A string of the directory component of the path
- */
-std::string dirname(const std::string &path)
-{
- if (path.empty())
- return ".";
-
- /*
- * Skip all trailing slashes. If the path is only made of slashes,
- * return "/".
- */
- size_t pos = path.size() - 1;
- while (path[pos] == '/') {
- if (!pos)
- return "/";
- pos--;
- }
-
- /*
- * Find the previous slash. If the path contains no non-trailing slash,
- * return ".".
- */
- while (path[pos] != '/') {
- if (!pos)
- return ".";
- pos--;
- }
-
- /*
- * Return the directory name up to (but not including) any trailing
- * slash. If this would result in an empty string, return "/".
- */
- while (path[pos] == '/') {
- if (!pos)
- return "/";
- pos--;
- }
-
- return path.substr(0, pos + 1);
-}
-
-/**
- * \fn libcamera::utils::set_overlap(InputIt1 first1, InputIt1 last1,
- * InputIt2 first2, InputIt2 last2)
- * \brief Count the number of elements in the intersection of two ranges
- *
- * Count the number of elements in the intersection of the sorted ranges [\a
- * first1, \a last1) and [\a first1, \a last2). Elements are compared using
- * operator< and the ranges must be sorted with respect to the same.
- *
- * \return The number of elements in the intersection of the two ranges
- */
-
-/**
- * \fn libcamera::utils::clamp(const T& v, const T& lo, const T& hi)
- * \param[in] v The value to clamp
- * \param[in] lo The lower boundary to clamp v to
- * \param[in] hi The higher boundary to clamp v to
- * \return lo if v is less than lo, hi if v is greater than hi, otherwise v
- */
-
-/**
- * \typedef clock
- * \brief The libcamera clock (monotonic)
- */
-
-/**
- * \typedef duration
- * \brief The libcamera duration related to libcamera::utils::clock
- */
-
-/**
- * \typedef time_point
- * \brief The libcamera time point related to libcamera::utils::clock
- */
-
-/**
- * \brief Convert a duration to a timespec
- * \param[in] value The duration
- * \return A timespec expressing the duration
- */
-struct timespec duration_to_timespec(const duration &value)
-{
- uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(value).count();
- struct timespec ts;
- ts.tv_sec = nsecs / 1000000000ULL;
- ts.tv_nsec = nsecs % 1000000000ULL;
- return ts;
-}
-
-/**
- * \brief Convert a time point to a string representation
- * \param[in] time The time point
- * \return A string representing the time point in hh:mm:ss.nanoseconds format
- */
-std::string time_point_to_string(const time_point &time)
-{
- uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch()).count();
- unsigned int secs = nsecs / 1000000000ULL;
-
- std::ostringstream ossTimestamp;
- ossTimestamp.fill('0');
- ossTimestamp << secs / (60 * 60) << ":"
- << std::setw(2) << (secs / 60) % 60 << ":"
- << std::setw(2) << secs % 60 << "."
- << std::setw(9) << nsecs % 1000000000ULL;
- return ossTimestamp.str();
-}
-
-std::basic_ostream<char, std::char_traits<char>> &
-operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h)
-{
- stream << "0x";
-
- std::ostream::fmtflags flags = stream.setf(std::ios_base::hex,
- std::ios_base::basefield);
- std::streamsize width = stream.width(h.w);
- char fill = stream.fill('0');
-
- stream << h.v;
-
- stream.flags(flags);
- stream.width(width);
- stream.fill(fill);
-
- return stream;
-}
-
-/**
- * \fn hex(T value, unsigned int width)
- * \brief Write an hexadecimal value to an output string
- * \param value The value
- * \param width The width
- *
- * Return an object of unspecified type such that, if \a os is the name of an
- * output stream of type std::ostream, and T is an integer type, then the
- * expression
- *
- * \code{.cpp}
- * os << utils::hex(value)
- * \endcode
- *
- * will output the \a value to the stream in hexadecimal form with the base
- * prefix and the filling character set to '0'. The field width is set to \a
- * width if specified to a non-zero value, or to the native width of type T
- * otherwise. The \a os stream configuration is not modified.
- */
-
-/**
- * \brief Copy a string with a size limit
- * \param[in] dst The destination string
- * \param[in] src The source string
- * \param[in] size The size of the destination string
- *
- * This function copies the null-terminated string \a src to \a dst with a limit
- * of \a size - 1 characters, and null-terminates the result if \a size is
- * larger than 0. If \a src is larger than \a size - 1, \a dst is truncated.
- *
- * \return The size of \a src
- */
-size_t strlcpy(char *dst, const char *src, size_t size)
-{
- if (size) {
- strncpy(dst, src, size);
- dst[size - 1] = '\0';
- }
-
- return strlen(src);
-}
-
-details::StringSplitter::StringSplitter(const std::string &str, const std::string &delim)
- : str_(str), delim_(delim)
-{
-}
-
-details::StringSplitter::iterator::iterator(const details::StringSplitter *ss, std::string::size_type pos)
- : ss_(ss), pos_(pos)
-{
- next_ = ss_->str_.find(ss_->delim_, pos_);
-}
-
-details::StringSplitter::iterator &details::StringSplitter::iterator::operator++()
-{
- pos_ = next_;
- if (pos_ != std::string::npos) {
- pos_ += ss_->delim_.length();
- next_ = ss_->str_.find(ss_->delim_, pos_);
- }
-
- return *this;
-}
-
-std::string details::StringSplitter::iterator::operator*() const
-{
- std::string::size_type count;
- count = next_ != std::string::npos ? next_ - pos_ : next_;
- return ss_->str_.substr(pos_, count);
-}
-
-bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const
-{
- return pos_ != other.pos_;
-}
-
-details::StringSplitter::iterator details::StringSplitter::begin() const
-{
- return iterator(this, 0);
-}
-
-details::StringSplitter::iterator details::StringSplitter::end() const
-{
- return iterator(this, std::string::npos);
-}
-
-/**
- * \fn split(const std::string &str, const std::string &delim)
- * \brief Split a string based on a delimiter
- * \param[in] str The string to split
- * \param[in] delim The delimiter string
- *
- * This function splits the string \a str into substrings based on the
- * delimiter \a delim. It returns an object of unspecified type that can be
- * used in a range-based for loop and yields the substrings in sequence.
- *
- * \return An object that can be used in a range-based for loop to iterate over
- * the substrings
- */
-details::StringSplitter split(const std::string &str, const std::string &delim)
-{
- /** \todo Try to avoid copies of str and delim */
- return details::StringSplitter(str, delim);
-}
-
-/**
- * \brief Check if libcamera is installed or not
- *
- * Utilise the build_rpath dynamic tag which is stripped out by meson at
- * install time to determine at runtime if the library currently executing
- * has been installed or not.
- *
- * \return True if libcamera is installed, false otherwise
- */
-bool isLibcameraInstalled()
-{
- /*
- * DT_RUNPATH (DT_RPATH when the linker uses old dtags) is removed on
- * install.
- */
- for (const ElfW(Dyn) *dyn = _DYNAMIC; dyn->d_tag != DT_NULL; ++dyn) {
- if (dyn->d_tag == DT_RUNPATH || dyn->d_tag == DT_RPATH)
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Retrieve the path to the build directory
- *
- * During development, it is useful to run libcamera binaries directly from the
- * build directory without installing them. This function helps components that
- * need to locate resources, such as IPA modules or IPA proxy workers, by
- * providing them with the path to the root of the build directory. Callers can
- * then use it to complement or override searches in system-wide directories.
- *
- * If libcamera has been installed, the build directory path is not available
- * and this function returns an empty string.
- *
- * \return The path to the build directory if running from a build, or an empty
- * string otherwise
- */
-std::string libcameraBuildPath()
-{
- if (isLibcameraInstalled())
- return std::string();
-
- Dl_info info;
-
- /* Look up our own symbol. */
- int ret = dladdr(reinterpret_cast<void *>(libcameraBuildPath), &info);
- if (ret == 0)
- return std::string();
-
- return dirname(info.dli_fname) + "/../../";
-}
-
-} /* namespace utils */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_controls.cpp b/src/libcamera/v4l2_controls.cpp
deleted file mode 100644
index 8e2415f2..00000000
--- a/src/libcamera/v4l2_controls.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_controls.cpp - V4L2 Controls Support
- */
-
-#include "v4l2_controls.h"
-
-#include <string.h>
-
-/**
- * \file v4l2_controls.h
- * \brief Support for V4L2 Controls using the V4L2 Extended Controls APIs
- *
- * The V4L2 Control API allows application to inspect and modify sets of
- * configurable parameters on a video device or subdevice. The nature of the
- * parameters an application can modify using the control framework depends on
- * what the driver implements support for, and on the characteristics of the
- * underlying hardware platform. Generally controls are used to modify user
- * visible settings, such as the image brightness and exposure time, or
- * non-standard parameters which cannot be controlled through the V4L2 format
- * negotiation API.
- *
- * Controls are identified by a numerical ID, defined by the V4L2 kernel headers
- * and have an associated type. Each control has a value, which is the data that
- * can be modified with V4L2Device::setControls() or retrieved with
- * V4L2Device::getControls().
- *
- * The control's type along with the control's flags define the type of the
- * control's value content. Controls can transport a single data value stored in
- * variable inside the control, or they might as well deal with more complex
- * data types, such as arrays of matrices, stored in a contiguous memory
- * locations associated with the control and called 'the payload'. Such controls
- * are called 'compound controls' and are currently not supported by the
- * libcamera V4L2 control framework.
- *
- * libcamera implements support for controls using the V4L2 Extended Control
- * API, which allows future handling of controls with payloads of arbitrary
- * sizes.
- *
- * The libcamera V4L2 Controls framework operates on lists of controls, wrapped
- * by the ControlList class, to match the V4L2 extended controls API. The
- * interface to set and get control is implemented by the V4L2Device class, and
- * this file only provides the data type definitions.
- *
- * \todo Add support for compound controls
- */
-
-namespace libcamera {
-
-namespace {
-
-std::string v4l2_ctrl_name(const struct v4l2_query_ext_ctrl &ctrl)
-{
- size_t len = strnlen(ctrl.name, sizeof(ctrl.name));
- return std::string(static_cast<const char *>(ctrl.name), len);
-}
-
-ControlType v4l2_ctrl_type(const struct v4l2_query_ext_ctrl &ctrl)
-{
- switch (ctrl.type) {
- case V4L2_CTRL_TYPE_U8:
- return ControlTypeByte;
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- return ControlTypeBool;
-
- case V4L2_CTRL_TYPE_INTEGER:
- return ControlTypeInteger32;
-
- case V4L2_CTRL_TYPE_INTEGER64:
- return ControlTypeInteger64;
-
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_BITMASK:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- /*
- * More precise types may be needed, for now use a 32-bit
- * integer type.
- */
- return ControlTypeInteger32;
-
- default:
- return ControlTypeNone;
- }
-}
-
-} /* namespace */
-
-/**
- * \class V4L2ControlId
- * \brief V4L2 control static metadata
- *
- * The V4L2ControlId class is a specialisation of the ControlId for V4L2
- * controls.
- */
-
-/**
- * \brief Construct a V4L2ControlId from a struct v4l2_query_ext_ctrl
- * \param[in] ctrl The struct v4l2_query_ext_ctrl as returned by the kernel
- */
-V4L2ControlId::V4L2ControlId(const struct v4l2_query_ext_ctrl &ctrl)
- : ControlId(ctrl.id, v4l2_ctrl_name(ctrl), v4l2_ctrl_type(ctrl))
-{
-}
-
-/**
- * \class V4L2ControlInfo
- * \brief Convenience specialisation of ControlInfo for V4L2 controls
- *
- * The V4L2ControlInfo class is a specialisation of the ControlInfo for V4L2
- * controls. It offers a convenience constructor from a struct
- * v4l2_query_ext_ctrl, and is otherwise equivalent to the ControlInfo class.
- */
-
-/**
- * \brief Construct a V4L2ControlInfo from a struct v4l2_query_ext_ctrl
- * \param[in] ctrl The struct v4l2_query_ext_ctrl as returned by the kernel
- */
-V4L2ControlInfo::V4L2ControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
-{
- switch (ctrl.type) {
- case V4L2_CTRL_TYPE_U8:
- ControlInfo::operator=(ControlInfo(static_cast<uint8_t>(ctrl.minimum),
- static_cast<uint8_t>(ctrl.maximum),
- static_cast<uint8_t>(ctrl.default_value)));
- break;
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- ControlInfo::operator=(ControlInfo(static_cast<bool>(ctrl.minimum),
- static_cast<bool>(ctrl.maximum),
- static_cast<bool>(ctrl.default_value)));
- break;
-
- case V4L2_CTRL_TYPE_INTEGER64:
- ControlInfo::operator=(ControlInfo(static_cast<int64_t>(ctrl.minimum),
- static_cast<int64_t>(ctrl.maximum),
- static_cast<int64_t>(ctrl.default_value)));
- break;
-
- default:
- ControlInfo::operator=(ControlInfo(static_cast<int32_t>(ctrl.minimum),
- static_cast<int32_t>(ctrl.maximum),
- static_cast<int32_t>(ctrl.default_value)));
- break;
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 03e30516..2f65a43a 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,21 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
-#include "v4l2_device.h"
+#include "libcamera/internal/v4l2_device.h"
#include <fcntl.h>
-#include <iomanip>
+#include <map>
+#include <stdint.h>
+#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
+#include <vector>
-#include "log.h"
-#include "utils.h"
-#include "v4l2_controls.h"
+#include <linux/v4l2-mediabus.h>
+
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/sysfs.h"
/**
* \file v4l2_device.h
@@ -31,9 +39,9 @@ LOG_DEFINE_CATEGORY(V4L2)
* \class V4L2Device
* \brief Base class for V4L2VideoDevice and V4L2Subdevice
*
- * The V4L2Device class groups together the methods and fields common to
+ * The V4L2Device class groups together the functions and fields common to
* both the V4L2VideoDevice and V4L2Subdevice classes, and provides a base
- * class with methods to open and close the device node associated with the
+ * class with functions to open and close the device node associated with the
* device and to perform IOCTL system calls on it.
*
* The V4L2Device class cannot be instantiated directly, as its constructor
@@ -49,7 +57,8 @@ LOG_DEFINE_CATEGORY(V4L2)
* at open() time, and the \a logTag to prefix log messages with.
*/
V4L2Device::V4L2Device(const std::string &deviceNode)
- : deviceNode_(deviceNode), fd_(-1)
+ : deviceNode_(deviceNode), fdEventNotifier_(nullptr),
+ frameStartEnabled_(false)
{
}
@@ -76,17 +85,17 @@ int V4L2Device::open(unsigned int flags)
return -EBUSY;
}
- int ret = syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), flags);
- if (ret < 0) {
- ret = -errno;
- LOG(V4L2, Error) << "Failed to open V4L2 device: "
+ UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ flags | O_CLOEXEC));
+ if (!fd.isValid()) {
+ int ret = -errno;
+ LOG(V4L2, Error) << "Failed to open V4L2 device '"
+ << deviceNode_ << "': "
<< strerror(-ret);
return ret;
}
- fd_ = ret;
-
- listControls();
+ setFd(std::move(fd));
return 0;
}
@@ -95,24 +104,30 @@ int V4L2Device::open(unsigned int flags)
* \brief Set the file descriptor of a V4L2 device
* \param[in] fd The file descriptor handle
*
- * This method allows a device to provide an already opened file descriptor
+ * This function allows a device to provide an already opened file descriptor
* referring to the V4L2 device node, instead of opening it with open(). This
* can be used for V4L2 M2M devices where a single video device node is used for
* both the output and capture devices, or when receiving an open file
* descriptor in a context that doesn't have permission to open the device node
* itself.
*
- * This method and the open() method are mutually exclusive, only one of the two
- * shall be used for a V4L2Device instance.
+ * This function and the open() function are mutually exclusive; only one of
+ * the two shall be used for a V4L2Device instance.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Device::setFd(int fd)
+int V4L2Device::setFd(UniqueFD fd)
{
if (isOpen())
return -EBUSY;
- fd_ = fd;
+ fd_ = std::move(fd);
+
+ fdEventNotifier_ = new EventNotifier(fd_.get(), EventNotifier::Exception);
+ fdEventNotifier_->activated.connect(this, &V4L2Device::eventAvailable);
+ fdEventNotifier_->setEnabled(false);
+
+ listControls();
return 0;
}
@@ -127,10 +142,9 @@ void V4L2Device::close()
if (!isOpen())
return;
- if (::close(fd_) < 0)
- LOG(V4L2, Error) << "Failed to close V4L2 device: "
- << strerror(errno);
- fd_ = -1;
+ delete fdEventNotifier_;
+
+ fd_.reset();
}
/**
@@ -147,114 +161,127 @@ void V4L2Device::close()
/**
* \brief Read controls from the device
- * \param[inout] ctrls The list of controls to read
- *
- * This method reads the value of all controls contained in \a ctrls, and stores
- * their values in the corresponding \a ctrls entry.
+ * \param[in] ids The list of controls to read, specified by their ID
*
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is a compound control, or if any
- * other error occurs during validation of the requested controls, no control is
- * read and this method returns -EINVAL.
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList.
*
- * If an error occurs while reading the controls, the index of the first control
- * that couldn't be read is returned. The value of all controls below that index
- * are updated in \a ctrls, while the value of all the other controls are not
- * changed.
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
*
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the controls is not supported or not accessible
- * \retval i The index of the control that failed
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
*/
-int V4L2Device::getControls(ControlList *ctrls)
+ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
{
- unsigned int count = ctrls->size();
- if (count == 0)
- return 0;
+ if (ids.empty())
+ return {};
- struct v4l2_ext_control v4l2Ctrls[count];
- memset(v4l2Ctrls, 0, sizeof(v4l2Ctrls));
+ ControlList ctrls{ controls_ };
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- unsigned int id = ctrl.first;
+ for (uint32_t id : ids) {
const auto iter = controls_.find(id);
if (iter == controls_.end()) {
LOG(V4L2, Error)
<< "Control " << utils::hex(id) << " not found";
- return -EINVAL;
+ return {};
}
+ ctrls.set(id, {});
+ }
+
+ std::vector<v4l2_ext_control> v4l2Ctrls(ids.size());
+ memset(v4l2Ctrls.data(), 0, sizeof(v4l2_ext_control) * ctrls.size());
+
+ unsigned int i = 0;
+ for (auto &ctrl : ctrls) {
+ unsigned int id = ctrl.first;
const struct v4l2_query_ext_ctrl &info = controlInfo_[id];
- ControlValue &value = ctrl.second;
+
+ v4l2_ext_control &v4l2Ctrl = v4l2Ctrls[i++];
+ v4l2Ctrl.id = id;
if (info.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) {
ControlType type;
+ ControlValue &value = ctrl.second;
+ Span<uint8_t> data;
switch (info.type) {
case V4L2_CTRL_TYPE_U8:
type = ControlTypeByte;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u8 = data.data();
+ break;
+
+ case V4L2_CTRL_TYPE_U16:
+ type = ControlTypeUnsigned16;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ break;
+
+ case V4L2_CTRL_TYPE_U32:
+ type = ControlTypeUnsigned32;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
break;
default:
LOG(V4L2, Error)
<< "Unsupported payload control type "
<< info.type;
- return -EINVAL;
+ return {};
}
- value.reserve(type, true, info.elems);
- Span<uint8_t> data = value.data();
-
- v4l2Ctrls[i].p_u8 = data.data();
- v4l2Ctrls[i].size = data.size();
+ v4l2Ctrl.size = data.size();
}
-
- v4l2Ctrls[i].id = id;
- i++;
}
struct v4l2_ext_controls v4l2ExtCtrls = {};
v4l2ExtCtrls.which = V4L2_CTRL_WHICH_CUR_VAL;
- v4l2ExtCtrls.controls = v4l2Ctrls;
- v4l2ExtCtrls.count = count;
+ v4l2ExtCtrls.controls = v4l2Ctrls.data();
+ v4l2ExtCtrls.count = v4l2Ctrls.size();
int ret = ioctl(VIDIOC_G_EXT_CTRLS, &v4l2ExtCtrls);
if (ret) {
unsigned int errorIdx = v4l2ExtCtrls.error_idx;
/* Generic validation error. */
- if (errorIdx == 0 || errorIdx >= count) {
+ if (errorIdx == 0 || errorIdx >= v4l2Ctrls.size()) {
LOG(V4L2, Error) << "Unable to read controls: "
<< strerror(-ret);
- return -EINVAL;
+ return {};
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to read control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to read control " << utils::hex(id)
<< ": " << strerror(-ret);
- count = errorIdx - 1;
- ret = errorIdx;
+
+ v4l2Ctrls.resize(errorIdx);
}
- updateControls(ctrls, v4l2Ctrls, count);
+ updateControls(&ctrls, v4l2Ctrls);
- return ret;
+ return ctrls;
}
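
By way of illustration, a caller could use the new ID-based interface along
these lines. This is a sketch, assuming `device` points to an open V4L2Device
that exposes the standard V4L2 brightness and contrast controls:

	ControlList ctrls = device->getControls({ V4L2_CID_BRIGHTNESS,
						  V4L2_CID_CONTRAST });
	if (!ctrls.empty()) {
		int32_t brightness = ctrls.get(V4L2_CID_BRIGHTNESS).get<int32_t>();
		int32_t contrast = ctrls.get(V4L2_CID_CONTRAST).get<int32_t>();
		/* ... use the values ... */
	}
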
/**
* \brief Write controls to the device
* \param[in] ctrls The list of controls to write
*
- * This method writes the value of all controls contained in \a ctrls, and
+ * This function writes the value of all controls contained in \a ctrls, and
* stores the values actually applied to the device in the corresponding
* \a ctrls entry.
*
* If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, is a
- * compound control, or if any other error occurs during validation of
- * the requested controls, no control is written and this method returns
- * -EINVAL.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
*
* If an error occurs while writing the controls, the index of the first
* control that couldn't be written is returned. All controls below that index
@@ -267,30 +294,64 @@ int V4L2Device::getControls(ControlList *ctrls)
*/
int V4L2Device::setControls(ControlList *ctrls)
{
- unsigned int count = ctrls->size();
- if (count == 0)
+ if (ctrls->empty())
return 0;
- struct v4l2_ext_control v4l2Ctrls[count];
- memset(v4l2Ctrls, 0, sizeof(v4l2Ctrls));
+ std::vector<v4l2_ext_control> v4l2Ctrls(ctrls->size());
+ memset(v4l2Ctrls.data(), 0, sizeof(v4l2_ext_control) * ctrls->size());
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- unsigned int id = ctrl.first;
+ for (auto [ctrl, i] = std::pair(ctrls->begin(), 0u); i < ctrls->size(); ctrl++, i++) {
+ const unsigned int id = ctrl->first;
const auto iter = controls_.find(id);
if (iter == controls_.end()) {
LOG(V4L2, Error)
<< "Control " << utils::hex(id) << " not found";
return -EINVAL;
}
-
- v4l2Ctrls[i].id = id;
+ v4l2_ext_control &v4l2Ctrl = v4l2Ctrls[i];
+ v4l2Ctrl.id = id;
/* Set the v4l2_ext_control value for the write operation. */
- ControlValue &value = ctrl.second;
+ ControlValue &value = ctrl->second;
switch (iter->first->type()) {
+ case ControlTypeUnsigned16: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint16_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeUnsigned32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint32_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeInteger32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<int32_t>();
+ }
+
+ break;
+ }
+
case ControlTypeInteger64:
- v4l2Ctrls[i].value64 = value.get<int64_t>();
+ v4l2Ctrl.value64 = value.get<int64_t>();
break;
case ControlTypeByte: {
@@ -302,50 +363,127 @@ int V4L2Device::setControls(ControlList *ctrls)
}
Span<uint8_t> data = value.data();
- v4l2Ctrls[i].p_u8 = data.data();
- v4l2Ctrls[i].size = data.size();
+ v4l2Ctrl.p_u8 = data.data();
+ v4l2Ctrl.size = data.size();
break;
}
default:
/* \todo To be changed to support strings. */
- v4l2Ctrls[i].value = value.get<int32_t>();
+ v4l2Ctrl.value = value.get<int32_t>();
break;
}
-
- i++;
}
struct v4l2_ext_controls v4l2ExtCtrls = {};
v4l2ExtCtrls.which = V4L2_CTRL_WHICH_CUR_VAL;
- v4l2ExtCtrls.controls = v4l2Ctrls;
- v4l2ExtCtrls.count = count;
+ v4l2ExtCtrls.controls = v4l2Ctrls.data();
+ v4l2ExtCtrls.count = v4l2Ctrls.size();
int ret = ioctl(VIDIOC_S_EXT_CTRLS, &v4l2ExtCtrls);
if (ret) {
unsigned int errorIdx = v4l2ExtCtrls.error_idx;
/* Generic validation error. */
- if (errorIdx == 0 || errorIdx >= count) {
+ if (errorIdx == 0 || errorIdx >= v4l2Ctrls.size()) {
LOG(V4L2, Error) << "Unable to set controls: "
<< strerror(-ret);
return -EINVAL;
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to set control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to set control " << utils::hex(id)
<< ": " << strerror(-ret);
- count = errorIdx - 1;
+
+ v4l2Ctrls.resize(errorIdx);
ret = errorIdx;
}
- updateControls(ctrls, v4l2Ctrls, count);
+ updateControls(ctrls, v4l2Ctrls);
return ret;
}
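
The write side, as a matching sketch under the same assumptions:

	ControlList ctrls(device->controls());
	ctrls.set(V4L2_CID_BRIGHTNESS, 128);

	int ret = device->setControls(&ctrls);
	if (ret) {
		/* ret is -EINVAL, or the index of the first failed control. */
	}
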
/**
+ * \brief Retrieve the v4l2_query_ext_ctrl information for the given control
+ * \param[in] id The V4L2 control id
+ * \return A pointer to the v4l2_query_ext_ctrl structure for the given
+ * control, or a null pointer if not found
+ */
+const struct v4l2_query_ext_ctrl *V4L2Device::controlInfo(uint32_t id) const
+{
+ const auto it = controlInfo_.find(id);
+ if (it == controlInfo_.end())
+ return nullptr;
+
+ return &it->second;
+}
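
For illustration, the raw kernel information can then be consulted as follows
(V4L2_CID_GAIN is only an example ID):

	const struct v4l2_query_ext_ctrl *info = device->controlInfo(V4L2_CID_GAIN);
	if (info) {
		/* info->minimum, info->maximum and info->step are valid here. */
	}
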
+
+/**
+ * \brief Retrieve the device path in sysfs
+ *
+ * This function returns the sysfs path to the physical device backing the V4L2
+ * device. The path is guaranteed to be an absolute path, without any symbolic
+ * link.
+ *
+ * It includes the sysfs mount point prefix.
+ *
+ * \return The device path in sysfs
+ */
+std::string V4L2Device::devicePath() const
+{
+ std::string devicePath = sysfs::charDevPath(deviceNode_) + "/device";
+
+ char *realPath = realpath(devicePath.c_str(), nullptr);
+ if (!realPath) {
+ LOG(V4L2, Fatal)
+ << "Can not resolve device path for " << devicePath;
+ return {};
+ }
+
+ std::string path{ realPath };
+ free(realPath);
+
+ return path;
+}
+
+/**
+ * \brief Enable or disable frame start event notification
+ * \param[in] enable True to enable frame start events, false to disable them
+ *
+ * This function enables or disables generation of frame start events. Once
+ * enabled, the events are signalled through the frameStart signal.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+int V4L2Device::setFrameStartEnabled(bool enable)
+{
+ if (frameStartEnabled_ == enable)
+ return 0;
+
+ struct v4l2_event_subscription event{};
+ event.type = V4L2_EVENT_FRAME_SYNC;
+
+ unsigned long request = enable ? VIDIOC_SUBSCRIBE_EVENT
+ : VIDIOC_UNSUBSCRIBE_EVENT;
+ int ret = ioctl(request, &event);
+ if (enable && ret)
+ return ret;
+
+ fdEventNotifier_->setEnabled(enable);
+ frameStartEnabled_ = enable;
+
+ return ret;
+}
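
A pipeline handler could consume the new events along these lines; the
MyPipelineHandler class and its onFrameStart() slot, which receives the frame
sequence number as a uint32_t, are hypothetical names used for this sketch:

	/* During configuration: */
	device->frameStart.connect(this, &MyPipelineHandler::onFrameStart);
	device->setFrameStartEnabled(true);
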
+
+/**
+ * \var V4L2Device::frameStart
+ * \brief A Signal emitted when capture of a frame has started
+ */
+
+/**
* \brief Perform an IOCTL system call on the device node
* \param[in] request The IOCTL request code
* \param[in] argp A pointer to the IOCTL argument
@@ -357,7 +495,7 @@ int V4L2Device::ioctl(unsigned long request, void *argp)
* Printing out an error message is usually better performed
* in the caller, which can provide more context.
*/
- if (::ioctl(fd_, request, argp) < 0)
+ if (::ioctl(fd_.get(), request, argp) < 0)
return -errno;
return 0;
@@ -375,6 +513,150 @@ int V4L2Device::ioctl(unsigned long request, void *argp)
* \return The V4L2 device file descriptor, -1 if the device node is not open
*/
+/**
+ * \brief Retrieve the libcamera control type associated with the V4L2 control
+ * \param[in] ctrlType The V4L2 control type
+ * \return The ControlType associated to \a ctrlType
+ */
+ControlType V4L2Device::v4l2CtrlType(uint32_t ctrlType)
+{
+ switch (ctrlType) {
+ case V4L2_CTRL_TYPE_U8:
+ return ControlTypeByte;
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ return ControlTypeBool;
+
+ case V4L2_CTRL_TYPE_U16:
+ return ControlTypeUnsigned16;
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlTypeUnsigned32;
+
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ControlTypeInteger32;
+
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ControlTypeInteger64;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ /*
+ * More precise types may be needed, for now use a 32-bit
+ * integer type.
+ */
+ return ControlTypeInteger32;
+
+ default:
+ return ControlTypeNone;
+ }
+}
+
+/**
+ * \brief Create a ControlId for a V4L2 control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
+ * \return A ControlId associated to \a ctrl
+ */
+std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &ctrl)
+{
+ const size_t len = strnlen(ctrl.name, sizeof(ctrl.name));
+ const std::string name(static_cast<const char *>(ctrl.name), len);
+ const ControlType type = v4l2CtrlType(ctrl.type);
+
+ ControlId::DirectionFlags flags;
+ if (ctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)
+ flags = ControlId::Direction::Out;
+ else if (ctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ flags = ControlId::Direction::In;
+ else
+ flags = ControlId::Direction::In | ControlId::Direction::Out;
+
+ return std::make_unique<ControlId>(ctrl.id, name, "v4l2", type, flags);
+}
+
+/**
+ * \brief Create a ControlInfo for a V4L2 control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
+ * \return A ControlInfo that represents \a ctrl
+ */
+std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
+{
+ switch (ctrl.type) {
+ case V4L2_CTRL_TYPE_U8:
+ return ControlInfo(static_cast<uint8_t>(ctrl.minimum),
+ static_cast<uint8_t>(ctrl.maximum),
+ static_cast<uint8_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U16:
+ return ControlInfo(static_cast<uint16_t>(ctrl.minimum),
+ static_cast<uint16_t>(ctrl.maximum),
+ static_cast<uint16_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlInfo(static_cast<uint32_t>(ctrl.minimum),
+ static_cast<uint32_t>(ctrl.maximum),
+ static_cast<uint32_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ return ControlInfo(static_cast<bool>(ctrl.minimum),
+ static_cast<bool>(ctrl.maximum),
+ static_cast<bool>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ControlInfo(static_cast<int64_t>(ctrl.minimum),
+ static_cast<int64_t>(ctrl.maximum),
+ static_cast<int64_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ return v4l2MenuControlInfo(ctrl);
+
+ default:
+ return ControlInfo(static_cast<int32_t>(ctrl.minimum),
+ static_cast<int32_t>(ctrl.maximum),
+ static_cast<int32_t>(ctrl.default_value));
+ }
+}
+
+/**
+ * \brief Create ControlInfo for a V4L2 menu control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 menu control
+ *
+ * The created ControlInfo contains indices acquired by VIDIOC_QUERYMENU.
+ *
+ * \return A ControlInfo that represents \a ctrl
+ */
+std::optional<ControlInfo> V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
+{
+ std::vector<ControlValue> indices;
+ struct v4l2_querymenu menu = {};
+ menu.id = ctrl.id;
+
+ if (ctrl.minimum < 0)
+ return std::nullopt;
+
+ for (int32_t index = ctrl.minimum; index <= ctrl.maximum; ++index) {
+ menu.index = index;
+ if (ioctl(VIDIOC_QUERYMENU, &menu) != 0)
+ continue;
+
+ indices.push_back(index);
+ }
+
+ /*
+ * Some faulty UVC devices are known to return an empty menu control.
+ * Controls without a menu option cannot be set or read, so they are
+ * not exposed.
+ */
+ if (indices.size() == 0)
+ return std::nullopt;
+
+ return ControlInfo(indices,
+ ControlValue(static_cast<int32_t>(ctrl.default_value)));
+}
+
/*
* \brief List and store information about all controls supported by the
* V4L2 device
@@ -384,7 +666,6 @@ void V4L2Device::listControls()
ControlInfoMap::Map ctrls;
struct v4l2_query_ext_ctrl ctrl = {};
- /* \todo Add support for menu and compound controls. */
while (1) {
ctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL |
V4L2_CTRL_FLAG_NEXT_COMPOUND;
@@ -404,6 +685,8 @@ void V4L2Device::listControls()
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
break;
/* \todo Support other control types. */
default:
@@ -413,13 +696,64 @@ void V4L2Device::listControls()
continue;
}
- controlIds_.emplace_back(std::make_unique<V4L2ControlId>(ctrl));
+ LOG(V4L2, Debug) << "Control: " << ctrl.name
+ << " (" << utils::hex(ctrl.id) << ")";
+
+ controlIds_.emplace_back(v4l2ControlId(ctrl));
+ controlIdMap_[ctrl.id] = controlIds_.back().get();
controlInfo_.emplace(ctrl.id, ctrl);
- ctrls.emplace(controlIds_.back().get(), V4L2ControlInfo(ctrl));
+ std::optional<ControlInfo> info = v4l2ControlInfo(ctrl);
+
+ if (!info) {
+ LOG(V4L2, Error)
+ << "Control " << ctrl.name
+ << " cannot be registered";
+
+ continue;
+ }
+
+ ctrls.emplace(controlIds_.back().get(), *info);
}
- controls_ = std::move(ctrls);
+ controls_ = ControlInfoMap(std::move(ctrls), controlIdMap_);
+}
+
+/**
+ * \brief Update the information for all device controls
+ *
+ * The V4L2Device class caches information about all controls supported by the
+ * device and exposes it through the controls() and controlInfo() functions.
+ * Control information may change at runtime, for instance when formats on a
+ * subdev are modified. When this occurs, this function can be used to refresh
+ * control information. The information is refreshed in-place, all pointers to
+ * v4l2_query_ext_ctrl instances previously returned by controlInfo() and
+ * iterators to the ControlInfoMap returned by controls() remain valid.
+ *
+ * Note that control information isn't refreshed automatically as it may be an
+ * expensive operation. V4L2Device users are responsible for calling this
+ * function when required, based on their usage pattern of the class.
+ */
+void V4L2Device::updateControlInfo()
+{
+ for (auto &[controlId, info] : controls_) {
+ unsigned int id = controlId->id();
+
+ /*
+ * Assume controlInfo_ has a corresponding entry, as it has been
+ * generated by listControls().
+ */
+ struct v4l2_query_ext_ctrl &ctrl = controlInfo_[id];
+
+ if (ioctl(VIDIOC_QUERY_EXT_CTRL, &ctrl)) {
+ LOG(V4L2, Debug)
+ << "Could not refresh control "
+ << utils::hex(id);
+ continue;
+ }
+
+ info = *v4l2ControlInfo(ctrl);
+ }
}
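
As a sketch of the intended usage pattern, a pipeline handler that
reconfigures a subdevice would refresh the cached information afterwards;
`subdev` and `format` are assumed to be a valid V4L2Subdevice pointer and
subdevice format:

	subdev->setFormat(0, &format);
	/* The format change may have altered control limits; refresh them. */
	subdev->updateControlInfo();
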
/*
@@ -427,45 +761,303 @@ void V4L2Device::listControls()
* values in \a v4l2Ctrls
* \param[inout] ctrls List of V4L2 controls to update
* \param[in] v4l2Ctrls List of V4L2 extended controls as returned by the driver
- * \param[in] count The number of controls to update
*/
void V4L2Device::updateControls(ControlList *ctrls,
- const struct v4l2_ext_control *v4l2Ctrls,
- unsigned int count)
+ Span<const v4l2_ext_control> v4l2Ctrls)
{
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- if (i == count)
- break;
+ for (const v4l2_ext_control &v4l2Ctrl : v4l2Ctrls) {
+ const unsigned int id = v4l2Ctrl.id;
- const struct v4l2_ext_control *v4l2Ctrl = &v4l2Ctrls[i];
- unsigned int id = ctrl.first;
- ControlValue &value = ctrl.second;
+ ControlValue value = ctrls->get(id);
+ if (value.isArray()) {
+ /*
+ * No action required, the VIDIOC_[GS]_EXT_CTRLS ioctl
+ * accessed the ControlValue storage directly for array
+ * controls.
+ */
+ continue;
+ }
const auto iter = controls_.find(id);
+ ASSERT(iter != controls_.end());
+
switch (iter->first->type()) {
case ControlTypeInteger64:
- value.set<int64_t>(v4l2Ctrl->value64);
- break;
-
- case ControlTypeByte:
- /*
- * No action required, the VIDIOC_[GS]_EXT_CTRLS ioctl
- * accessed the ControlValue storage directly.
- */
+ value.set<int64_t>(v4l2Ctrl.value64);
break;
default:
/*
- * \todo To be changed when support for string and
- * compound controls will be added.
+ * Note: this catches the ControlTypeInteger32 case.
+ *
+ * \todo To be changed when support for string controls
+ * will be added.
*/
- value.set<int32_t>(v4l2Ctrl->value);
+ value.set<int32_t>(v4l2Ctrl.value);
break;
}
- i++;
+ ctrls->set(id, value);
}
}
+/**
+ * \brief Slot to handle V4L2 events from the V4L2 device
+ *
+ * When this slot is called, a V4L2 event is available to be dequeued from the
+ * device.
+ */
+void V4L2Device::eventAvailable()
+{
+ struct v4l2_event event{};
+ int ret = ioctl(VIDIOC_DQEVENT, &event);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Failed to dequeue event, disabling event notifier";
+ fdEventNotifier_->setEnabled(false);
+ return;
+ }
+
+ if (event.type != V4L2_EVENT_FRAME_SYNC) {
+ LOG(V4L2, Error)
+ << "Spurious event (" << event.type
+ << "), disabling event notifier";
+ fdEventNotifier_->setEnabled(false);
+ return;
+ }
+
+ frameStart.emit(event.u.frame_sync.frame_sequence);
+}
+
+static const std::map<uint32_t, ColorSpace> v4l2ToColorSpace = {
+ { V4L2_COLORSPACE_RAW, ColorSpace::Raw },
+ { V4L2_COLORSPACE_SRGB, {
+ ColorSpace::Primaries::Rec709,
+ ColorSpace::TransferFunction::Srgb,
+ ColorSpace::YcbcrEncoding::Rec601,
+ ColorSpace::Range::Limited } },
+ { V4L2_COLORSPACE_JPEG, ColorSpace::Sycc },
+ { V4L2_COLORSPACE_SMPTE170M, ColorSpace::Smpte170m },
+ { V4L2_COLORSPACE_REC709, ColorSpace::Rec709 },
+ { V4L2_COLORSPACE_BT2020, ColorSpace::Rec2020 },
+};
+
+static const std::map<uint32_t, ColorSpace::TransferFunction> v4l2ToTransferFunction = {
+ { V4L2_XFER_FUNC_NONE, ColorSpace::TransferFunction::Linear },
+ { V4L2_XFER_FUNC_SRGB, ColorSpace::TransferFunction::Srgb },
+ { V4L2_XFER_FUNC_709, ColorSpace::TransferFunction::Rec709 },
+};
+
+static const std::map<uint32_t, ColorSpace::YcbcrEncoding> v4l2ToYcbcrEncoding = {
+ { V4L2_YCBCR_ENC_601, ColorSpace::YcbcrEncoding::Rec601 },
+ { V4L2_YCBCR_ENC_709, ColorSpace::YcbcrEncoding::Rec709 },
+ { V4L2_YCBCR_ENC_BT2020, ColorSpace::YcbcrEncoding::Rec2020 },
+};
+
+static const std::map<uint32_t, ColorSpace::Range> v4l2ToRange = {
+ { V4L2_QUANTIZATION_FULL_RANGE, ColorSpace::Range::Full },
+ { V4L2_QUANTIZATION_LIM_RANGE, ColorSpace::Range::Limited },
+};
+
+static const std::vector<std::pair<ColorSpace, v4l2_colorspace>> colorSpaceToV4l2 = {
+ { ColorSpace::Raw, V4L2_COLORSPACE_RAW },
+ { ColorSpace::Sycc, V4L2_COLORSPACE_JPEG },
+ { ColorSpace::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
+ { ColorSpace::Rec709, V4L2_COLORSPACE_REC709 },
+ { ColorSpace::Rec2020, V4L2_COLORSPACE_BT2020 },
+};
+
+static const std::map<ColorSpace::Primaries, v4l2_colorspace> primariesToV4l2 = {
+ { ColorSpace::Primaries::Raw, V4L2_COLORSPACE_RAW },
+ { ColorSpace::Primaries::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
+ { ColorSpace::Primaries::Rec709, V4L2_COLORSPACE_REC709 },
+ { ColorSpace::Primaries::Rec2020, V4L2_COLORSPACE_BT2020 },
+};
+
+static const std::map<ColorSpace::TransferFunction, v4l2_xfer_func> transferFunctionToV4l2 = {
+ { ColorSpace::TransferFunction::Linear, V4L2_XFER_FUNC_NONE },
+ { ColorSpace::TransferFunction::Srgb, V4L2_XFER_FUNC_SRGB },
+ { ColorSpace::TransferFunction::Rec709, V4L2_XFER_FUNC_709 },
+};
+
+static const std::map<ColorSpace::YcbcrEncoding, v4l2_ycbcr_encoding> ycbcrEncodingToV4l2 = {
+ /* V4L2 has no "none" encoding. */
+ { ColorSpace::YcbcrEncoding::None, V4L2_YCBCR_ENC_DEFAULT },
+ { ColorSpace::YcbcrEncoding::Rec601, V4L2_YCBCR_ENC_601 },
+ { ColorSpace::YcbcrEncoding::Rec709, V4L2_YCBCR_ENC_709 },
+ { ColorSpace::YcbcrEncoding::Rec2020, V4L2_YCBCR_ENC_BT2020 },
+};
+
+static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
+ { ColorSpace::Range::Full, V4L2_QUANTIZATION_FULL_RANGE },
+ { ColorSpace::Range::Limited, V4L2_QUANTIZATION_LIM_RANGE },
+};
+
+/**
+ * \brief Convert the color space fields in a V4L2 format to a ColorSpace
+ * \param[in] v4l2Format A V4L2 format containing color space information
+ * \param[in] colourEncoding Type of colour encoding
+ *
+ * The colorspace, ycbcr_enc, xfer_func and quantization fields within a
+ * V4L2 format structure are converted to a corresponding ColorSpace.
+ *
+ * If any of the V4L2 fields is not recognised, no corresponding ColorSpace
+ * can be determined and std::nullopt is returned.
+ *
+ * \return The ColorSpace corresponding to the input V4L2 format
+ * \retval std::nullopt One or more V4L2 color space fields were not recognised
+ */
+template<typename T>
+std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding)
+{
+ auto itColor = v4l2ToColorSpace.find(v4l2Format.colorspace);
+ if (itColor == v4l2ToColorSpace.end())
+ return std::nullopt;
+
+ /* This sets all the color space fields to the correct "default" values. */
+ ColorSpace colorSpace = itColor->second;
+
+ if (v4l2Format.xfer_func != V4L2_XFER_FUNC_DEFAULT) {
+ auto itTransfer = v4l2ToTransferFunction.find(v4l2Format.xfer_func);
+ if (itTransfer == v4l2ToTransferFunction.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->second;
+ }
+
+ if (v4l2Format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT) {
+ auto itYcbcrEncoding = v4l2ToYcbcrEncoding.find(v4l2Format.ycbcr_enc);
+ if (itYcbcrEncoding == v4l2ToYcbcrEncoding.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itYcbcrEncoding->second;
+
+ /*
+ * V4L2 has no "none" encoding, override the value returned by
+ * the kernel for non-YUV formats as YCbCr encoding isn't
+ * applicable in that case.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ }
+
+ if (v4l2Format.quantization != V4L2_QUANTIZATION_DEFAULT) {
+ auto itRange = v4l2ToRange.find(v4l2Format.quantization);
+ if (itRange == v4l2ToRange.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->second;
+
+ /*
+ * "Limited" quantization range is only meant for YUV formats.
+ * Override the range to "Full" for all other formats.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.range = ColorSpace::Range::Full;
+ }
+
+ return colorSpace;
+}
+
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &,
+ PixelFormatInfo::ColourEncoding);
+
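
From within a V4L2Device-derived class, the conversion can be sketched as
follows, assuming `pix` is a struct v4l2_pix_format filled in by a
VIDIOC_G_FMT call:

	std::optional<ColorSpace> colorSpace =
		toColorSpace(pix, PixelFormatInfo::ColourEncodingYUV);
	if (!colorSpace) {
		/* One or more of the color space fields was not recognised. */
	}
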
+/**
+ * \brief Fill in the color space fields of a V4L2 format from a ColorSpace
+ * \param[in] colorSpace The ColorSpace to be converted
+ * \param[out] v4l2Format A V4L2 format containing color space information
+ *
+ * The colorspace, ycbcr_enc, xfer_func and quantization fields within a
+ * V4L2 format structure are filled in from a corresponding ColorSpace.
+ *
+ * An error is returned if any of the V4L2 fields do not support the
+ * value given in the ColorSpace. Such fields are set to the V4L2
+ * "default" values, but all other fields are still filled in where
+ * possible.
+ *
+ * If the color space is completely unset, "default" V4L2 values are used
+ * everywhere, so a driver would then choose its preferred color space.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The ColorSpace does not have a representation using V4L2 enums
+ */
+template<typename T>
+int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &colorSpace, T &v4l2Format)
+{
+ v4l2Format.colorspace = V4L2_COLORSPACE_DEFAULT;
+ v4l2Format.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ v4l2Format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ v4l2Format.quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (!colorSpace)
+ return 0;
+
+ auto itColor = std::find_if(colorSpaceToV4l2.begin(), colorSpaceToV4l2.end(),
+ [&colorSpace](const auto &item) {
+ return colorSpace == item.first;
+ });
+ if (itColor != colorSpaceToV4l2.end()) {
+ v4l2Format.colorspace = itColor->second;
+ /* Leaving all the other fields as "default" should be fine. */
+ return 0;
+ }
+
+ /*
+ * If the colorSpace doesn't precisely match a standard color space,
+ * then we must choose a V4L2 colorspace with matching primaries.
+ */
+ int ret = 0;
+
+ auto itPrimaries = primariesToV4l2.find(colorSpace->primaries);
+ if (itPrimaries != primariesToV4l2.end()) {
+ v4l2Format.colorspace = itPrimaries->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised primaries in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itTransfer = transferFunctionToV4l2.find(colorSpace->transferFunction);
+ if (itTransfer != transferFunctionToV4l2.end()) {
+ v4l2Format.xfer_func = itTransfer->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised transfer function in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itYcbcrEncoding = ycbcrEncodingToV4l2.find(colorSpace->ycbcrEncoding);
+ if (itYcbcrEncoding != ycbcrEncodingToV4l2.end()) {
+ v4l2Format.ycbcr_enc = itYcbcrEncoding->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised YCbCr encoding in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itRange = rangeToV4l2.find(colorSpace->range);
+ if (itRange != rangeToV4l2.end()) {
+ v4l2Format.quantization = itRange->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised quantization in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_pix_format &);
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_pix_format_mplane &);
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_mbus_framefmt &);
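
The reverse direction, again sketched from a derived class, prepares a format
before handing it to VIDIOC_S_FMT:

	struct v4l2_pix_format pix = {};
	int ret = fromColorSpace(ColorSpace::Rec709, pix);
	if (ret == -EINVAL) {
		/* Unsupported fields fell back to the V4L2 "default" values. */
	}
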
+
} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
new file mode 100644
index 00000000..e8b3eb9c
--- /dev/null
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * V4L2 Pixel Format
+ */
+
+#include "libcamera/internal/v4l2_pixelformat.h"
+
+#include <ctype.h>
+#include <map>
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+
+/**
+ * \file v4l2_pixelformat.h
+ * \brief V4L2 Pixel Format
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(V4L2)
+
+/**
+ * \class V4L2PixelFormat
+ * \brief V4L2 pixel format FourCC wrapper
+ *
+ * The V4L2PixelFormat class describes the pixel format of a V4L2 buffer. It
+ * wraps the V4L2 numerical FourCC, and shall be used in all APIs that deal with
+ * V4L2 pixel formats. Its purpose is to prevent unintentional confusion of
+ * V4L2 and DRM FourCCs in code by catching implicit conversion attempts at
+ * compile time.
+ *
+ * To achieve this goal, construction of a V4L2PixelFormat from an integer value
+ * is explicit. To retrieve the integer value of a V4L2PixelFormat, both the
+ * explicit fourcc() and implicit uint32_t conversion operators may be used.
+ */
+
+namespace {
+
+const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
+ /* RGB formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB565),
+ { formats::RGB565, "16-bit RGB 5-6-5" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X),
+ { formats::RGB565_BE, "16-bit RGB 5-6-5 BE" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB24),
+ { formats::BGR888, "24-bit RGB 8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR24),
+ { formats::RGB888, "24-bit BGR 8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32),
+ { formats::XRGB8888, "32-bit BGRX 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32),
+ { formats::BGRX8888, "32-bit XRGB 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
+ { formats::XBGR8888, "32-bit RGBX 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32),
+ { formats::RGBX8888, "32-bit XBGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
+ { formats::ABGR8888, "32-bit RGBA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
+ { formats::ARGB8888, "32-bit BGRA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32),
+ { formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
+ { formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
+
+ /* YUV packed formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
+ { formats::YUYV, "YUYV 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVYU),
+ { formats::YVYU, "YVYU 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_UYVY),
+ { formats::UYVY, "UYVY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
+ { formats::VYUY, "VYUY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32),
+ { formats::AVUY8888, "32-bit YUVA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32),
+ { formats::XVUY8888, "32-bit YUVX 8-8-8-8" } },
+
+ /* YUV planar formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ { formats::NV16, "Y/CbCr 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ { formats::NV16, "Y/CbCr 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ { formats::NV61, "Y/CrCb 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ { formats::NV61, "Y/CrCb 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ { formats::NV12, "Y/CbCr 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ { formats::NV12, "Y/CbCr 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ { formats::NV21, "Y/CrCb 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ { formats::NV21, "Y/CrCb 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV24),
+ { formats::NV24, "Y/CbCr 4:4:4" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV42),
+ { formats::NV42, "Y/CrCb 4:4:4" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ { formats::YUV420, "Planar YUV 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ { formats::YUV420, "Planar YUV 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ { formats::YVU420, "Planar YVU 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ { formats::YVU420, "Planar YVU 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ { formats::YUV422, "Planar YUV 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ { formats::YUV422, "Planar YUV 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M),
+ { formats::YVU422, "Planar YVU 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
+ { formats::YUV444, "Planar YUV 4:4:4 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
+ { formats::YVU444, "Planar YVU 4:4:4 (N-C)" } },
+
+ /* Greyscale formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_GREY),
+ { formats::R8, "8-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10),
+ { formats::R10, "10-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
+ { formats::R10_CSI2P, "10-bit Greyscale Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12),
+ { formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12P),
+ { formats::R12_CSI2P, "12-bit Greyscale Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y16),
+ { formats::R16, "16-bit Greyscale" } },
+
+ /* Bayer formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
+ { formats::SBGGR8, "8-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8),
+ { formats::SGBRG8, "8-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8),
+ { formats::SGRBG8, "8-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8),
+ { formats::SRGGB8, "8-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10),
+ { formats::SBGGR10, "10-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10),
+ { formats::SGBRG10, "10-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10),
+ { formats::SGRBG10, "10-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10),
+ { formats::SRGGB10, "10-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P),
+ { formats::SBGGR10_CSI2P, "10-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P),
+ { formats::SGBRG10_CSI2P, "10-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P),
+ { formats::SGRBG10_CSI2P, "10-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P),
+ { formats::SRGGB10_CSI2P, "10-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12),
+ { formats::SBGGR12, "12-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12),
+ { formats::SGBRG12, "12-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12),
+ { formats::SGRBG12, "12-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12),
+ { formats::SRGGB12, "12-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P),
+ { formats::SBGGR12_CSI2P, "12-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P),
+ { formats::SGBRG12_CSI2P, "12-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P),
+ { formats::SGRBG12_CSI2P, "12-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
+ { formats::SRGGB12_CSI2P, "12-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14),
+ { formats::SBGGR14, "14-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14),
+ { formats::SGBRG14, "14-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14),
+ { formats::SGRBG14, "14-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14),
+ { formats::SRGGB14, "14-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P),
+ { formats::SBGGR14_CSI2P, "14-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P),
+ { formats::SGBRG14_CSI2P, "14-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P),
+ { formats::SGRBG14_CSI2P, "14-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P),
+ { formats::SRGGB14_CSI2P, "14-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
+ { formats::SBGGR16, "16-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
+ { formats::SGBRG16, "16-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16),
+ { formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
+ { formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
+
+ /* Compressed formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ { formats::MJPEG, "Motion-JPEG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ { formats::MJPEG, "JPEG JFIF" } },
+};
+
+} /* namespace */
+
+/**
+ * \struct V4L2PixelFormat::Info
+ * \brief Information about a V4L2 pixel format
+ *
+ * \var V4L2PixelFormat::Info::format
+ * \brief The corresponding libcamera PixelFormat
+ *
+ * \sa PixelFormat
+ *
+ * \var V4L2PixelFormat::Info::description
+ * \brief The human-readable description of the V4L2 pixel format
+ */
+
+/**
+ * \fn V4L2PixelFormat::V4L2PixelFormat()
+ * \brief Construct a V4L2PixelFormat with an invalid format
+ *
+ * V4L2PixelFormat instances constructed with the default constructor are
+ * invalid, calling the isValid() function returns false.
+ */
+
+/**
+ * \fn V4L2PixelFormat::V4L2PixelFormat(uint32_t fourcc)
+ * \brief Construct a V4L2PixelFormat from a FourCC value
+ * \param[in] fourcc The pixel format FourCC numerical value
+ */
+
+/**
+ * \fn bool V4L2PixelFormat::isValid() const
+ * \brief Check if the pixel format is valid
+ *
+ * V4L2PixelFormat instances constructed with the default constructor are
+ * invalid. Instances constructed with a FourCC defined in the V4L2 API are
+ * valid. The behaviour is undefined otherwise.
+ *
+ * \return True if the pixel format is valid, false otherwise
+ */
+
+/**
+ * \fn uint32_t V4L2PixelFormat::fourcc() const
+ * \brief Retrieve the pixel format FourCC numerical value
+ * \return The pixel format FourCC numerical value
+ */
+
+/**
+ * \fn V4L2PixelFormat::operator uint32_t() const
+ * \brief Convert to the pixel format FourCC numerical value
+ * \return The pixel format FourCC numerical value
+ */
+
+/**
+ * \brief Assemble and return a string describing the pixel format
+ * \return A string describing the pixel format
+ */
+std::string V4L2PixelFormat::toString() const
+{
+ if (fourcc_ == 0)
+ return "<INVALID>";
+
+ char ss[8] = { static_cast<char>(fourcc_ & 0x7f),
+ static_cast<char>((fourcc_ >> 8) & 0x7f),
+ static_cast<char>((fourcc_ >> 16) & 0x7f),
+ static_cast<char>((fourcc_ >> 24) & 0x7f) };
+
+ for (unsigned int i = 0; i < 4; i++) {
+ if (!isprint(ss[i]))
+ ss[i] = '.';
+ }
+
+ if (fourcc_ & (1 << 31))
+ strcat(ss, "-BE");
+
+ return ss;
+}
+
+/**
+ * \brief Retrieve the V4L2 description for the format
+ *
+ * The description matches the value used by the kernel, as would be reported
+ * by the VIDIOC_ENUM_FMT ioctl.
+ *
+ * \return The V4L2 description corresponding to the V4L2 format, or a
+ * placeholder description if not found
+ */
+const char *V4L2PixelFormat::description() const
+{
+ const auto iter = vpf2pf.find(*this);
+ if (iter == vpf2pf.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported V4L2 pixel format "
+ << toString();
+ return "Unsupported format";
+ }
+
+ return iter->second.description;
+}
+
+/**
+ * \brief Convert the V4L2 pixel format to the corresponding PixelFormat
+ * \param[in] warn When true, log a warning message if the V4L2 pixel format
+ * isn't known
+ *
+ * Users of this function might try to convert a V4L2PixelFormat to a
+ * PixelFormat just to check if the format is supported or not. In that case,
+ * they can suppress the warning message by setting the \a warn argument to
+ * false to not pollute the log with unnecessary messages.
+ *
+ * \return The PixelFormat corresponding to the V4L2 pixel format
+ */
+PixelFormat V4L2PixelFormat::toPixelFormat(bool warn) const
+{
+ const auto iter = vpf2pf.find(*this);
+ if (iter == vpf2pf.end()) {
+ if (warn)
+ LOG(V4L2, Warning) << "Unsupported V4L2 pixel format "
+ << toString();
+ return PixelFormat();
+ }
+
+ return iter->second.format;
+}
+
+/**
+ * \brief Retrieve the list of V4L2PixelFormat associated with \a pixelFormat
+ * \param[in] pixelFormat The PixelFormat to convert
+ *
+ * Multiple V4L2 formats may exist for one PixelFormat as V4L2 defines separate
+ * 4CCs for contiguous and non-contiguous versions of the same image format.
+ *
+ * \return The list of V4L2PixelFormat corresponding to \a pixelFormat
+ */
+const std::vector<V4L2PixelFormat> &
+V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
+{
+ static const std::vector<V4L2PixelFormat> empty;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ if (!info.isValid())
+ return empty;
+
+ return info.v4l2Formats;
+}
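
A round-trip sketch between the two FourCC spaces:

	PixelFormat pf = V4L2PixelFormat(V4L2_PIX_FMT_NV12).toPixelFormat();

	/* Lists both the contiguous and non-contiguous V4L2 variants of NV12. */
	const std::vector<V4L2PixelFormat> &fmts =
		V4L2PixelFormat::fromPixelFormat(pf);
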
+
+/**
+ * \brief Test if a V4L2PixelFormat is one of the generic line-based metadata
+ * formats
+ *
+ * A limited number of metadata formats, the ones that represent generic
+ * line-based metadata buffers, need to have their width, height and
+ * bytesperline set by userspace.
+ *
+ * This function tests if the current V4L2PixelFormat is one of those.
+ *
+ * Note: It would have been nicer to store this information in a
+ * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to
+ * applications, there are no PixelFormat and DRM FourCC codes associated with
+ * them.
+ *
+ * \return True if the V4L2PixelFormat is a generic line-based metadata format,
+ * false otherwise
+ */
+bool V4L2PixelFormat::isGenericLineBasedMetadata() const
+{
+ switch (fourcc_) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * \brief Insert a text representation of a V4L2PixelFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2PixelFormat &f)
+{
+ out << f.toString();
+ return out;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 8b9da81e..7a064d87 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,26 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
-#include "v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_subdevice.h"
#include <fcntl.h>
-#include <iomanip>
+#include <regex>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
+#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
#include <libcamera/geometry.h>
-#include "log.h"
-#include "media_device.h"
-#include "media_object.h"
-#include "utils.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
/**
* \file v4l2_subdevice.h
@@ -33,6 +36,831 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
/**
+ * \class MediaBusFormatInfo
+ * \brief Information about media bus formats
+ *
+ * The MediaBusFormatInfo class groups together information describing a media
+ * bus format. It facilitates handling of media bus formats by providing data
+ * commonly used in pipeline handlers.
+ *
+ * \var MediaBusFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the format
+ *
+ * \var MediaBusFormatInfo::code
+ * \brief The media bus format code described by this instance (MEDIA_BUS_FMT_*)
+ *
+ * \var MediaBusFormatInfo::type
+ * \brief The media bus format type
+ *
+ * \var MediaBusFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that transmit multiple or fractional pixels per sample, the
+ * value will differ from the bus width.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, or device-specific embedded data formats, report 0 in this field.
+ *
+ * \var MediaBusFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * This field is valid for Type::Image formats only.
+ */
+
+/**
+ * \enum MediaBusFormatInfo::Type
+ * \brief The format type
+ *
+ * \var MediaBusFormatInfo::Type::Image
+ * \brief The format describes image data
+ *
+ * \var MediaBusFormatInfo::Type::Metadata
+ * \brief The format describes generic metadata
+ *
+ * \var MediaBusFormatInfo::Type::EmbeddedData
+ * \brief The format describes sensor embedded data
+ */
+
+namespace {
+
+const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
+ /* This table is sorted to match the order in linux/media-bus-format.h */
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, {
+ .name = "RGB444_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, {
+ .name = "RGB444_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, {
+ .name = "RGB555_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, {
+ .name = "RGB555_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_1X16, {
+ .name = "RGB565_1X16",
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_BE, {
+ .name = "BGR565_2X8_BE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_LE, {
+ .name = "BGR565_2X8_LE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, {
+ .name = "RGB565_2X8_BE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_LE, {
+ .name = "RGB565_2X8_LE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB666_1X18, {
+ .name = "RGB666_1X18",
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 18,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR888_1X24, {
+ .name = "BGR888_1X24",
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_1X24, {
+ .name = "RGB888_1X24",
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, {
+ .name = "RGB888_2X12_BE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_LE, {
+ .name = "RGB888_2X12_LE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB121212_1X36, {
+ .name = "RGB121212_1X36",
+ .code = MEDIA_BUS_FMT_RGB121212_1X36,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 36,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB202020_1X60, {
+ .name = "RGB202020_1X60",
+ .code = MEDIA_BUS_FMT_RGB202020_1X60,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 60,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_ARGB8888_1X32, {
+ .name = "ARGB8888_1X32",
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_Y8_1X8, {
+ .name = "Y8_1X8",
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UV8_1X8, {
+ .name = "UV8_1X8",
+ .code = MEDIA_BUS_FMT_UV8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, {
+ .name = "UYVY8_1_5X8",
+ .code = MEDIA_BUS_FMT_UYVY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, {
+ .name = "VYUY8_1_5X8",
+ .code = MEDIA_BUS_FMT_VYUY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, {
+ .name = "YUYV8_1_5X8",
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, {
+ .name = "YVYU8_1_5X8",
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, {
+ .name = "UYVY8_2X8",
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, {
+ .name = "VYUY8_2X8",
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, {
+ .name = "YUYV8_2X8",
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, {
+ .name = "YVYU8_2X8",
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y10_1X10, {
+ .name = "Y10_1X10",
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_2X10, {
+ .name = "UYVY10_2X10",
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_2X10, {
+ .name = "VYUY10_2X10",
+ .code = MEDIA_BUS_FMT_VYUY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_2X10, {
+ .name = "YUYV10_2X10",
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_2X10, {
+ .name = "YVYU10_2X10",
+ .code = MEDIA_BUS_FMT_YVYU10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y12_1X12, {
+ .name = "Y12_1X12",
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y16_1X16, {
+ .name = "Y16_1X16",
+ .code = MEDIA_BUS_FMT_Y16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1X16, {
+ .name = "UYVY8_1X16",
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1X16, {
+ .name = "VYUY8_1X16",
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1X16, {
+ .name = "YUYV8_1X16",
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1X16, {
+ .name = "YVYU8_1X16",
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YDYUYDYV8_1X16, {
+ .name = "YDYUYDYV8_1X16",
+ .code = MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_1X20, {
+ .name = "UYVY10_1X20",
+ .code = MEDIA_BUS_FMT_UYVY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_1X20, {
+ .name = "VYUY10_1X20",
+ .code = MEDIA_BUS_FMT_VYUY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_1X20, {
+ .name = "YUYV10_1X20",
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_1X20, {
+ .name = "YVYU10_1X20",
+ .code = MEDIA_BUS_FMT_YVYU10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV8_1X24, {
+ .name = "YUV8_1X24",
+ .code = MEDIA_BUS_FMT_YUV8_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV10_1X30, {
+ .name = "YUV10_1X30",
+ .code = MEDIA_BUS_FMT_YUV10_1X30,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 30,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_AYUV8_1X32, {
+ .name = "AYUV8_1X32",
+ .code = MEDIA_BUS_FMT_AYUV8_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_2X12, {
+ .name = "UYVY12_2X12",
+ .code = MEDIA_BUS_FMT_UYVY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_2X12, {
+ .name = "VYUY12_2X12",
+ .code = MEDIA_BUS_FMT_VYUY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_2X12, {
+ .name = "YUYV12_2X12",
+ .code = MEDIA_BUS_FMT_YUYV12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_2X12, {
+ .name = "YVYU12_2X12",
+ .code = MEDIA_BUS_FMT_YVYU12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_1X24, {
+ .name = "UYVY12_1X24",
+ .code = MEDIA_BUS_FMT_UYVY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_1X24, {
+ .name = "VYUY12_1X24",
+ .code = MEDIA_BUS_FMT_VYUY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_1X24, {
+ .name = "YUYV12_1X24",
+ .code = MEDIA_BUS_FMT_YUYV12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_1X24, {
+ .name = "YVYU12_1X24",
+ .code = MEDIA_BUS_FMT_YVYU12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, {
+ .name = "SBGGR8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, {
+ .name = "SGBRG8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, {
+ .name = "SGRBG8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, {
+ .name = "SRGGB8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, {
+ .name = "SBGGR10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, {
+ .name = "SGBRG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, {
+ .name = "SGRBG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, {
+ .name = "SRGGB10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, {
+ .name = "SBGGR10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, {
+ .name = "SGBRG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, {
+ .name = "SGRBG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, {
+ .name = "SRGGB10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, {
+ .name = "SBGGR10_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, {
+ .name = "SBGGR10_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, {
+ .name = "SBGGR10_2X8_PADLO_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, {
+ .name = "SBGGR10_2X8_PADLO_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, {
+ .name = "SBGGR10_1X10",
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, {
+ .name = "SGBRG10_1X10",
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, {
+ .name = "SGRBG10_1X10",
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, {
+ .name = "SRGGB10_1X10",
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, {
+ .name = "SBGGR12_1X12",
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, {
+ .name = "SGBRG12_1X12",
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, {
+ .name = "SGRBG12_1X12",
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, {
+ .name = "SRGGB12_1X12",
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, {
+ .name = "SBGGR14_1X14",
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, {
+ .name = "SGBRG14_1X14",
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, {
+ .name = "SGRBG14_1X14",
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, {
+ .name = "SRGGB14_1X14",
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, {
+ .name = "SBGGR20_1X20",
+ .code = MEDIA_BUS_FMT_SBGGR20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, {
+ .name = "SGBRG20_1X20",
+ .code = MEDIA_BUS_FMT_SGBRG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, {
+ .name = "SGRBG20_1X20",
+ .code = MEDIA_BUS_FMT_SGRBG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, {
+ .name = "SRGGB20_1X20",
+ .code = MEDIA_BUS_FMT_SRGGB20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+		.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ /* \todo Clarify colour encoding for HSV formats */
+ { MEDIA_BUS_FMT_AHSV8888_1X32, {
+ .name = "AHSV8888_1X32",
+ .code = MEDIA_BUS_FMT_AHSV8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_JPEG_1X8, {
+ .name = "JPEG_1X8",
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_METADATA_FIXED, {
+ .name = "METADATA_FIXED",
+ .code = MEDIA_BUS_FMT_METADATA_FIXED,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_8, {
+ .name = "META_8",
+ .code = MEDIA_BUS_FMT_META_8,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_10, {
+ .name = "META_10",
+ .code = MEDIA_BUS_FMT_META_10,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_12, {
+ .name = "META_12",
+ .code = MEDIA_BUS_FMT_META_12,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_14, {
+ .name = "META_14",
+ .code = MEDIA_BUS_FMT_META_14,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_16, {
+ .name = "META_16",
+ .code = MEDIA_BUS_FMT_META_16,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_20, {
+ .name = "META_20",
+ .code = MEDIA_BUS_FMT_META_20,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_24, {
+ .name = "META_24",
+ .code = MEDIA_BUS_FMT_META_24,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_CCS_EMBEDDED, {
+ .name = "CCS_EMBEDDED",
+ .code = MEDIA_BUS_FMT_CCS_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_OV2740_EMBEDDED, {
+ .name = "OV2740_EMBEDDED",
+		.code = MEDIA_BUS_FMT_OV2740_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+};
+
+} /* namespace */
+
+/**
+ * \fn bool MediaBusFormatInfo::isValid() const
+ * \brief Check if the media bus format info is valid
+ * \return True if the media bus format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a media bus format
+ * \param[in] code The media bus format code
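+ *
+ * A minimal usage sketch (the queried format is illustrative):
+ * \code
+ * const MediaBusFormatInfo &fmtInfo =
+ *         MediaBusFormatInfo::info(MEDIA_BUS_FMT_SBGGR10_1X10);
+ * if (fmtInfo.isValid())
+ *         std::cout << fmtInfo.name << ": " << fmtInfo.bitsPerPixel << " bpp";
+ * \endcode
+ *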
+ * \return The MediaBusFormatInfo describing the \a code if known, or an invalid
+ * MediaBusFormatInfo otherwise
+ */
+const MediaBusFormatInfo &MediaBusFormatInfo::info(uint32_t code)
+{
+ static const MediaBusFormatInfo invalid{};
+
+ const auto it = mediaBusFormatInfo.find(code);
+ if (it == mediaBusFormatInfo.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported media bus format "
+ << utils::hex(code, 4);
+ return invalid;
+ }
+
+ return it->second;
+}
+
+/**
+ * \struct V4L2SubdeviceCapability
+ * \brief struct v4l2_subdev_capability object wrapper and helpers
+ *
+ * The V4L2SubdeviceCapability structure manages the information returned by the
+ * VIDIOC_SUBDEV_QUERYCAP ioctl.
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::isReadOnly()
+ * \brief Check if a subdevice is registered as read-only
+ *
+ * A V4L2 subdevice is registered as read-only if V4L2_SUBDEV_CAP_RO_SUBDEV
+ * is listed as part of its capabilities.
+ *
+ * \return True if the subdevice is registered as read-only, false otherwise
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::hasStreams()
+ * \brief Check if a subdevice supports the V4L2 streams API
+ * \return True if the subdevice supports the streams API, false otherwise
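+ *
+ * A minimal sketch of a post-open() capability check (\a subdev is assumed
+ * to be an open V4L2Subdevice):
+ * \code
+ * V4L2Subdevice::Routing routing;
+ * if (subdev->caps().hasStreams())
+ *         subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ * \endcode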
+ */
+
+/**
* \struct V4L2SubdeviceFormat
* \brief The V4L2 sub-device image format and sizes
*
@@ -61,7 +889,7 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
- * \var V4L2SubdeviceFormat::mbus_code
+ * \var V4L2SubdeviceFormat::code
* \brief The image format bus code
*/
@@ -71,17 +899,54 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
+ * \var V4L2SubdeviceFormat::colorSpace
+ * \brief The color space of the pixels
+ *
+ * The color space of the image. When setting the format, this may be
+ * unset, in which case the driver gets to use its default color space.
+ * After being set, this value should contain the color space that
+ * was actually used. If this value is unset, then the color space chosen
+ * by the driver could not be represented by the ColorSpace class (and
+ * should probably be added to it).
+ *
+ * It is up to the pipeline handler or application to check if the
+ * resulting color space is acceptable.
+ */
+
+/**
* \brief Assemble and return a string describing the format
* \return A string describing the V4L2SubdeviceFormat
*/
const std::string V4L2SubdeviceFormat::toString() const
{
std::stringstream ss;
- ss << size.toString() << "-" << utils::hex(mbus_code, 4);
+ ss << *this;
+
return ss.str();
}
/**
+ * \brief Insert a text representation of a V4L2SubdeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2SubdeviceFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
+{
+ out << f.size << "-";
+
+ const auto it = mediaBusFormatInfo.find(f.code);
+
+ if (it == mediaBusFormatInfo.end())
+ out << utils::hex(f.code, 4);
+ else
+ out << it->second.name;
+
+ return out;
+}
+
+/**
* \class V4L2Subdevice
* \brief A V4L2 subdevice as exposed by the Linux kernel
*
@@ -96,6 +961,148 @@ const std::string V4L2SubdeviceFormat::toString() const
*/
/**
+ * \typedef V4L2Subdevice::Formats
+ * \brief A map of supported media bus formats to frame sizes
+ */
+
+/**
+ * \enum V4L2Subdevice::Whence
+ * \brief Specify the type of format for getFormat() and setFormat() operations
+ * \var V4L2Subdevice::ActiveFormat
+ * \brief The format operation applies to ACTIVE formats
+ * \var V4L2Subdevice::TryFormat
+ * \brief The format operation applies to TRY formats
+ */
+
+/**
+ * \class V4L2Subdevice::Stream
+ * \brief V4L2 subdevice stream
+ *
+ * This class identifies a subdev stream by bundling the pad number with the
+ * stream number. It is used in all stream-aware functions of the V4L2Subdevice
+ * class to identify the stream the functions operate on.
+ *
+ * \var V4L2Subdevice::Stream::pad
+ * \brief The 0-indexed pad number
+ *
+ * \var V4L2Subdevice::Stream::stream
+ * \brief The stream number
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream()
+ * \brief Construct a Stream with pad and stream set to 0
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream(unsigned int pad, unsigned int stream)
+ * \brief Construct a Stream with a given \a pad and \a stream number
+ * \param[in] pad The 0-indexed pad number
+ * \param[in] stream The stream number
+ */
+
+/**
+ * \brief Compare streams for equality
+ * \return True if the two streams are equal, false otherwise
+ */
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+{
+ return lhs.pad == rhs.pad && lhs.stream == rhs.stream;
+}
+
+/**
+ * \fn bool operator!=(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+ * \brief Compare streams for inequality
+ * \return True if the two streams are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Stream into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] stream The V4L2Subdevice::Stream
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream)
+{
+ out << stream.pad << "/" << stream.stream;
+
+ return out;
+}
+
+/**
+ * \class V4L2Subdevice::Route
+ * \brief V4L2 subdevice routing table entry
+ *
+ * This class models a route in the subdevice routing table. It is similar to
+ * the v4l2_subdev_route structure, but uses the V4L2Subdevice::Stream class
+ * for easier usage with the V4L2Subdevice stream-aware functions.
+ *
+ * \var V4L2Subdevice::Route::sink
+ * \brief The sink stream of the route
+ *
+ * \var V4L2Subdevice::Route::source
+ * \brief The source stream of the route
+ *
+ * \var V4L2Subdevice::Route::flags
+ * \brief The route flags (V4L2_SUBDEV_ROUTE_FL_*)
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route()
+ * \brief Construct a Route with default streams
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route(const Stream &sink, const Stream &source,
+ * uint32_t flags)
+ * \brief Construct a Route from \a sink to \a source
+ * \param[in] sink The sink stream
+ * \param[in] source The source stream
+ * \param[in] flags The route flags
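+ *
+ * A minimal sketch building and applying a one-route table (the pad and
+ * stream numbers are illustrative; \a subdev is assumed to be an open,
+ * stream-aware V4L2Subdevice):
+ * \code
+ * V4L2Subdevice::Routing routing;
+ * routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ *                      V4L2Subdevice::Stream{ 1, 0 },
+ *                      V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ * int ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ * \endcode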
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Route into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] route The V4L2Subdevice::Route
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route)
+{
+ out << route.sink << " -> " << route.source
+ << " (" << utils::hex(route.flags) << ")";
+
+ return out;
+}
+
+/**
+ * \typedef V4L2Subdevice::Routing
+ * \brief V4L2 subdevice routing table
+ *
+ * This class stores a subdevice routing table as a vector of routes.
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Routing into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] routing The V4L2Subdevice::Routing
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing)
+{
+ for (const auto &[i, route] : utils::enumerate(routing)) {
+ out << "[" << i << "] " << route;
+ if (i != routing.size() - 1)
+ out << ", ";
+ }
+
+ return out;
+}
+
+/**
* \brief Create a V4L2 subdevice from a MediaEntity using its device node
* path
*/
@@ -115,7 +1122,40 @@ V4L2Subdevice::~V4L2Subdevice()
*/
int V4L2Subdevice::open()
{
- return V4L2Device::open(O_RDWR);
+ int ret = V4L2Device::open(O_RDWR);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to query the subdev capabilities. The VIDIOC_SUBDEV_QUERYCAP API
+	 * was introduced in kernel v5.8; ENOTTY errors must be ignored to
+ * support older kernels.
+ */
+ caps_ = {};
+ ret = ioctl(VIDIOC_SUBDEV_QUERYCAP, &caps_);
+ if (ret < 0 && errno != ENOTTY) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to query capabilities: " << strerror(-ret);
+ return ret;
+ }
+
+ /* If the subdev supports streams, enable the streams API. */
+ if (caps_.hasStreams()) {
+ struct v4l2_subdev_client_capability clientCaps{};
+ clientCaps.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ ret = ioctl(VIDIOC_SUBDEV_S_CLIENT_CAP, &clientCaps);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to set client capabilities: "
+ << strerror(-ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -125,54 +1165,136 @@ int V4L2Subdevice::open()
*/
/**
- * \brief Set a crop rectangle on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
- * \param[inout] rect The rectangle describing crop target area
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] stream The stream the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
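+ *
+ * A minimal sketch reading the crop bounds (the 0/0 stream is
+ * illustrative; \a subdev is assumed to be an open V4L2Subdevice):
+ * \code
+ * Rectangle bounds;
+ * int ret = subdev->getSelection({ 0, 0 }, V4L2_SEL_TGT_CROP_BOUNDS, &bounds);
+ * \endcode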
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setCrop(unsigned int pad, Rectangle *rect)
+int V4L2Subdevice::getSelection(const Stream &stream, unsigned int target,
+ Rectangle *rect)
{
- return setSelection(pad, V4L2_SEL_TGT_CROP, rect);
+ struct v4l2_subdev_selection sel = {};
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Unable to get rectangle " << target << " on pad "
+ << stream << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
/**
- * \brief Set a compose rectangle on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
- * \param[inout] rect The rectangle describing the compose target area
+ * \fn V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setCompose(unsigned int pad, Rectangle *rect)
+
+/**
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] stream The stream the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
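+ *
+ * A minimal sketch applying a full-HD crop (the stream and rectangle
+ * values are illustrative):
+ * \code
+ * Rectangle crop{ 0, 0, 1920, 1080 };
+ * int ret = subdev->setSelection({ 0, 0 }, V4L2_SEL_TGT_CROP, &crop);
+ * \endcode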
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setSelection(const Stream &stream, unsigned int target,
+ Rectangle *rect)
{
- return setSelection(pad, V4L2_SEL_TGT_COMPOSE, rect);
+ struct v4l2_subdev_selection sel = {};
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
+ sel.target = target;
+ sel.flags = 0;
+
+ sel.r.left = rect->x;
+ sel.r.top = rect->y;
+ sel.r.width = rect->width;
+ sel.r.height = rect->height;
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Unable to set rectangle " << target << " on pad "
+ << stream << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
/**
- * \brief Enumerate all media bus codes and frame sizes on a \a pad
- * \param[in] pad The 0-indexed pad number to enumerate formats on
+ * \fn V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \brief Enumerate all media bus codes and frame sizes on a \a stream
+ * \param[in] stream The stream to enumerate formats for
*
* Enumerate all media bus codes and frame sizes supported by the subdevice on
- * a \a pad.
+ * a \a stream.
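+ *
+ * A minimal sketch iterating the result (the 0/0 stream is illustrative):
+ * \code
+ * V4L2Subdevice::Formats formats = subdev->formats({ 0, 0 });
+ * for (const auto &[code, sizes] : formats)
+ *         std::cout << utils::hex(code, 4) << ": "
+ *                   << sizes.size() << " size ranges" << std::endl;
+ * \endcode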
*
* \return A list of the supported device formats
*/
-ImageFormats V4L2Subdevice::formats(unsigned int pad)
+V4L2Subdevice::Formats V4L2Subdevice::formats(const Stream &stream)
{
- ImageFormats formats;
+ Formats formats;
- if (pad >= entity_->pads().size()) {
- LOG(V4L2, Error) << "Invalid pad: " << pad;
+ if (stream.pad >= entity_->pads().size()) {
+ LOG(V4L2, Error) << "Invalid pad: " << stream.pad;
return {};
}
- for (unsigned int code : enumPadCodes(pad)) {
- std::vector<SizeRange> sizes = enumPadSizes(pad, code);
+ for (unsigned int code : enumPadCodes(stream)) {
+ std::vector<SizeRange> sizes = enumPadSizes(stream, code);
if (sizes.empty())
return {};
- if (formats.addFormat(code, sizes)) {
+ const auto inserted = formats.insert({ code, sizes });
+ if (!inserted.second) {
LOG(V4L2, Error)
<< "Could not add sizes for media bus code "
- << code << " on pad " << pad;
+ << code << " on pad " << stream.pad;
return {};
}
}
@@ -181,86 +1303,461 @@ ImageFormats V4L2Subdevice::formats(unsigned int pad)
}
/**
- * \brief Retrieve the image format set on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \fn V4L2Subdevice::formats(unsigned int pad)
+ * \brief Enumerate all media bus codes and frame sizes on a \a pad
+ * \param[in] pad The 0-indexed pad number to enumerate formats on
+ *
+ * Enumerate all media bus codes and frame sizes supported by the subdevice on
+ * a \a pad
+ *
+ * \return A list of the supported device formats
+ */
+
+std::optional<ColorSpace> V4L2Subdevice::toColorSpace(const v4l2_mbus_framefmt &format) const
+{
+ /*
+	 * Only image formats have a color space; for other formats (such as
+ * metadata formats) the color space concept isn't applicable. V4L2
+ * subdev drivers return a colorspace set to V4L2_COLORSPACE_DEFAULT in
+ * that case (as well as for image formats when the driver hasn't
+ * bothered implementing color space support). Check the colorspace
+ * field here and return std::nullopt directly to avoid logging a
+ * warning.
+ */
+ if (format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return std::nullopt;
+
+ PixelFormatInfo::ColourEncoding colourEncoding;
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(format.code);
+ if (info.isValid()) {
+ colourEncoding = info.colourEncoding;
+ } else {
+ LOG(V4L2, Warning)
+ << "Unknown subdev format "
+ << utils::hex(format.code, 4)
+ << ", defaulting to RGB encoding";
+
+ colourEncoding = PixelFormatInfo::ColourEncodingRGB;
+ }
+
+ return V4L2Device::toColorSpace(format, colourEncoding);
+}
+
+/**
+ * \brief Retrieve the image format set on one of the V4L2 subdevice streams
+ * \param[in] stream The stream the format is to be retrieved from
* \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format)
+int V4L2Subdevice::getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
+ Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
int ret = ioctl(VIDIOC_SUBDEV_G_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to get format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to get format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
+ format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Retrieve the image format set on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set an image format on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be applied to
- * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] stream The stream the format is to be applied to
+ * \param[inout] format The image bus format to apply to the stream
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
*
- * Apply the requested image format to the desired media pad and return the
- * actually applied format parameters, as \ref V4L2Subdevice::getFormat would
- * do.
+ * Apply the requested image format to the desired stream and return the
+ * actually applied format parameters, as getFormat() would do.
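+ *
+ * A minimal sketch (the 0/0 stream and the media bus code are
+ * illustrative; \a subdev is assumed to be an open V4L2Subdevice):
+ * \code
+ * V4L2SubdeviceFormat format{};
+ * format.size = { 1920, 1080 };
+ * format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ * int ret = subdev->setFormat({ 0, 0 }, &format, V4L2Subdevice::ActiveFormat);
+ * \endcode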
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format)
+int V4L2Subdevice::setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
+ Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
subdevFmt.format.width = format->size.width;
subdevFmt.format.height = format->size.height;
- subdevFmt.format.code = format->mbus_code;
+ subdevFmt.format.code = format->code;
+ subdevFmt.format.field = V4L2_FIELD_NONE;
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, subdevFmt.format);
+
+ /* The CSC flag is only applicable to source pads. */
+ if (entity_->pads()[stream.pad]->flags() & MEDIA_PAD_FL_SOURCE)
+ subdevFmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+ }
int ret = ioctl(VIDIOC_SUBDEV_S_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to set format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to set format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
+ format->colorSpace = toColorSpace(subdevFmt.format);
+
+ return 0;
+}
+
+/**
+ * \fn V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Set an image format on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be applied to
+ * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the requested image format to the desired media pad and return the
+ * actually applied format parameters, as getFormat() would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+namespace {
+
+void routeFromKernel(V4L2Subdevice::Route &route,
+ const struct v4l2_subdev_route &kroute)
+{
+ route.sink.pad = kroute.sink_pad;
+ route.sink.stream = kroute.sink_stream;
+ route.source.pad = kroute.source_pad;
+ route.source.stream = kroute.source_stream;
+ route.flags = kroute.flags;
+}
+
+void routeToKernel(const V4L2Subdevice::Route &route,
+ struct v4l2_subdev_route &kroute)
+{
+ kroute.sink_pad = route.sink.pad;
+ kroute.sink_stream = route.sink.stream;
+ kroute.source_pad = route.source.pad;
+ kroute.source_stream = route.source.stream;
+ kroute.flags = route.flags;
+}
+
+/*
+ * Legacy routing support for pre-v6.10-rc1 kernels. Drop when v6.12-rc1 gets
+ * released.
+ */
+struct v4l2_subdev_routing_legacy {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
+#define VIDIOC_SUBDEV_G_ROUTING_LEGACY _IOWR('V', 38, struct v4l2_subdev_routing_legacy)
+#define VIDIOC_SUBDEV_S_ROUTING_LEGACY _IOWR('V', 39, struct v4l2_subdev_routing_legacy)
+
+} /* namespace */
+
+int V4L2Subdevice::getRoutingLegacy(Routing *routing, Whence whence)
+{
+ struct v4l2_subdev_routing_legacy rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Retrieve the subdevice's internal routing table
+ * \param[out] routing The routing table
+ * \param[in] whence The routing table to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
+{
+ routing->clear();
+
+ if (!caps_.hasStreams())
+ return 0;
+
+ struct v4l2_subdev_routing rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return V4L2Subdevice::getRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ if (!rt.num_routes)
+ return 0;
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+int V4L2Subdevice::setRoutingLegacy(Routing *routing, Whence whence)
+{
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing_legacy rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
return 0;
}
/**
+ * \brief Set a routing table on the V4L2 subdevice
+ * \param[inout] routing The routing table
+ * \param[in] whence The routing table to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the routing table \a routing to the V4L2 subdevice and update its
+ * content to reflect the actually applied routing table, as getRouting()
+ * would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
+{
+ if (!caps_.hasStreams()) {
+ routing->clear();
+ return 0;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing rt = {};
+ rt.which = whence;
+ rt.len_routes = routes.size();
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return setRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ /*
+ * The kernel may want to return more routes than we have space for. In
+ * that event, we must issue a VIDIOC_SUBDEV_G_ROUTING call to retrieve
+ * the additional routes.
+ */
+ if (rt.num_routes > routes.size()) {
+ routes.resize(rt.num_routes);
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Retrieve the model name of the device
+ *
+ * The model name allows identification of the specific device model. This can
+ * be used to infer device characteristics, for instance to determine the
+ * analogue gain model of a camera sensor based on the sensor model name.
+ *
+ * Neither the V4L2 API nor the Media Controller API exposes an explicit model
+ * name. This function implements a heuristic to extract the model name from
+ * the subdevice's entity name. This should produce accurate results for
+ * I2C-based devices. If the heuristic doesn't match a known naming pattern,
+ * the function returns the full entity name.
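+ *
+ * A hedged sketch (the entity name follows the common I2C rule; \a media
+ * is assumed to point to the MediaDevice the entity belongs to):
+ * \code
+ * std::unique_ptr<V4L2Subdevice> subdev =
+ *         V4L2Subdevice::fromEntityName(media, "imx219 0-0010");
+ * if (subdev && subdev->open() == 0)
+ *         LOG(V4L2, Debug) << subdev->model(); // yields "imx219"
+ * \endcode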
+ *
+ * \return The model name of the device
+ */
+const std::string &V4L2Subdevice::model()
+{
+ if (!model_.empty())
+ return model_;
+
+ /*
+ * Extract model name from the media entity name.
+ *
+ * There is no standardized naming scheme for sensor or other entities
+ * in the Linux kernel at the moment.
+ *
+ * - The most common rule, used by I2C sensors, associates the model
+ * name with the I2C bus number and address (e.g. 'imx219 0-0010').
+ *
+ * - When the sensor exposes multiple subdevs, the model name is
+ * usually followed by a function name, as in the smiapp driver (e.g.
+ * 'jt8ew9 pixel_array 0-0010').
+ *
+ * - The vimc driver names its sensors 'Sensor A' and 'Sensor B'.
+ *
+	 * Other schemes probably exist. As a best-effort heuristic, use the
+ * part of the entity name before the first space if the name contains
+ * an I2C address, and use the full entity name otherwise.
+ */
+ std::string entityName = entity_->name();
+ std::regex i2cRegex{ " [0-9]+-[0-9a-f]{4}" };
+ std::smatch match;
+
+ if (std::regex_search(entityName, match, i2cRegex))
+ model_ = entityName.substr(0, entityName.find(' '));
+ else
+ model_ = entityName;
+
+ return model_;
+}
+
+/**
+ * \fn V4L2Subdevice::caps()
+ * \brief Retrieve the subdevice V4L2 capabilities
+ * \return The subdevice V4L2 capabilities
+ */
+
+/**
* \brief Create a new video subdevice instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
* \param[in] entity The media entity name
*
- * Releasing memory of the newly created instance is responsibility of the
- * caller of this function.
- *
* \return A newly created V4L2Subdevice on success, nullptr otherwise
*/
-V4L2Subdevice *V4L2Subdevice::fromEntityName(const MediaDevice *media,
- const std::string &entity)
+std::unique_ptr<V4L2Subdevice>
+V4L2Subdevice::fromEntityName(const MediaDevice *media,
+ const std::string &entity)
{
MediaEntity *mediaEntity = media->getEntityByName(entity);
if (!mediaEntity)
return nullptr;
- return new V4L2Subdevice(mediaEntity);
+ return std::make_unique<V4L2Subdevice>(mediaEntity);
}
std::string V4L2Subdevice::logPrefix() const
@@ -268,14 +1765,15 @@ std::string V4L2Subdevice::logPrefix() const
return "'" + entity_->name() + "'";
}
-std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
+std::vector<unsigned int> V4L2Subdevice::enumPadCodes(const Stream &stream)
{
std::vector<unsigned int> codes;
int ret;
for (unsigned int index = 0; ; index++) {
struct v4l2_subdev_mbus_code_enum mbusEnum = {};
- mbusEnum.pad = pad;
+ mbusEnum.pad = stream.pad;
+ mbusEnum.stream = stream.stream;
mbusEnum.index = index;
mbusEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -288,7 +1786,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
if (ret < 0 && ret != -EINVAL) {
LOG(V4L2, Error)
- << "Unable to enumerate formats on pad " << pad
+ << "Unable to enumerate formats on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -296,7 +1794,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
return codes;
}
-std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
+std::vector<SizeRange> V4L2Subdevice::enumPadSizes(const Stream &stream,
unsigned int code)
{
std::vector<SizeRange> sizes;
@@ -305,7 +1803,8 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
for (unsigned int index = 0;; index++) {
struct v4l2_subdev_frame_size_enum sizeEnum = {};
sizeEnum.index = index;
- sizeEnum.pad = pad;
+ sizeEnum.pad = stream.pad;
+ sizeEnum.stream = stream.stream;
sizeEnum.code = code;
sizeEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -319,7 +1818,7 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
if (ret < 0 && ret != -EINVAL && ret != -ENOTTY) {
LOG(V4L2, Error)
- << "Unable to enumerate sizes on pad " << pad
+ << "Unable to enumerate sizes on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -327,35 +1826,4 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
return sizes;
}
-int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
- Rectangle *rect)
-{
- struct v4l2_subdev_selection sel = {};
-
- sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
- sel.target = target;
- sel.flags = 0;
-
- sel.r.left = rect->x;
- sel.r.top = rect->y;
- sel.r.width = rect->w;
- sel.r.height = rect->h;
-
- int ret = ioctl(VIDIOC_SUBDEV_S_SELECTION, &sel);
- if (ret < 0) {
- LOG(V4L2, Error)
- << "Unable to set rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
- return ret;
- }
-
- rect->x = sel.r.left;
- rect->y = sel.r.top;
- rect->w = sel.r.width;
- rect->h = sel.r.height;
-
- return 0;
-}
-
} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index eb33a68e..e241eb47 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,37 +2,40 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
-#include "v4l2_videodevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include <algorithm>
+#include <array>
#include <fcntl.h>
-#include <iomanip>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#include <vector>
-#include <linux/drm_fourcc.h>
#include <linux/version.h>
-#include <libcamera/event_notifier.h>
-#include <libcamera/file_descriptor.h>
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/unique_fd.h>
+#include <libcamera/base/utils.h>
-#include "log.h"
-#include "media_device.h"
-#include "media_object.h"
-#include "utils.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
/**
* \file v4l2_videodevice.h
* \brief V4L2 Video Device
*/
+
namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
@@ -137,6 +140,12 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
+ * \fn V4L2Capability::hasMediaController()
+ * \brief Determine if the video device uses Media Controller to configure I/O
+ * \return True if the video device is controlled by a Media Controller device
+ */
+
+/**
* \class V4L2BufferCache
* \brief Hot cache of associations between V4L2 buffer indexes and FrameBuffer
*
@@ -182,7 +191,7 @@ V4L2BufferCache::V4L2BufferCache(const std::vector<std::unique_ptr<FrameBuffer>>
for (const std::unique_ptr<FrameBuffer> &buffer : buffers)
cache_.emplace_back(true,
lastUsedCounter_.fetch_add(1, std::memory_order_acq_rel),
- buffer->planes());
+ *buffer.get());
}
V4L2BufferCache::~V4L2BufferCache()
@@ -192,6 +201,19 @@ V4L2BufferCache::~V4L2BufferCache()
}
/**
+ * \brief Check if all the entries in the cache are unused
+ * \return True if all entries in the cache are unused, false otherwise
+ */
+bool V4L2BufferCache::isEmpty() const
+{
+ for (auto const &entry : cache_) {
+ if (!entry.free_)
+ return false;
+ }
+
+ return true;
+}
+
+/**
* \brief Find the best V4L2 buffer for a FrameBuffer
* \param[in] buffer The FrameBuffer
*
@@ -213,7 +235,7 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
for (unsigned int index = 0; index < cache_.size(); index++) {
const Entry &entry = cache_[index];
- if (!entry.free)
+ if (!entry.free_)
continue;
/* Try to find a cache hit by comparing the planes. */
@@ -223,9 +245,9 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
break;
}
- if (entry.lastUsed < oldest) {
+ if (entry.lastUsed_ < oldest) {
use = index;
- oldest = entry.lastUsed;
+ oldest = entry.lastUsed_;
}
}
@@ -249,16 +271,16 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
void V4L2BufferCache::put(unsigned int index)
{
ASSERT(index < cache_.size());
- cache_[index].free = true;
+ cache_[index].free_ = true;
}
V4L2BufferCache::Entry::Entry()
- : free(true), lastUsed(0)
+ : free_(true), lastUsed_(0)
{
}
V4L2BufferCache::Entry::Entry(bool free, uint64_t lastUsed, const FrameBuffer &buffer)
- : free(free), lastUsed(lastUsed)
+ : free_(free), lastUsed_(lastUsed)
{
for (const FrameBuffer::Plane &plane : buffer.planes())
planes_.emplace_back(plane);
@@ -272,90 +294,13 @@ bool V4L2BufferCache::Entry::operator==(const FrameBuffer &buffer) const
return false;
for (unsigned int i = 0; i < planes.size(); i++)
- if (planes_[i].fd != planes[i].fd.fd() ||
+ if (planes_[i].fd != planes[i].fd.get() ||
planes_[i].length != planes[i].length)
return false;
return true;
}
/**
- * \class V4L2PixelFormat
- * \brief V4L2 pixel format FourCC wrapper
- *
- * The V4L2PixelFormat class describes the pixel format of a V4L2 buffer. It
- * wraps the V4L2 numerical FourCC, and shall be used in all APIs that deal with
- * V4L2 pixel formats. Its purpose is to prevent unintentional confusion of
- * V4L2 and DRM FourCCs in code by catching implicit conversion attempts at
- * compile time.
- *
- * To achieve this goal, construction of a V4L2PixelFormat from an integer value
- * is explicit. To retrieve the integer value of a V4L2PixelFormat, both the
- * explicit value() and implicit uint32_t conversion operators may be used.
- */
-
-/**
- * \fn V4L2PixelFormat::V4L2PixelFormat()
- * \brief Construct a V4L2PixelFormat with an invalid format
- *
- * V4L2PixelFormat instances constructed with the default constructor are
- * invalid, calling the isValid() function returns false.
- */
-
-/**
- * \fn V4L2PixelFormat::V4L2PixelFormat(uint32_t fourcc)
- * \brief Construct a V4L2PixelFormat from a FourCC value
- * \param[in] fourcc The pixel format FourCC numerical value
- */
-
-/**
- * \fn bool V4L2PixelFormat::isValid() const
- * \brief Check if the pixel format is valid
- *
- * V4L2PixelFormat instances constructed with the default constructor are
- * invalid. Instances constructed with a FourCC defined in the V4L2 API are
- * valid. The behaviour is undefined otherwise.
- *
- * \return True if the pixel format is valid, false otherwise
- */
-
-/**
- * \fn uint32_t V4L2PixelFormat::fourcc() const
- * \brief Retrieve the pixel format FourCC numerical value
- * \return The pixel format FourCC numerical value
- */
-
-/**
- * \fn V4L2PixelFormat::operator uint32_t() const
- * \brief Convert to the pixel format FourCC numerical value
- * \return The pixel format FourCC numerical value
- */
-
-/**
- * \brief Assemble and return a string describing the pixel format
- * \return A string describing the pixel format
- */
-std::string V4L2PixelFormat::toString() const
-{
- if (fourcc_ == 0)
- return "<INVALID>";
-
- char ss[8] = { static_cast<char>(fourcc_ & 0x7f),
- static_cast<char>((fourcc_ >> 8) & 0x7f),
- static_cast<char>((fourcc_ >> 16) & 0x7f),
- static_cast<char>((fourcc_ >> 24) & 0x7f) };
-
- for (unsigned int i = 0; i < 4; i++) {
- if (!isprint(ss[i]))
- ss[i] = '.';
- }
-
- if (fourcc_ & (1 << 31))
- strcat(ss, "-BE");
-
- return ss;
-}
-
-/**
* \class V4L2DeviceFormat
* \brief The V4L2 video device image format and sizes
*
@@ -429,6 +374,15 @@ std::string V4L2PixelFormat::toString() const
*/
/**
+ * \struct V4L2DeviceFormat::Plane
+ * \brief Per-plane memory size information
+ * \var V4L2DeviceFormat::Plane::size
+ * \brief The plane total memory size (in bytes)
+ * \var V4L2DeviceFormat::Plane::bpl
+ * \brief The plane line stride (in bytes)
+ */
+
+/**
* \var V4L2DeviceFormat::size
* \brief The image size in pixels
*/
@@ -442,6 +396,21 @@ std::string V4L2PixelFormat::toString() const
*/
/**
+ * \var V4L2DeviceFormat::colorSpace
+ * \brief The color space of the pixels
+ *
+ * The color space of the image. When setting the format, this may be
+ * unset, in which case the driver gets to use its default color space.
+ * After being set, this value should contain the color space that
+ * was actually used. If this value is unset, then the color space chosen
+ * by the driver could not be represented by the ColorSpace class (and
+ * should probably be added to it).
+ *
+ * It is up to the pipeline handler or application to check if the
+ * resulting color space is acceptable.
+ */
+
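A minimal sketch of how this field is typically used (not part of this patch; `video` is assumed to be a pointer to an open, configured V4L2VideoDevice, with the relevant headers included):

	V4L2DeviceFormat format;
	video->getFormat(&format);

	/* Request BT.709 and check what the driver actually selected. */
	format.colorSpace = ColorSpace::Rec709;
	video->setFormat(&format);

	if (format.colorSpace != ColorSpace::Rec709)
		LOG(V4L2, Warning) << "Driver selected another color space";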
+/**
* \var V4L2DeviceFormat::planes
* \brief The per-plane memory size information
*
@@ -463,11 +432,25 @@ std::string V4L2PixelFormat::toString() const
const std::string V4L2DeviceFormat::toString() const
{
std::stringstream ss;
- ss << size.toString() << "-" << fourcc.toString();
+ ss << *this;
+
return ss.str();
}
/**
+ * \brief Insert a text representation of a V4L2DeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2DeviceFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2DeviceFormat &f)
+{
+ out << f.size << "-" << f.fourcc;
+ return out;
+}
+
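As a brief usage sketch (again assuming a hypothetical `video` device pointer), the stream insertion operator makes formats convenient to log:

	V4L2DeviceFormat format;
	video->getFormat(&format);
	LOG(V4L2, Debug) << "Active format: " << format;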
+/**
* \class V4L2VideoDevice
* \brief V4L2VideoDevice object and API
*
@@ -540,11 +523,18 @@ const std::string V4L2DeviceFormat::toString() const
*/
/**
+ * \typedef V4L2VideoDevice::Formats
+ * \brief A map of supported V4L2 pixel formats to frame sizes
+ */
+
+/**
* \brief Construct a V4L2VideoDevice
* \param[in] deviceNode The file-system path to the video device node
*/
V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
- : V4L2Device(deviceNode), cache_(nullptr), fdEvent_(nullptr)
+ : V4L2Device(deviceNode), formatInfo_(nullptr), cache_(nullptr),
+ fdBufferNotifier_(nullptr), state_(State::Stopped),
+ watchdogDuration_(0.0)
{
/*
* We default to an MMAP based CAPTURE video device, however this will
@@ -563,6 +553,7 @@ V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
V4L2VideoDevice::V4L2VideoDevice(const MediaEntity *entity)
: V4L2VideoDevice(entity->deviceNode())
{
+ watchdog_.timeout.connect(this, &V4L2VideoDevice::watchdogExpired);
}
V4L2VideoDevice::~V4L2VideoDevice()
@@ -610,34 +601,41 @@ int V4L2VideoDevice::open()
* devices (POLLIN), and write notifications for OUTPUT video devices
* (POLLOUT).
*/
+ EventNotifier::Type notifierType;
+
if (caps_.isVideoCapture()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
: V4L2_BUF_TYPE_VIDEO_CAPTURE;
} else if (caps_.isVideoOutput()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
: V4L2_BUF_TYPE_VIDEO_OUTPUT;
} else if (caps_.isMetaCapture()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = V4L2_BUF_TYPE_META_CAPTURE;
} else if (caps_.isMetaOutput()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = V4L2_BUF_TYPE_META_OUTPUT;
} else {
LOG(V4L2, Error) << "Device is not a supported type";
return -EINVAL;
}
- fdEvent_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_ = new EventNotifier(fd(), notifierType);
+ fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
+ fdBufferNotifier_->setEnabled(false);
LOG(V4L2, Debug)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
+ ret = initFormats();
+ if (ret)
+ return ret;
+
return 0;
}
@@ -647,37 +645,33 @@ int V4L2VideoDevice::open()
* \param[in] handle The file descriptor to set
* \param[in] type The device type to operate on
*
- * This methods opens a video device from the existing file descriptor \a
- * handle. Like open(), this method queries the capabilities of the device, but
- * handles it according to the given device \a type instead of determining its
- * type from the capabilities. This can be used to force a given device type for
- * memory-to-memory devices.
+ * This function opens a video device from the existing file descriptor \a
+ * handle. Like open(), this function queries the capabilities of the device,
+ * but handles it according to the given device \a type instead of determining
+ * its type from the capabilities. This can be used to force a given device type
+ * for memory-to-memory devices.
*
- * The file descriptor \a handle is duplicated, and the caller is responsible
- * for closing the \a handle when it has no further use for it. The close()
- * method will close the duplicated file descriptor, leaving \a handle
- * untouched.
+ * The file descriptor \a handle is duplicated, no reference to the original
+ * handle is kept.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
+int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
{
int ret;
- int newFd;
- newFd = dup(handle);
- if (newFd < 0) {
+ UniqueFD newFd = handle.dup();
+ if (!newFd.isValid()) {
ret = -errno;
LOG(V4L2, Error) << "Failed to duplicate file handle: "
<< strerror(-ret);
return ret;
}
- ret = V4L2Device::setFd(newFd);
+ ret = V4L2Device::setFd(std::move(newFd));
if (ret < 0) {
LOG(V4L2, Error) << "Failed to set file handle: "
<< strerror(-ret);
- ::close(newFd);
return ret;
}
@@ -699,15 +693,17 @@ int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
* devices (POLLIN), and write notifications for OUTPUT video devices
* (POLLOUT).
*/
+ EventNotifier::Type notifierType;
+
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
: V4L2_BUF_TYPE_VIDEO_OUTPUT;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
: V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -717,13 +713,39 @@ int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
return -EINVAL;
}
- fdEvent_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_ = new EventNotifier(fd(), notifierType);
+ fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
+ fdBufferNotifier_->setEnabled(false);
LOG(V4L2, Debug)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
+ ret = initFormats();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int V4L2VideoDevice::initFormats()
+{
+ const std::vector<V4L2PixelFormat> &deviceFormats = enumPixelformats(0);
+ if (deviceFormats.empty()) {
+ LOG(V4L2, Error) << "Failed to initialize device formats";
+ return -EINVAL;
+ }
+
+ pixelFormats_ = { deviceFormats.begin(), deviceFormats.end() };
+
+ int ret = getFormat(&format_);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to get format";
+ return ret;
+ }
+
+ formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
+
return 0;
}
@@ -736,7 +758,9 @@ void V4L2VideoDevice::close()
return;
releaseBuffers();
- delete fdEvent_;
+ delete fdBufferNotifier_;
+
+ formatInfo_ = nullptr;
V4L2Device::close();
}
@@ -759,9 +783,16 @@ void V4L2VideoDevice::close()
* \return The string containing the device location
*/
+/**
+ * \fn V4L2VideoDevice::caps()
+ * \brief Retrieve the device V4L2 capabilities
+ * \return The device V4L2 capabilities
+ */
+
std::string V4L2VideoDevice::logPrefix() const
{
- return deviceNode() + (V4L2_TYPE_IS_OUTPUT(bufferType_) ? "[out]" : "[cap]");
+ return deviceNode() + "[" + std::to_string(fd()) +
+ (V4L2_TYPE_IS_OUTPUT(bufferType_) ? ":out]" : ":cap]");
}
/**
@@ -771,12 +802,46 @@ std::string V4L2VideoDevice::logPrefix() const
*/
int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return getFormatMeta(format);
- else if (caps_.isMultiplanar())
- return getFormatMultiplane(format);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return getFormatSingleplane(format);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return getFormatMultiplane(format);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return getFormatMeta(format);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * \brief Try an image format on the V4L2 video device
+ * \param[inout] format The image format to test on the video device
+ *
+ * Try the supplied \a format on the video device without applying it, returning
+ * the format that would be applied. This is equivalent to setFormat(), except
+ * that the device configuration is not changed.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
+{
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return trySetFormatSingleplane(format, false);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return trySetFormatMultiplane(format, false);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return trySetFormatMeta(format, false);
+ default:
+ return -EINVAL;
+ }
}
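A sketch of the try-then-set negotiation pattern this enables (hypothetical caller code, not part of this patch; `video` is an assumed open V4L2VideoDevice pointer):

	int configure(V4L2VideoDevice *video)
	{
		V4L2DeviceFormat format;
		format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_NV12);
		format.size = Size(1920, 1080);

		/* Probe without changing the device configuration. */
		int ret = video->tryFormat(&format);
		if (ret < 0)
			return ret;

		/* 'format' now holds the values the driver would apply. */
		return video->setFormat(&format);
	}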
/**
@@ -790,18 +855,40 @@ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return setFormatMeta(format);
- else if (caps_.isMultiplanar())
- return setFormatMultiplane(format);
- else
- return setFormatSingleplane(format);
+ int ret;
+
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ ret = trySetFormatSingleplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ ret = trySetFormatMultiplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ ret = trySetFormatMeta(format, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Cache the set format on success. */
+ if (ret)
+ return ret;
+
+ format_ = *format;
+ formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
+
+ return 0;
}
int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
@@ -811,28 +898,47 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
return ret;
}
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
+ format->planes[0].size = meta->buffersize;
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
+
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
-int V4L2VideoDevice::setFormatMeta(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
{
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
- pix->dataformat = format->fourcc;
- pix->buffersize = format->planes[0].size;
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ meta->dataformat = format->fourcc;
+ meta->buffersize = format->planes[0].size;
+ if (genericLineBased) {
+ meta->width = format->size.width;
+ meta->height = format->size.height;
+ meta->bytesperline = format->planes[0].bpl;
+ }
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -840,16 +946,29 @@ int V4L2VideoDevice::setFormatMeta(V4L2DeviceFormat *format)
* Return to caller the format actually applied on the video device,
* which might differ from the requested one.
*/
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = format->fourcc;
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+ format->planes[0].size = meta->buffersize;
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
+template<typename T>
+std::optional<ColorSpace> V4L2VideoDevice::toColorSpace(const T &v4l2Format)
+{
+ V4L2PixelFormat fourcc{ v4l2Format.pixelformat };
+ return V4L2Device::toColorSpace(v4l2Format, PixelFormatInfo::info(fourcc).colourEncoding);
+}
+
int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
@@ -867,6 +986,7 @@ int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
format->size.height = pix->height;
format->fourcc = V4L2PixelFormat(pix->pixelformat);
format->planesCount = pix->num_planes;
+ format->colorSpace = toColorSpace(*pix);
for (unsigned int i = 0; i < format->planesCount; ++i) {
format->planes[i].bpl = pix->plane_fmt[i].bytesperline;
@@ -876,7 +996,7 @@ int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
return 0;
}
-int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set)
{
struct v4l2_format v4l2Format = {};
struct v4l2_pix_format_mplane *pix = &v4l2Format.fmt.pix_mp;
@@ -888,15 +1008,25 @@ int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
pix->pixelformat = format->fourcc;
pix->num_planes = format->planesCount;
pix->field = V4L2_FIELD_NONE;
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
+
+ ASSERT(pix->num_planes <= std::size(pix->plane_fmt));
for (unsigned int i = 0; i < pix->num_planes; ++i) {
pix->plane_fmt[i].bytesperline = format->planes[i].bpl;
pix->plane_fmt[i].sizeimage = format->planes[i].size;
}
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -912,6 +1042,7 @@ int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
format->planes[i].bpl = pix->plane_fmt[i].bytesperline;
format->planes[i].size = pix->plane_fmt[i].sizeimage;
}
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
@@ -935,11 +1066,12 @@ int V4L2VideoDevice::getFormatSingleplane(V4L2DeviceFormat *format)
format->planesCount = 1;
format->planes[0].bpl = pix->bytesperline;
format->planes[0].size = pix->sizeimage;
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
-int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set)
{
struct v4l2_format v4l2Format = {};
struct v4l2_pix_format *pix = &v4l2Format.fmt.pix;
@@ -951,9 +1083,18 @@ int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
pix->pixelformat = format->fourcc;
pix->bytesperline = format->planes[0].bpl;
pix->field = V4L2_FIELD_NONE;
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
+
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -967,22 +1108,26 @@ int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
format->planesCount = 1;
format->planes[0].bpl = pix->bytesperline;
format->planes[0].size = pix->sizeimage;
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
/**
* \brief Enumerate all pixel formats and frame sizes
+ * \param[in] code Restrict formats to this media bus code.
*
* Enumerate all pixel formats and frame sizes supported by the video device.
+ * If the \a code argument is not zero, only formats compatible with that media
+ * bus code will be enumerated.
*
* \return A list of the supported video device formats
*/
-std::map<V4L2PixelFormat, std::vector<SizeRange>> V4L2VideoDevice::formats()
+V4L2VideoDevice::Formats V4L2VideoDevice::formats(uint32_t code)
{
- std::map<V4L2PixelFormat, std::vector<SizeRange>> formats;
+ Formats formats;
- for (V4L2PixelFormat pixelFormat : enumPixelformats()) {
+ for (V4L2PixelFormat pixelFormat : enumPixelformats(code)) {
std::vector<SizeRange> sizes = enumSizes(pixelFormat);
if (sizes.empty())
return {};
@@ -1000,15 +1145,22 @@ std::map<V4L2PixelFormat, std::vector<SizeRange>> V4L2VideoDevice::formats()
return formats;
}
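For illustration, the returned map is typically consumed as follows (a sketch; `video` is an assumed device pointer):

	for (const auto &[pixelFormat, sizes] : video->formats()) {
		LOG(V4L2, Debug) << pixelFormat.toString() << ": "
				 << sizes.size() << " size range(s)";
	}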
-std::vector<V4L2PixelFormat> V4L2VideoDevice::enumPixelformats()
+std::vector<V4L2PixelFormat> V4L2VideoDevice::enumPixelformats(uint32_t code)
{
std::vector<V4L2PixelFormat> formats;
int ret;
+ if (code && !caps_.hasMediaController()) {
+ LOG(V4L2, Error)
+ << "Media bus code filtering not supported by the device";
+ return {};
+ }
+
for (unsigned int index = 0; ; index++) {
struct v4l2_fmtdesc pixelformatEnum = {};
pixelformatEnum.index = index;
pixelformatEnum.type = bufferType_;
+ pixelformatEnum.mbus_code = code;
ret = ioctl(VIDIOC_ENUM_FMT, &pixelformatEnum);
if (ret)
@@ -1086,25 +1238,46 @@ std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat)
}
/**
- * \brief Set a crop rectangle on the V4L2 video device node
- * \param[inout] rect The rectangle describing the crop target area
+ * \brief Get the selection rectangle for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The selection rectangle to retrieve
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::setCrop(Rectangle *rect)
+int V4L2VideoDevice::getSelection(unsigned int target, Rectangle *rect)
{
- return setSelection(V4L2_SEL_TGT_CROP, rect);
+ struct v4l2_selection sel = {};
+
+ sel.type = bufferType_;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error) << "Unable to get rectangle " << target
+ << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
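A short sketch of querying the crop rectangle (hypothetical caller code; `video` is an assumed device pointer):

	Rectangle crop;
	int ret = video->getSelection(V4L2_SEL_TGT_CROP, &crop);
	if (!ret)
		LOG(V4L2, Debug) << "Crop rectangle: " << crop.toString();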
/**
- * \brief Set a compose rectangle on the V4L2 video device node
- * \param[inout] rect The rectangle describing the compose target area
+ * \brief Set a selection rectangle \a rect for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::setCompose(Rectangle *rect)
-{
- return setSelection(V4L2_SEL_TGT_COMPOSE, rect);
-}
-
int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
{
struct v4l2_selection sel = {};
@@ -1115,8 +1288,8 @@ int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
sel.r.left = rect->x;
sel.r.top = rect->y;
- sel.r.width = rect->w;
- sel.r.height = rect->h;
+ sel.r.width = rect->width;
+ sel.r.height = rect->height;
int ret = ioctl(VIDIOC_S_SELECTION, &sel);
if (ret < 0) {
@@ -1127,8 +1300,8 @@ int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
rect->x = sel.r.left;
rect->y = sel.r.top;
- rect->w = sel.r.width;
- rect->h = sel.r.height;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
return 0;
}
@@ -1174,8 +1347,13 @@ int V4L2VideoDevice::requestBuffers(unsigned int count,
* successful return the driver's internal buffer management is initialized in
* MMAP mode, and the video device is ready to accept queueBuffer() calls.
*
- * The number of planes and the plane sizes for the allocation are determined
- * by the currently active format on the device as set by setFormat().
+ * The number of planes and their offsets and sizes are determined by the
+ * currently active format on the device as set by setFormat(). They do not map
+ * to the V4L2 buffer planes, but to colour planes of the pixel format. For
+ * instance, if the active format is formats::NV12, the allocated FrameBuffer
+ * instances will have two planes, for the luma and chroma components,
+ * regardless of whether the device uses V4L2_PIX_FMT_NV12 or
+ * V4L2_PIX_FMT_NV12M.
*
* Buffers allocated with this function shall later be freed with
* releaseBuffers(). If buffers have already been allocated with
@@ -1212,8 +1390,13 @@ int V4L2VideoDevice::allocateBuffers(unsigned int count,
* usable with any V4L2 video device in DMABUF mode, or with other dmabuf
* importers.
*
- * The number of planes and the plane sizes for the allocation are determined
- * by the currently active format on the device as set by setFormat().
+ * The number of planes and their offsets and sizes are determined by the
+ * currently active format on the device as set by setFormat(). They do not map
+ * to the V4L2 buffer planes, but to colour planes of the pixel format. For
+ * instance, if the active format is formats::NV12, the allocated FrameBuffer
+ * instances will have two planes, for the luma and chroma components,
+ * regardless of whether the device uses V4L2_PIX_FMT_NV12 or
+ * V4L2_PIX_FMT_NV12M.
*
* Multiple independent sets of buffers can be allocated with multiple calls to
* this function. Device-specific limitations may apply regarding the minimum
@@ -1278,8 +1461,7 @@ std::unique_ptr<FrameBuffer> V4L2VideoDevice::createBuffer(unsigned int index)
buf.index = index;
buf.type = bufferType_;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.length = ARRAY_SIZE(v4l2Planes);
+ buf.length = std::size(v4l2Planes);
buf.m.planes = v4l2Planes;
int ret = ioctl(VIDIOC_QUERYBUF, &buf);
@@ -1300,23 +1482,68 @@ std::unique_ptr<FrameBuffer> V4L2VideoDevice::createBuffer(unsigned int index)
std::vector<FrameBuffer::Plane> planes;
for (unsigned int nplane = 0; nplane < numPlanes; nplane++) {
- FileDescriptor fd = exportDmabufFd(buf.index, nplane);
+ UniqueFD fd = exportDmabufFd(buf.index, nplane);
if (!fd.isValid())
return nullptr;
FrameBuffer::Plane plane;
- plane.fd = std::move(fd);
- plane.length = multiPlanar ?
- buf.m.planes[nplane].length : buf.length;
+ plane.fd = SharedFD(std::move(fd));
+ /*
+	 * The V4L2 API doesn't provide the dmabuf offset of each plane.
+	 * Use 0 as a placeholder offset.
+	 * \todo Set the right offset once the V4L2 API provides a way.
+ */
+ plane.offset = 0;
+ plane.length = multiPlanar ? buf.m.planes[nplane].length : buf.length;
planes.push_back(std::move(plane));
}
- return std::make_unique<FrameBuffer>(std::move(planes));
+ /*
+ * If we have a multi-planar format with a V4L2 single-planar buffer,
+ * split the single V4L2 plane into multiple FrameBuffer planes by
+ * computing the offsets manually.
+ *
+ * The format info is not guaranteed to be valid, as there are no
+ * PixelFormatInfo for metadata formats, so check it first.
+ */
+ if (formatInfo_->isValid() && formatInfo_->numPlanes() != numPlanes) {
+ /*
+ * There's no valid situation where the number of colour planes
+ * differs from the number of V4L2 planes and the V4L2 buffer
+ * has more than one plane.
+ */
+ ASSERT(numPlanes == 1u);
+
+ planes.resize(formatInfo_->numPlanes());
+ const SharedFD &fd = planes[0].fd;
+ size_t offset = 0;
+
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ /*
+ * The stride is reported by V4L2 for the first plane
+ * only. Compute the stride of the other planes by
+ * taking the horizontal subsampling factor into
+ * account, which is equal to the bytesPerGroup ratio of
+ * the planes.
+ */
+ unsigned int stride = format_.planes[0].bpl
+ * formatInfo_->planes[i].bytesPerGroup
+ / formatInfo_->planes[0].bytesPerGroup;
+
+ plane.fd = fd;
+ plane.offset = offset;
+ plane.length = formatInfo_->planeSize(format_.size.height,
+ i, stride);
+ offset += plane.length;
+ }
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
}
-FileDescriptor V4L2VideoDevice::exportDmabufFd(unsigned int index,
- unsigned int plane)
+UniqueFD V4L2VideoDevice::exportDmabufFd(unsigned int index,
+ unsigned int plane)
{
struct v4l2_exportbuffer expbuf = {};
int ret;
@@ -1324,16 +1551,16 @@ FileDescriptor V4L2VideoDevice::exportDmabufFd(unsigned int index,
expbuf.type = bufferType_;
expbuf.index = index;
expbuf.plane = plane;
- expbuf.flags = O_RDWR;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
ret = ioctl(VIDIOC_EXPBUF, &expbuf);
if (ret < 0) {
LOG(V4L2, Error)
<< "Failed to export buffer: " << strerror(-ret);
- return FileDescriptor();
+ return {};
}
- return FileDescriptor(expbuf.fd);
+ return UniqueFD(expbuf.fd);
}
/**
@@ -1386,6 +1613,9 @@ int V4L2VideoDevice::importBuffers(unsigned int count)
*/
int V4L2VideoDevice::releaseBuffers()
{
+ if (!cache_)
+ return 0;
+
LOG(V4L2, Debug) << "Releasing buffers";
delete cache_;
@@ -1406,6 +1636,9 @@ int V4L2VideoDevice::releaseBuffers()
* The best available V4L2 buffer is picked for \a buffer using the V4L2 buffer
* cache.
*
+ * Note that queueBuffer() will fail if the device is in the process of being
+ * stopped from a streaming state through streamOff().
+ *
* \return 0 on success or a negative error code otherwise
*/
int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
@@ -1414,6 +1647,21 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
struct v4l2_buffer buf = {};
int ret;
+ if (state_ == State::Stopping) {
+ LOG(V4L2, Error) << "Device is in a stopping state.";
+ return -ESHUTDOWN;
+ }
+
+ /*
+ * Pipeline handlers should not requeue buffers after releasing the
+	 * buffers on the device. Any occurrence of this error should be fixed
+ * in the pipeline handler directly.
+ */
+ if (!cache_) {
+ LOG(V4L2, Fatal) << "No BufferCache available to queue.";
+ return -ENOENT;
+ }
+
ret = cache_->get(*buffer);
if (ret < 0)
return ret;
@@ -1425,37 +1673,99 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
bool multiPlanar = V4L2_TYPE_IS_MULTIPLANAR(buf.type);
const std::vector<FrameBuffer::Plane> &planes = buffer->planes();
+ const unsigned int numV4l2Planes = format_.planesCount;
+
+ /*
+ * Ensure that the frame buffer has enough planes, and that they're
+ * contiguous if the V4L2 format requires them to be.
+ */
+ if (planes.size() < numV4l2Planes) {
+ LOG(V4L2, Error) << "Frame buffer has too few planes";
+ return -EINVAL;
+ }
+
+ if (planes.size() != numV4l2Planes && !buffer->_d()->isContiguous()) {
+ LOG(V4L2, Error) << "Device format requires contiguous buffer";
+ return -EINVAL;
+ }
if (buf.memory == V4L2_MEMORY_DMABUF) {
if (multiPlanar) {
- for (unsigned int p = 0; p < planes.size(); ++p)
- v4l2Planes[p].m.fd = planes[p].fd.fd();
+ for (unsigned int p = 0; p < numV4l2Planes; ++p)
+ v4l2Planes[p].m.fd = planes[p].fd.get();
} else {
- buf.m.fd = planes[0].fd.fd();
+ buf.m.fd = planes[0].fd.get();
}
}
if (multiPlanar) {
- buf.length = planes.size();
+ buf.length = numV4l2Planes;
buf.m.planes = v4l2Planes;
}
if (V4L2_TYPE_IS_OUTPUT(buf.type)) {
const FrameMetadata &metadata = buffer->metadata();
- if (multiPlanar) {
- unsigned int nplane = 0;
- for (const FrameMetadata::Plane &plane : metadata.planes) {
- v4l2Planes[nplane].bytesused = plane.bytesused;
- v4l2Planes[nplane].length = buffer->planes()[nplane].length;
- nplane++;
+ for (const auto &plane : metadata.planes()) {
+ if (!plane.bytesused)
+				LOG(V4L2, Warning) << "bytesused == 0 is deprecated";
+ }
+
+ if (numV4l2Planes != planes.size()) {
+ /*
+ * If we have a multi-planar buffer with a V4L2
+ * single-planar format, coalesce all planes. The length
+ * and number of bytes used may only differ in the last
+ * plane as any other situation can't be represented.
+ */
+ unsigned int bytesused = 0;
+ unsigned int length = 0;
+
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ bytesused += metadata.planes()[i].bytesused;
+ length += plane.length;
+
+ if (i != planes.size() - 1 && bytesused != length) {
+ LOG(V4L2, Error)
+ << "Holes in multi-planar buffer not supported";
+ return -EINVAL;
+ }
+ }
+
+ if (multiPlanar) {
+ v4l2Planes[0].bytesused = bytesused;
+ v4l2Planes[0].length = length;
+ } else {
+ buf.bytesused = bytesused;
+ buf.length = length;
+ }
+ } else if (multiPlanar) {
+ /*
+ * If we use the multi-planar API, fill in the planes.
+ * The number of planes in the frame buffer and in the
+ * V4L2 buffer is guaranteed to be equal at this point.
+ */
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ v4l2Planes[i].bytesused = metadata.planes()[i].bytesused;
+ v4l2Planes[i].length = plane.length;
}
} else {
- if (metadata.planes.size())
- buf.bytesused = metadata.planes[0].bytesused;
+ /*
+ * Single-planar API with a single plane in the buffer
+ * is trivial to handle.
+ */
+ buf.bytesused = metadata.planes()[0].bytesused;
+ buf.length = planes[0].length;
}
- buf.sequence = metadata.sequence;
+ /*
+ * Timestamps are to be supplied if the device is a mem-to-mem
+ * device. The drivers will have V4L2_BUF_FLAG_TIMESTAMP_COPY
+	 * set, hence these timestamps will be copied from the output
+	 * buffers to capture buffers. If the device is not mem-to-mem,
+	 * there is no harm in setting the timestamps as they will be
+	 * ignored (and overwritten).
+ */
buf.timestamp.tv_sec = metadata.timestamp / 1000000000;
buf.timestamp.tv_usec = (metadata.timestamp / 1000) % 1000000;
}
@@ -1470,8 +1780,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
return ret;
}
- if (queuedBuffers_.empty())
- fdEvent_->setEnabled(true);
+ if (queuedBuffers_.empty()) {
+ fdBufferNotifier_->setEnabled(true);
+ if (watchdogDuration_)
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
queuedBuffers_[buf.index] = buffer;
@@ -1480,7 +1793,6 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
/**
* \brief Slot to handle completed buffer events from the V4L2 video device
- * \param[in] notifier The event notifier
*
* When this slot is called, a Buffer has become available from the device, and
* will be emitted through the bufferReady Signal.
@@ -1488,7 +1800,7 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
* For Capture video devices the FrameBuffer will contain valid data.
* For Output video devices the FrameBuffer can be considered empty.
*/
-void V4L2VideoDevice::bufferAvailable(EventNotifier *notifier)
+void V4L2VideoDevice::bufferAvailable()
{
FrameBuffer *buffer = dequeueBuffer();
if (!buffer)
@@ -1501,8 +1813,8 @@ void V4L2VideoDevice::bufferAvailable(EventNotifier *notifier)
/**
* \brief Dequeue the next available buffer from the video device
*
- * This method dequeues the next available buffer from the device. If no buffer
- * is available to be dequeued it will return nullptr immediately.
+ * This function dequeues the next available buffer from the device. If no
+ * buffer is available to be dequeued it will return nullptr immediately.
*
* \return A pointer to the dequeued buffer on success, or nullptr otherwise
*/
@@ -1531,28 +1843,124 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
LOG(V4L2, Debug) << "Dequeuing buffer " << buf.index;
+ /*
+	 * If the video node fails to stream-on successfully (which can occur
+	 * when queuing a buffer), a vb2 kernel bug can cause the buffer that
+	 * failed to queue to be mistakenly kept in the kernel. The kernel then
+	 * notifies us that a buffer is available to dequeue even though we are
+	 * not aware of it having been queued, and we will thus not find it in
+	 * the queuedBuffers_ list.
+ *
+ * Whilst this kernel bug has been fixed in mainline, ensure that we
+	 * safely ignore unexpected buffers to prevent crashes on older
+	 * kernels.
+ */
+ auto it = queuedBuffers_.find(buf.index);
+ if (it == queuedBuffers_.end()) {
+ LOG(V4L2, Error)
+ << "Dequeued unexpected buffer index " << buf.index;
+
+ return nullptr;
+ }
+
cache_->put(buf.index);
- auto it = queuedBuffers_.find(buf.index);
FrameBuffer *buffer = it->second;
queuedBuffers_.erase(it);
- if (queuedBuffers_.empty())
- fdEvent_->setEnabled(false);
+ if (queuedBuffers_.empty()) {
+ fdBufferNotifier_->setEnabled(false);
+ watchdog_.stop();
+ } else if (watchdogDuration_) {
+ /*
+ * Restart the watchdog timer if there are buffers still queued
+ * in the device.
+ */
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
- buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR
- ? FrameMetadata::FrameError
- : FrameMetadata::FrameSuccess;
- buffer->metadata_.sequence = buf.sequence;
- buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL
- + buf.timestamp.tv_usec * 1000ULL;
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.planes.clear();
- if (multiPlanar) {
- for (unsigned int nplane = 0; nplane < buf.length; nplane++)
- buffer->metadata_.planes.push_back({ planes[nplane].bytesused });
+ metadata.status = buf.flags & V4L2_BUF_FLAG_ERROR
+ ? FrameMetadata::FrameError
+ : FrameMetadata::FrameSuccess;
+ metadata.sequence = buf.sequence;
+ metadata.timestamp = buf.timestamp.tv_sec * 1000000000ULL
+ + buf.timestamp.tv_usec * 1000ULL;
+
+ if (V4L2_TYPE_IS_OUTPUT(buf.type))
+ return buffer;
+
+ /*
+ * Detect kernel drivers which do not reset the sequence number to zero
+ * on stream start.
+ */
+ if (!firstFrame_.has_value()) {
+ if (buf.sequence)
+ LOG(V4L2, Info)
+ << "Zero sequence expected for first frame (got "
+ << buf.sequence << ")";
+ firstFrame_ = buf.sequence;
+ }
+ metadata.sequence -= firstFrame_.value();
+
+ unsigned int numV4l2Planes = multiPlanar ? buf.length : 1;
+
+ if (numV4l2Planes != buffer->planes().size()) {
+ /*
+ * If we have a multi-planar buffer with a V4L2
+ * single-planar format, split the V4L2 buffer across
+ * the buffer planes. Only the last plane may have less
+ * bytes used than its length.
+ */
+ if (numV4l2Planes != 1) {
+ LOG(V4L2, Error)
+ << "Invalid number of planes (" << numV4l2Planes
+ << " != " << buffer->planes().size() << ")";
+
+ metadata.status = FrameMetadata::FrameError;
+ return buffer;
+ }
+
+ /*
+ * With a V4L2 single-planar format, all the data is stored in
+ * a single memory plane. The number of bytes used is conveyed
+ * through that plane when using the V4L2 multi-planar API, or
+ * set directly in the buffer when using the V4L2 single-planar
+ * API.
+ */
+ unsigned int bytesused = multiPlanar ? planes[0].bytesused
+ : buf.bytesused;
+ unsigned int remaining = bytesused;
+
+ for (auto [i, plane] : utils::enumerate(buffer->planes())) {
+ if (!remaining) {
+ LOG(V4L2, Error)
+ << "Dequeued buffer (" << bytesused
+ << " bytes) too small for plane lengths "
+ << utils::join(buffer->planes(), "/",
+ [](const FrameBuffer::Plane &p) {
+ return p.length;
+ });
+
+ metadata.status = FrameMetadata::FrameError;
+ return buffer;
+ }
+
+ metadata.planes()[i].bytesused =
+ std::min(plane.length, remaining);
+ remaining -= metadata.planes()[i].bytesused;
+ }
+ } else if (multiPlanar) {
+ /*
+ * If we use the multi-planar API, fill in the planes.
+ * The number of planes in the frame buffer and in the
+ * V4L2 buffer is guaranteed to be equal at this point.
+ */
+ for (unsigned int i = 0; i < numV4l2Planes; ++i)
+ metadata.planes()[i].bytesused = planes[i].bytesused;
} else {
- buffer->metadata_.planes.push_back({ buf.bytesused });
+ metadata.planes()[0].bytesused = buf.bytesused;
}
return buffer;
@@ -1571,6 +1979,8 @@ int V4L2VideoDevice::streamOn()
{
int ret;
+ firstFrame_.reset();
+
ret = ioctl(VIDIOC_STREAMON, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1578,6 +1988,10 @@ int V4L2VideoDevice::streamOn()
return ret;
}
+ state_ = State::Streaming;
+ if (watchdogDuration_ && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+
return 0;
}
@@ -1589,12 +2003,21 @@ int V4L2VideoDevice::streamOn()
* and the bufferReady signal is emitted for them. The order in which those
* buffers are dequeued is not specified.
*
+ * This is a no-op if the stream is not started and no buffers are queued.
+ *
* \return 0 on success or a negative error code otherwise
*/
int V4L2VideoDevice::streamOff()
{
int ret;
+ if (state_ != State::Streaming && queuedBuffers_.empty())
+ return 0;
+
+ if (watchdogDuration_.count())
+ watchdog_.stop();
+
ret = ioctl(VIDIOC_STREAMOFF, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1602,189 +2025,121 @@ int V4L2VideoDevice::streamOff()
return ret;
}
+ state_ = State::Stopping;
+
/* Send back all queued buffers. */
for (auto it : queuedBuffers_) {
FrameBuffer *buffer = it.second;
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.status = FrameMetadata::FrameCancelled;
+ cache_->put(it.first);
+ metadata.status = FrameMetadata::FrameCancelled;
bufferReady.emit(buffer);
}
+ ASSERT(cache_->isEmpty());
+
queuedBuffers_.clear();
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_->setEnabled(false);
+ state_ = State::Stopped;
return 0;
}
/**
- * \brief Create a new video device instance from \a entity in media device
- * \a media
- * \param[in] media The media device where the entity is registered
- * \param[in] entity The media entity name
+ * \brief Set the dequeue timeout value
+ * \param[in] timeout The timeout value to be used
*
- * Releasing memory of the newly created instance is responsibility of the
- * caller of this function.
+ * Sets a timeout value, given by \a timeout, that will be used by a watchdog
+ * timer to ensure buffer dequeue events occur periodically when the
+ * device is streaming. The watchdog timer is only active when the device is
+ * streaming, so it is not necessary to disable it when the device stops
+ * streaming. The timeout value can be safely updated at any time.
*
- * \return A newly created V4L2VideoDevice on success, nullptr otherwise
+ * If the timer expires, the \ref V4L2VideoDevice::dequeueTimeout signal is
+ * emitted. This can typically be used by pipeline handlers to be notified of
+ * stalled devices.
+ *
+ * Set \a timeout to 0 to disable the watchdog timer.
*/
-V4L2VideoDevice *V4L2VideoDevice::fromEntityName(const MediaDevice *media,
- const std::string &entity)
+void V4L2VideoDevice::setDequeueTimeout(utils::Duration timeout)
{
- MediaEntity *mediaEntity = media->getEntityByName(entity);
- if (!mediaEntity)
- return nullptr;
+ watchdogDuration_ = timeout;
- return new V4L2VideoDevice(mediaEntity);
+ watchdog_.stop();
+ if (watchdogDuration_ && state_ == State::Streaming && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(timeout));
}
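A minimal sketch of how a pipeline handler might arm the watchdog (hypothetical code; `MyPipelineHandler::handleStall` and the 2 s value are assumptions, not part of this patch):

	using namespace std::chrono_literals;

	video->setDequeueTimeout(2s);
	video->dequeueTimeout.connect(this, &MyPipelineHandler::handleStall);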
/**
- * \brief Convert a \a v4l2Fourcc to the corresponding PixelFormat
- * \param[in] v4l2Fourcc The V4L2 pixel format (V4L2_PIX_FORMAT_*)
- * \return The PixelFormat corresponding to \a v4l2Fourcc
+ * \var V4L2VideoDevice::dequeueTimeout
+ * \brief A Signal emitted when the dequeue watchdog timer expires
*/
-PixelFormat V4L2VideoDevice::toPixelFormat(V4L2PixelFormat v4l2Fourcc)
+
+/**
+ * \brief Slot to handle an expired dequeue timer
+ *
+ * When this slot is called, the time between successive dequeue events has
+ * exceeded the configured timeout. Emit the \ref
+ * V4L2VideoDevice::dequeueTimeout signal.
+ */
+void V4L2VideoDevice::watchdogExpired()
{
- switch (v4l2Fourcc) {
- /* RGB formats. */
- case V4L2_PIX_FMT_RGB24:
- return PixelFormat(DRM_FORMAT_BGR888);
- case V4L2_PIX_FMT_BGR24:
- return PixelFormat(DRM_FORMAT_RGB888);
- case V4L2_PIX_FMT_RGBA32:
- return PixelFormat(DRM_FORMAT_ABGR8888);
- case V4L2_PIX_FMT_ABGR32:
- return PixelFormat(DRM_FORMAT_ARGB8888);
- case V4L2_PIX_FMT_ARGB32:
- return PixelFormat(DRM_FORMAT_BGRA8888);
- case V4L2_PIX_FMT_BGRA32:
- return PixelFormat(DRM_FORMAT_RGBA8888);
-
- /* YUV packed formats. */
- case V4L2_PIX_FMT_YUYV:
- return PixelFormat(DRM_FORMAT_YUYV);
- case V4L2_PIX_FMT_YVYU:
- return PixelFormat(DRM_FORMAT_YVYU);
- case V4L2_PIX_FMT_UYVY:
- return PixelFormat(DRM_FORMAT_UYVY);
- case V4L2_PIX_FMT_VYUY:
- return PixelFormat(DRM_FORMAT_VYUY);
-
- /* YUY planar formats. */
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV16M:
- return PixelFormat(DRM_FORMAT_NV16);
- case V4L2_PIX_FMT_NV61:
- case V4L2_PIX_FMT_NV61M:
- return PixelFormat(DRM_FORMAT_NV61);
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV12M:
- return PixelFormat(DRM_FORMAT_NV12);
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV21M:
- return PixelFormat(DRM_FORMAT_NV21);
-
- /* Compressed formats. */
- case V4L2_PIX_FMT_MJPEG:
- return PixelFormat(DRM_FORMAT_MJPEG);
-
- /* V4L2 formats not yet supported by DRM. */
- case V4L2_PIX_FMT_GREY:
- default:
- /*
- * \todo We can't use LOG() in a static method of a Loggable
- * class. Until we fix the logger, work around it.
- */
- libcamera::_log(__FILE__, __LINE__, _LOG_CATEGORY(V4L2)(),
- LogError).stream()
- << "Unsupported V4L2 pixel format "
- << v4l2Fourcc.toString();
- return PixelFormat();
- }
+ LOG(V4L2, Warning)
+ << "Dequeue timer of " << watchdogDuration_ << " has expired!";
+
+ dequeueTimeout.emit();
}
/**
- * \brief Convert \a PixelFormat to its corresponding V4L2 FourCC
- * \param[in] pixelFormat The PixelFormat to convert
- *
- * For multiplanar formats, the V4L2 format variant (contiguous or
- * non-contiguous planes) is selected automatically based on the capabilities
- * of the video device. If the video device supports the V4L2 multiplanar API,
- * non-contiguous formats are preferred.
+ * \brief Create a new video device instance from \a entity in media device
+ * \a media
+ * \param[in] media The media device where the entity is registered
+ * \param[in] entity The media entity name
*
- * \return The V4L2_PIX_FMT_* pixel format code corresponding to \a pixelFormat
+ * \return A newly created V4L2VideoDevice on success, nullptr otherwise
*/
-V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat)
+std::unique_ptr<V4L2VideoDevice>
+V4L2VideoDevice::fromEntityName(const MediaDevice *media,
+ const std::string &entity)
{
- return toV4L2PixelFormat(pixelFormat, caps_.isMultiplanar());
+ MediaEntity *mediaEntity = media->getEntityByName(entity);
+ if (!mediaEntity)
+ return nullptr;
+
+ return std::make_unique<V4L2VideoDevice>(mediaEntity);
}
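Usage sketch (the entity name is hypothetical; `media` is an assumed MediaDevice pointer):

	std::unique_ptr<V4L2VideoDevice> video =
		V4L2VideoDevice::fromEntityName(media, "sensor-output");
	if (!video || video->open() < 0)
		return -ENODEV;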
/**
- * \brief Convert \a pixelFormat to its corresponding V4L2 FourCC
+ * \brief Convert \a PixelFormat to a V4L2PixelFormat supported by the device
* \param[in] pixelFormat The PixelFormat to convert
- * \param[in] multiplanar V4L2 Multiplanar API support flag
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. Set the \a multiplanar parameter to false to select a format
- * with contiguous planes, or to true to select a format with non-contiguous
- * planes.
+ * Convert \a pixelFormat to a V4L2 FourCC that is known to be supported by
+ * the video device.
+ *
+ * A V4L2VideoDevice may support different V4L2 pixel formats that map to the
+ * same PixelFormat. This is the case for the contiguous and non-contiguous
+ * variants of multiplanar formats, and for the V4L2 MJPEG and JPEG pixel
+ * formats. Converting a PixelFormat to a V4L2PixelFormat may thus have
+ * multiple answers.
*
- * \return The V4L2_PIX_FMT_* pixel format code corresponding to \a pixelFormat
+ * This function converts the \a pixelFormat using the list of V4L2 pixel
+ * formats that the V4L2VideoDevice supports. This guarantees that the returned
+ * V4L2PixelFormat will be valid for the device. If multiple matches are still
+ * possible, contiguous variants are preferred. If the \a pixelFormat is not
+ * supported by the device, the function returns an invalid V4L2PixelFormat.
+ *
+ * \return The V4L2PixelFormat corresponding to \a pixelFormat if supported by
+ * the device, or an invalid V4L2PixelFormat otherwise
*/
-V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar)
+V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) const
{
- switch (pixelFormat) {
- /* RGB formats. */
- case DRM_FORMAT_BGR888:
- return V4L2PixelFormat(V4L2_PIX_FMT_RGB24);
- case DRM_FORMAT_RGB888:
- return V4L2PixelFormat(V4L2_PIX_FMT_BGR24);
- case DRM_FORMAT_ABGR8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_RGBA32);
- case DRM_FORMAT_ARGB8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_ABGR32);
- case DRM_FORMAT_BGRA8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_ARGB32);
- case DRM_FORMAT_RGBA8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_BGRA32);
-
- /* YUV packed formats. */
- case DRM_FORMAT_YUYV:
- return V4L2PixelFormat(V4L2_PIX_FMT_YUYV);
- case DRM_FORMAT_YVYU:
- return V4L2PixelFormat(V4L2_PIX_FMT_YVYU);
- case DRM_FORMAT_UYVY:
- return V4L2PixelFormat(V4L2_PIX_FMT_UYVY);
- case DRM_FORMAT_VYUY:
- return V4L2PixelFormat(V4L2_PIX_FMT_VYUY);
-
- /*
- * YUY planar formats.
- * \todo Add support for non-contiguous memory planes
- * \todo Select the format variant not only based on \a multiplanar but
- * also take into account the formats supported by the device.
- */
- case DRM_FORMAT_NV16:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV16);
- case DRM_FORMAT_NV61:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV61);
- case DRM_FORMAT_NV12:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV12);
- case DRM_FORMAT_NV21:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV21);
+ const std::vector<V4L2PixelFormat> &v4l2PixelFormats =
+ V4L2PixelFormat::fromPixelFormat(pixelFormat);
- /* Compressed formats. */
- case DRM_FORMAT_MJPEG:
- return V4L2PixelFormat(V4L2_PIX_FMT_MJPEG);
+ for (const V4L2PixelFormat &v4l2Format : v4l2PixelFormats) {
+ if (pixelFormats_.count(v4l2Format))
+ return v4l2Format;
}
- /*
- * \todo We can't use LOG() in a static method of a Loggable
- * class. Until we fix the logger, work around it.
- */
- libcamera::_log(__FILE__, __LINE__, _LOG_CATEGORY(V4L2)(), LogError).stream()
- << "Unsupported V4L2 pixel format " << pixelFormat.toString();
return {};
}
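A short sketch of converting a PixelFormat with this device-aware helper (assuming `video` is an open device pointer):

	V4L2PixelFormat v4l2Format = video->toV4L2PixelFormat(formats::NV12);
	if (!v4l2Format.isValid())
		LOG(V4L2, Error) << "NV12 is not supported by the device";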
@@ -1792,15 +2147,24 @@ V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelForma
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
+ * Memory-to-memory devices in the kernel using the V4L2 M2M API can
+ * operate with multiple contexts for parallel operations on a single
+ * device. Each instance of a V4L2M2MDevice represents a single context.
+ *
* The V4L2M2MDevice manages two V4L2VideoDevice instances on the same
* deviceNode which operate together using two queues to implement the V4L2
* Memory to Memory API.
*
- * The two devices should be opened by calling open() on the V4L2M2MDevice, and
- * can be closed by calling close on the V4L2M2MDevice.
+ * Users of this class should create a new V4L2M2MDevice instance for each
+ * desired execution context, open it by calling open(), and close it by
+ * calling close().
*
* Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture
* or output V4L2VideoDevice is not permitted.
+ *
+ * Once the M2M device is open, users can operate on the output and capture
+ * queues represented by the V4L2VideoDevice returned by the output() and
+ * capture() functions.
*/
/**
@@ -1842,21 +2206,18 @@ V4L2M2MDevice::~V4L2M2MDevice()
*/
int V4L2M2MDevice::open()
{
- int fd;
int ret;
/*
* The output and capture V4L2VideoDevice instances use the same file
- * handle for the same device node. The local file handle can be closed
- * as the V4L2VideoDevice::open() retains a handle by duplicating the
- * fd passed in.
+ * handle for the same device node.
*/
- fd = syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
- O_RDWR | O_NONBLOCK);
- if (fd < 0) {
+ SharedFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ O_RDWR | O_NONBLOCK));
+ if (!fd.isValid()) {
ret = -errno;
- LOG(V4L2, Error)
- << "Failed to open V4L2 M2M device: " << strerror(-ret);
+ LOG(V4L2, Error) << "Failed to open V4L2 M2M device: "
+ << strerror(-ret);
return ret;
}
@@ -1868,13 +2229,10 @@ int V4L2M2MDevice::open()
if (ret)
goto err;
- ::close(fd);
-
return 0;
err:
close();
- ::close(fd);
return ret;
}
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
new file mode 100644
index 00000000..a5e42461
--- /dev/null
+++ b/src/libcamera/yaml_parser.cpp
@@ -0,0 +1,784 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * libcamera YAML parsing helper
+ */
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include <charconv>
+#include <errno.h>
+#include <functional>
+#include <limits>
+#include <stdlib.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <yaml.h>
+
+/**
+ * \file yaml_parser.h
+ * \brief A YAML parser helper
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(YamlParser)
+
+namespace {
+
+/* Empty static YamlObject as a safe result for invalid operations */
+static const YamlObject empty;
+
+} /* namespace */
+
+/**
+ * \class YamlObject
+ * \brief A class representing the tree structure of the YAML content
+ *
+ * The YamlObject class represents the tree structure of YAML content. A
+ * YamlObject can be empty, a dictionary or list of YamlObjects, or, if it is
+ * a tree leaf, a value.
+ */
+
+YamlObject::YamlObject()
+ : type_(Type::Empty)
+{
+}
+
+YamlObject::~YamlObject() = default;
+
+/**
+ * \fn YamlObject::isValue()
+ * \brief Return whether the YamlObject is a value
+ *
+ * \return True if the YamlObject is a value, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isList()
+ * \brief Return whether the YamlObject is a list
+ *
+ * \return True if the YamlObject is a list, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isDictionary()
+ * \brief Return whether the YamlObject is a dictionary
+ *
+ * \return True if the YamlObject is a dictionary, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isEmpty()
+ * \brief Return whether the YamlObject is empty
+ *
+ * \return True if the YamlObject is empty, false otherwise
+ */
+
+/**
+ * \fn YamlObject::operator bool()
+ * \brief Return whether the YamlObject is non-empty
+ *
+ * \return False if the YamlObject is empty, true otherwise
+ */
+
+/**
+ * \fn YamlObject::size()
+ * \brief Retrieve the number of elements in a dictionary or list YamlObject
+ *
+ * This function retrieves the size of the YamlObject, defined as the number of
+ * child elements it contains. Only YamlObject instances of Dictionary or List
+ * types have a size; calling this function on other types of instances is
+ * invalid and results in undefined behaviour.
+ *
+ * \return The size of the YamlObject
+ */
+std::size_t YamlObject::size() const
+{
+ switch (type_) {
+ case Type::Dictionary:
+ case Type::List:
+ return list_.size();
+ default:
+ return 0;
+ }
+}
+
+/**
+ * \fn template<typename T> YamlObject::get<T>() const
+ * \brief Parse the YamlObject as a \a T value
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), std::nullopt is returned.
+ *
+ * \return The YamlObject value, or std::nullopt if parsing failed
+ */
+
+/**
+ * \fn template<typename T, typename U> YamlObject::get<T>(U &&defaultValue) const
+ * \brief Parse the YamlObject as a \a T value
+ * \param[in] defaultValue The default value when failing to parse
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), the \a defaultValue is returned.
+ *
+ * \return The YamlObject value, or \a defaultValue if parsing failed
+ */
+
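A brief sketch of value retrieval (here `root`, `"gain"` and `"width"` are hypothetical names; `root` stands for a YamlObject obtained from a parsed file):

	std::optional<double> gain = root["gain"].get<double>();
	uint32_t width = root["width"].get<uint32_t>(640);
	if (!gain)
		LOG(YamlParser, Warning) << "Missing or invalid 'gain'";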
+#ifndef __DOXYGEN__
+
+template<>
+std::optional<bool>
+YamlObject::Getter<bool>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_ == "true")
+ return true;
+ else if (obj.value_ == "false")
+ return false;
+
+ return std::nullopt;
+}
+
+template<typename T>
+struct YamlObject::Getter<T, std::enable_if_t<
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T>>>
+{
+ std::optional<T> get(const YamlObject &obj) const
+ {
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ const std::string &str = obj.value_;
+ T value;
+
+ auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
+ value);
+ if (ptr != str.data() + str.size() || ec != std::errc())
+ return std::nullopt;
+
+ return value;
+ }
+};
+
+template struct YamlObject::Getter<int8_t>;
+template struct YamlObject::Getter<uint8_t>;
+template struct YamlObject::Getter<int16_t>;
+template struct YamlObject::Getter<uint16_t>;
+template struct YamlObject::Getter<int32_t>;
+template struct YamlObject::Getter<uint32_t>;
+
+template<>
+std::optional<float>
+YamlObject::Getter<float>::get(const YamlObject &obj) const
+{
+ return obj.get<double>();
+}
+
+template<>
+std::optional<double>
+YamlObject::Getter<double>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_.empty())
+ return std::nullopt;
+
+ char *end;
+
+ errno = 0;
+ double value = utils::strtod(obj.value_.c_str(), &end);
+
+ if ('\0' != *end || errno == ERANGE)
+ return std::nullopt;
+
+ return value;
+}
+
+template<>
+std::optional<std::string>
+YamlObject::Getter<std::string>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ return obj.value_;
+}
+
+template<>
+std::optional<Size>
+YamlObject::Getter<Size>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::List)
+ return std::nullopt;
+
+ if (obj.list_.size() != 2)
+ return std::nullopt;
+
+ auto width = obj.list_[0].value->get<uint32_t>();
+ if (!width)
+ return std::nullopt;
+
+ auto height = obj.list_[1].value->get<uint32_t>();
+ if (!height)
+ return std::nullopt;
+
+ return Size(*width, *height);
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn template<typename T> YamlObject::getList<T>() const
+ * \brief Parse the YamlObject as a list of \a T
+ *
+ * This function parses the value of the YamlObject as a list of \a T objects,
+ * and returns the value as a \a std::vector<T>. If parsing fails, std::nullopt
+ * is returned.
+ *
+ * \return The YamlObject value as a std::vector<T>, or std::nullopt if parsing
+ * failed
+ */
+
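Usage sketch (the "gamma-table" key is a hypothetical example):

	std::optional<std::vector<uint16_t>> table =
		root["gamma-table"].getList<uint16_t>();
	if (!table)
		LOG(YamlParser, Error) << "Invalid or missing gamma table";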
+#ifndef __DOXYGEN__
+
+template<typename T,
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<float, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> *>
+std::optional<std::vector<T>> YamlObject::getList() const
+{
+ if (type_ != Type::List)
+ return std::nullopt;
+
+ std::vector<T> values;
+ values.reserve(list_.size());
+
+ for (const YamlObject &entry : asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ values.emplace_back(*value);
+ }
+
+ return values;
+}
+
+template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<float>> YamlObject::getList<float>() const;
+template std::optional<std::vector<double>> YamlObject::getList<double>() const;
+template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
+template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
+template std::optional<std::vector<int16_t>> YamlObject::getList<int16_t>() const;
+template std::optional<std::vector<uint16_t>> YamlObject::getList<uint16_t>() const;
+template std::optional<std::vector<int32_t>> YamlObject::getList<int32_t>() const;
+template std::optional<std::vector<uint32_t>> YamlObject::getList<uint32_t>() const;
+template std::optional<std::vector<std::string>> YamlObject::getList<std::string>() const;
+template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn YamlObject::asDict() const
+ * \brief Wrap a dictionary YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of Dictionary type into an adapter that exposes
+ * iterators, as well as begin() and end() functions, allowing usage of
+ * range-based for loops with YamlObject. The parser preserves the order of
+ * items in the YAML document, so the iteration order matches the order in the
+ * YAML data.
+ *
+ * The iterator's value_type is a
+ * <em>std::pair<const std::string &, const \ref YamlObject &></em>.
+ *
+ * If the YamlObject is not of Dictionary type, the returned adapter operates
+ * as an empty container.
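+ *
+ * A minimal usage sketch, assuming \a obj wraps a YAML mapping of string
+ * values:
+ *
+ * \code{.cpp}
+ *
+ * for (const auto &[key, value] : obj.asDict())
+ * std::cout << key << ": " << value.get<std::string>("") << std::endl;
+ *
+ * \endcode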
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
+
+/**
+ * \fn YamlObject::asList() const
+ * \brief Wrap a list YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of List type into an adapter that exposes iterators, as
+ * well as begin() and end() functions, allowing usage of range-based for loops
+ * with YamlObject. As YAML lists are ordered, the iteration order is identical
+ * to the list order in the YAML data.
+ *
+ * The iterator's value_type is a <em>const YamlObject &</em>.
+ *
+ * If the YamlObject is not of List type, the returned adapter operates as an
+ * empty container.
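+ *
+ * A minimal usage sketch, assuming \a obj wraps a YAML list of integers:
+ *
+ * \code{.cpp}
+ *
+ * for (const YamlObject &entry : obj.asList())
+ * std::cout << entry.get<int32_t>(0) << std::endl;
+ *
+ * \endcode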
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
+
+/**
+ * \fn YamlObject::operator[](std::size_t index) const
+ * \brief Retrieve an element from a list YamlObject by index
+ * \param[in] index The index of the element to retrieve
+ *
+ * This function retrieves an element of the YamlObject. Only YamlObject
+ * instances of List type associate elements with an index; calling this
+ * function on other types of instances, or with an invalid index, results in
+ * an empty object.
+ *
+ * \return The YamlObject as an element of the list
+ */
+const YamlObject &YamlObject::operator[](std::size_t index) const
+{
+ if (type_ != Type::List || index >= size())
+ return empty;
+
+ return *list_[index].value;
+}
+
+/**
+ * \fn YamlObject::contains()
+ * \brief Check if an element of a dictionary exists
+ * \param[in] key The key of the element to check for
+ *
+ * This function checks if the YamlObject contains an element. Only YamlObject
+ * instances of Dictionary type associate elements with names; calling this
+ * function on other types of instances is invalid and results in undefined
+ * behaviour.
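+ *
+ * A minimal usage sketch, assuming \a obj wraps a YAML mapping that may
+ * contain a "name" entry:
+ *
+ * \code{.cpp}
+ *
+ * if (obj.contains("name"))
+ * std::cout << obj["name"].get<std::string>("") << std::endl;
+ *
+ * \endcode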
+ *
+ * \return True if an element exists, false otherwise
+ */
+bool YamlObject::contains(std::string_view key) const
+{
+ return dictionary_.find(key) != dictionary_.end();
+}
+
+/**
+ * \fn YamlObject::operator[](std::string_view key) const
+ * \brief Retrieve a member by name from the dictionary
+ * \param[in] key The name of the member to retrieve
+ *
+ * This function retrieves a member of a YamlObject by name. Only YamlObject
+ * instances of Dictionary type associate elements with names; calling this
+ * function on other types of instances, or with a nonexistent key, results in
+ * an empty object.
+ *
+ * \return The YamlObject corresponding to the \a key member
+ */
+const YamlObject &YamlObject::operator[](std::string_view key) const
+{
+ if (type_ != Type::Dictionary)
+ return empty;
+
+ auto iter = dictionary_.find(key);
+ if (iter == dictionary_.end())
+ return empty;
+
+ return *iter->second;
+}
+
+#ifndef __DOXYGEN__
+
+class YamlParserContext
+{
+public:
+ YamlParserContext();
+ ~YamlParserContext();
+
+ int init(File &file);
+ int parseContent(YamlObject &yamlObject);
+
+private:
+ struct EventDeleter {
+ void operator()(yaml_event_t *event) const
+ {
+ yaml_event_delete(event);
+ delete event;
+ }
+ };
+ using EventPtr = std::unique_ptr<yaml_event_t, EventDeleter>;
+
+ static int yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead);
+
+ EventPtr nextEvent();
+
+ void readValue(std::string &value, EventPtr event);
+ int parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem);
+ int parseNextYamlObject(YamlObject &yamlObject, EventPtr event);
+
+ bool parserValid_;
+ yaml_parser_t parser_;
+};
+
+/**
+ * \class YamlParserContext
+ * \brief Class for YamlParser parsing and context data
+ *
+ * The YamlParserContext class stores the internal yaml_parser_t and provides
+ * helper functions to do event-based parsing for YAML files.
+ */
+YamlParserContext::YamlParserContext()
+ : parserValid_(false)
+{
+}
+
+/**
+ * \brief Destroy the YamlParserContext and delete the internal parser
+ */
+YamlParserContext::~YamlParserContext()
+{
+ if (parserValid_) {
+ yaml_parser_delete(&parser_);
+ parserValid_ = false;
+ }
+}
+
+/**
+ * \fn YamlParserContext::init()
+ * \brief Initialize a parser with an opened file for parsing
+ * \param[in] file The YAML file to parse
+ *
+ * Prior to parsing the YAML content, the YamlParserContext must be initialized
+ * with a file to create an internal parser. The file needs to stay valid until
+ * parsing completes.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser has failed to initialize
+ */
+int YamlParserContext::init(File &file)
+{
+ /* yaml_parser_initialize returns 1 when it succeeds */
+ if (!yaml_parser_initialize(&parser_)) {
+ LOG(YamlParser, Error) << "Failed to initialize YAML parser";
+ return -EINVAL;
+ }
+ parserValid_ = true;
+ yaml_parser_set_input(&parser_, &YamlParserContext::yamlRead, &file);
+
+ return 0;
+}
+
+int YamlParserContext::yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead)
+{
+ File *file = static_cast<File *>(data);
+
+ Span<unsigned char> buf{ buffer, size };
+ ssize_t ret = file->read(buf);
+ if (ret < 0)
+ return 0;
+
+ *sizeRead = ret;
+ return 1;
+}
+
+/**
+ * \fn YamlParserContext::nextEvent()
+ * \brief Get the next event
+ *
+ * Get the next event in the current YAML event stream, and return nullptr
+ * when there are no more events or a parse error occurs.
+ *
+ * \return The next event on success or nullptr otherwise
+ */
+YamlParserContext::EventPtr YamlParserContext::nextEvent()
+{
+ EventPtr event(new yaml_event_t);
+
+ /* yaml_parser_parse returns 1 when it succeeds */
+ if (!yaml_parser_parse(&parser_, event.get())) {
+ File *file = static_cast<File *>(parser_.read_handler_data);
+
+ LOG(YamlParser, Error) << file->fileName() << ":"
+ << parser_.problem_mark.line << ":"
+ << parser_.problem_mark.column << " "
+ << parser_.problem << " "
+ << parser_.context;
+
+ return nullptr;
+ }
+
+ return event;
+}
+
+/**
+ * \fn YamlParserContext::parseContent()
+ * \brief Parse the content of a YAML document
+ * \param[out] yamlObject The YamlObject that the document is parsed into
+ *
+ * Check YAML start and end events of a YAML document, and parse the root object
+ * of the YAML document into a YamlObject.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser failed to validate the structure of the YAML document
+ */
+int YamlParserContext::parseContent(YamlObject &yamlObject)
+{
+ /* Check start of the YAML file. */
+ EventPtr event = nextEvent();
+ if (!event || event->type != YAML_STREAM_START_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_START_EVENT)
+ return -EINVAL;
+
+ /* Parse the root object. */
+ event = nextEvent();
+ if (parseNextYamlObject(yamlObject, std::move(event)))
+ return -EINVAL;
+
+ /* Check end of the YAML file. */
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_END_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_STREAM_END_EVENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * \fn YamlParserContext::readValue()
+ * \brief Parse a scalar event and fill its content into a string
+ * \param[out] value The string to fill with the scalar value
+ * \param[in] event The scalar event to read the value from
+ *
+ * A helper function to parse a scalar event as a string. The caller needs to
+ * guarantee the event is of scalar type.
+ */
+void YamlParserContext::readValue(std::string &value, EventPtr event)
+{
+ value.assign(reinterpret_cast<char *>(event->data.scalar.value),
+ event->data.scalar.length);
+}
+
+/**
+ * \fn YamlParserContext::parseDictionaryOrList()
+ * \brief A helper function to abstract the common part of parsing dictionary or list
+ *
+ * \param[in] type The type of object to parse, Dictionary or List
+ * \param[in] parseItem The callback to handle an item
+ *
+ * A helper function to abstract parsing an item from a dictionary or a list.
+ * The differences between them in the YAML event stream are:
+ *
+ * 1. The start and end event types are different
+ * 2. There is a leading scalar string as key in the items of a dictionary
+ *
+ * The caller should handle the leading key string in its callback parseItem
+ * when it's a dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the dictionary or list
+ */
+int YamlParserContext::parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem)
+{
+ yaml_event_type_t endEventType = YAML_SEQUENCE_END_EVENT;
+ if (type == YamlObject::Type::Dictionary)
+ endEventType = YAML_MAPPING_END_EVENT;
+
+ /*
+ * Add a safety counter to make sure we don't loop indefinitely in case
+ * the YAML file is malformed.
+ */
+ for (unsigned int sentinel = 2000; sentinel; sentinel--) {
+ auto evt = nextEvent();
+ if (!evt)
+ return -EINVAL;
+
+ if (evt->type == endEventType)
+ return 0;
+
+ int ret = parseItem(std::move(evt));
+ if (ret)
+ return ret;
+ }
+
+ LOG(YamlParser, Error) << "The YAML file contains a List or Dictionary"
+ " whose size exceeds the parser's limit (1000)";
+
+ return -EINVAL;
+}
+
+/**
+ * \fn YamlParserContext::parseNextYamlObject()
+ * \brief Parse next YAML event and read it as a YamlObject
+ * \param[out] yamlObject The YamlObject that the event is parsed into
+ * \param[in] event The leading event of the object
+ *
+ * Parse next YAML object separately as a value, list or dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the YAML object
+ */
+int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr event)
+{
+ if (!event)
+ return -EINVAL;
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT:
+ yamlObject.type_ = YamlObject::Type::Value;
+ readValue(yamlObject.value_, std::move(event));
+ return 0;
+
+ case YAML_SEQUENCE_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::List;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evt) {
+ list.emplace_back(std::string{}, std::make_unique<YamlObject>());
+ return parseNextYamlObject(*list.back().value, std::move(evt));
+ };
+ return parseDictionaryOrList(YamlObject::Type::List, handler);
+ }
+
+ case YAML_MAPPING_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::Dictionary;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evtKey) {
+ /* Parse key */
+ if (evtKey->type != YAML_SCALAR_EVENT) {
+ LOG(YamlParser, Error) << "Expect key at line: "
+ << evtKey->start_mark.line
+ << " column: "
+ << evtKey->start_mark.column;
+ return -EINVAL;
+ }
+
+ std::string key;
+ readValue(key, std::move(evtKey));
+
+ /* Parse value */
+ EventPtr evtValue = nextEvent();
+ if (!evtValue)
+ return -EINVAL;
+
+ auto &elem = list.emplace_back(std::move(key),
+ std::make_unique<YamlObject>());
+ return parseNextYamlObject(*elem.value, std::move(evtValue));
+ };
+ int ret = parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ if (ret)
+ return ret;
+
+ auto &dictionary = yamlObject.dictionary_;
+ for (const auto &elem : list)
+ dictionary.emplace(elem.key, elem.value.get());
+
+ return 0;
+ }
+
+ default:
+ LOG(YamlParser, Error) << "Invalid YAML file";
+ return -EINVAL;
+ }
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \class YamlParser
+ * \brief A helper class for parsing a YAML file
+ *
+ * The YamlParser class provides an easy interface to parse the contents of a
+ * YAML file into a tree of YamlObject instances.
+ *
+ * Example usage:
+ *
+ * \code{.unparsed}
+ *
+ * name:
+ * "John"
+ * numbers:
+ * - 1
+ * - 2
+ *
+ * \endcode
+ *
+ * The following code illustrates how to parse the above YAML file:
+ *
+ * \code{.cpp}
+ *
+ * std::unique_ptr<YamlObject> root = YamlParser::parse(fh);
+ * if (!root)
+ * return;
+ *
+ * if (!root->isDictionary())
+ * return;
+ *
+ * const YamlObject &name = (*root)["name"];
+ * std::cout << name.get<std::string>("") << std::endl;
+ *
+ * const YamlObject &numbers = (*root)["numbers"];
+ * if (!numbers.isList())
+ * return;
+ *
+ * for (std::size_t i = 0; i < numbers.size(); i++)
+ * std::cout << numbers[i].get<int32_t>(0) << std::endl;
+ *
+ * \endcode
+ *
+ * The YamlParser::parse() function takes an open File, parses its contents, and
+ * returns a pointer to a YamlObject corresponding to the root node of the YAML
+ * document.
+ *
+ * The parser preserves the order of items in the YAML file, for both lists and
+ * dictionaries.
+ */
+
+/**
+ * \brief Parse a YAML file as a YamlObject
+ * \param[in] file The YAML file to parse
+ *
+ * The YamlParser::parse() function takes a file, parses its contents, and
+ * returns a pointer to a YamlObject corresponding to the root node of the YAML
+ * document.
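+ *
+ * A minimal usage sketch, assuming a configuration file at the hypothetical
+ * path "/etc/libcamera/config.yaml":
+ *
+ * \code{.cpp}
+ *
+ * File file("/etc/libcamera/config.yaml");
+ * if (!file.open(File::OpenModeFlag::ReadOnly))
+ * return;
+ *
+ * std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ * if (!root)
+ * return;
+ *
+ * \endcode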
+ *
+ * \return Pointer to the resulting YamlObject on success or nullptr otherwise
+ */
+std::unique_ptr<YamlObject> YamlParser::parse(File &file)
+{
+ YamlParserContext context;
+
+ if (context.init(file))
+ return nullptr;
+
+ std::unique_ptr<YamlObject> root(new YamlObject());
+
+ if (context.parseContent(*root)) {
+ LOG(YamlParser, Error)
+ << "Failed to parse YAML content from "
+ << file.fileName();
+ return nullptr;
+ }
+
+ return root;
+}
+
+} /* namespace libcamera */