Diffstat (limited to 'src/libcamera')
-rw-r--r-- src/libcamera/base/backtrace.cpp | 14
-rw-r--r-- src/libcamera/base/bound_method.cpp | 3
-rw-r--r-- src/libcamera/base/class.cpp | 2
-rw-r--r-- src/libcamera/base/event_dispatcher.cpp | 2
-rw-r--r-- src/libcamera/base/event_dispatcher_poll.cpp | 2
-rw-r--r-- src/libcamera/base/event_notifier.cpp | 8
-rw-r--r-- src/libcamera/base/file.cpp | 7
-rw-r--r-- src/libcamera/base/flags.cpp | 2
-rw-r--r-- src/libcamera/base/log.cpp | 56
-rw-r--r-- src/libcamera/base/meson.build | 7
-rw-r--r-- src/libcamera/base/message.cpp | 2
-rw-r--r-- src/libcamera/base/mutex.cpp | 2
-rw-r--r-- src/libcamera/base/object.cpp | 57
-rw-r--r-- src/libcamera/base/semaphore.cpp | 6
-rw-r--r-- src/libcamera/base/shared_fd.cpp | 2
-rw-r--r-- src/libcamera/base/signal.cpp | 5
-rw-r--r-- src/libcamera/base/thread.cpp | 19
-rw-r--r-- src/libcamera/base/timer.cpp | 12
-rw-r--r-- src/libcamera/base/unique_fd.cpp | 2
-rw-r--r-- src/libcamera/base/utils.cpp | 70
-rw-r--r-- src/libcamera/bayer_format.cpp | 64
-rw-r--r-- src/libcamera/byte_stream_buffer.cpp | 2
-rw-r--r-- src/libcamera/camera.cpp | 261
-rw-r--r-- src/libcamera/camera_controls.cpp | 2
-rw-r--r-- src/libcamera/camera_lens.cpp | 2
-rw-r--r-- src/libcamera/camera_manager.cpp | 262
-rw-r--r-- src/libcamera/color_space.cpp | 431
-rw-r--r-- src/libcamera/control_ids.cpp.in | 14
-rw-r--r-- src/libcamera/control_ids_core.yaml (renamed from src/libcamera/control_ids.yaml) | 408
-rw-r--r-- src/libcamera/control_ids_draft.yaml | 230
-rw-r--r-- src/libcamera/control_ids_rpi.yaml | 29
-rw-r--r-- src/libcamera/control_ranges.yaml | 18
-rw-r--r-- src/libcamera/control_serializer.cpp | 30
-rw-r--r-- src/libcamera/control_validator.cpp | 2
-rw-r--r-- src/libcamera/controls.cpp | 70
-rw-r--r-- src/libcamera/converter.cpp | 335
-rw-r--r-- src/libcamera/converter/converter_v4l2_m2m.cpp (renamed from src/libcamera/pipeline/simple/converter.cpp) | 169
-rw-r--r-- src/libcamera/converter/meson.build | 5
-rw-r--r-- src/libcamera/delayed_controls.cpp | 16
-rw-r--r-- src/libcamera/device_enumerator.cpp | 16
-rw-r--r-- src/libcamera/device_enumerator_sysfs.cpp | 2
-rw-r--r-- src/libcamera/device_enumerator_udev.cpp | 12
-rw-r--r-- src/libcamera/dma_heaps.cpp | 165
-rw-r--r-- src/libcamera/fence.cpp | 2
-rw-r--r-- src/libcamera/formats.cpp | 527
-rw-r--r-- src/libcamera/formats.yaml | 51
-rw-r--r-- src/libcamera/framebuffer.cpp | 60
-rw-r--r-- src/libcamera/framebuffer_allocator.cpp | 21
-rw-r--r-- src/libcamera/geometry.cpp | 10
-rw-r--r-- src/libcamera/ipa/meson.build | 7
-rw-r--r-- src/libcamera/ipa_controls.cpp | 2
-rw-r--r-- src/libcamera/ipa_data_serializer.cpp | 2
-rw-r--r-- src/libcamera/ipa_interface.cpp | 2
-rw-r--r-- src/libcamera/ipa_manager.cpp | 22
-rw-r--r-- src/libcamera/ipa_module.cpp | 25
-rw-r--r-- src/libcamera/ipa_proxy.cpp | 2
-rw-r--r-- src/libcamera/ipa_pub_key.cpp.in | 2
-rw-r--r-- src/libcamera/ipc_pipe.cpp | 2
-rw-r--r-- src/libcamera/ipc_pipe_unixsocket.cpp | 2
-rw-r--r-- src/libcamera/ipc_unixsocket.cpp | 2
-rw-r--r-- src/libcamera/mapped_framebuffer.cpp | 2
-rw-r--r-- src/libcamera/media_device.cpp | 20
-rw-r--r-- src/libcamera/media_object.cpp | 2
-rw-r--r-- src/libcamera/meson.build | 72
-rw-r--r-- src/libcamera/orientation.cpp | 115
-rw-r--r-- src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | 1117
-rw-r--r-- src/libcamera/pipeline/imx8-isi/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.cpp | 19
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.h | 6
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.cpp | 2
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.h | 2
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.cpp | 8
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.h | 2
-rw-r--r-- src/libcamera/pipeline/ipu3/ipu3.cpp | 159
-rw-r--r-- src/libcamera/pipeline/mali-c55/mali-c55.cpp | 1066
-rw-r--r-- src/libcamera/pipeline/mali-c55/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/meson.build | 15
-rw-r--r-- src/libcamera/pipeline/raspberrypi/dma_heaps.cpp | 90
-rw-r--r-- src/libcamera/pipeline/raspberrypi/dma_heaps.h | 32
-rw-r--r-- src/libcamera/pipeline/raspberrypi/raspberrypi.cpp | 2200
-rw-r--r-- src/libcamera/pipeline/raspberrypi/rpi_stream.h | 178
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1.cpp | 520
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 299
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.h | 18
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.cpp | 293
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.h | 87
-rw-r--r-- src/libcamera/pipeline/rpi/common/meson.build (renamed from src/libcamera/pipeline/raspberrypi/meson.build) | 4
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.cpp | 1491
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.h | 286
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.cpp (renamed from src/libcamera/pipeline/raspberrypi/rpi_stream.cpp) | 159
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.h | 199
-rw-r--r-- src/libcamera/pipeline/rpi/meson.build | 12
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/example.yaml | 46
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/meson.build | 9
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/vc4.cpp | 1023
-rw-r--r-- src/libcamera/pipeline/simple/converter.h | 98
-rw-r--r-- src/libcamera/pipeline/simple/meson.build | 1
-rw-r--r-- src/libcamera/pipeline/simple/simple.cpp | 465
-rw-r--r-- src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 237
-rw-r--r-- src/libcamera/pipeline/vimc/vimc.cpp | 49
-rw-r--r-- src/libcamera/pipeline_handler.cpp | 260
-rw-r--r-- src/libcamera/pixel_format.cpp | 2
-rw-r--r-- src/libcamera/process.cpp | 6
-rw-r--r-- src/libcamera/property_ids.cpp.in | 14
-rw-r--r-- src/libcamera/property_ids_core.yaml (renamed from src/libcamera/property_ids.yaml) | 50
-rw-r--r-- src/libcamera/property_ids_draft.yaml | 39
-rw-r--r-- src/libcamera/proxy/worker/meson.build | 2
-rw-r--r-- src/libcamera/pub_key.cpp | 52
-rw-r--r-- src/libcamera/request.cpp | 17
-rw-r--r-- src/libcamera/sensor/camera_sensor.cpp (renamed from src/libcamera/camera_sensor.cpp) | 587
-rw-r--r-- src/libcamera/sensor/camera_sensor_properties.cpp (renamed from src/libcamera/camera_sensor_properties.cpp) | 102
-rw-r--r-- src/libcamera/sensor/meson.build | 6
-rw-r--r-- src/libcamera/shared_mem_object.cpp | 242
-rw-r--r-- src/libcamera/software_isp/TODO | 279
-rw-r--r-- src/libcamera/software_isp/debayer.cpp | 132
-rw-r--r-- src/libcamera/software_isp/debayer.h | 54
-rw-r--r-- src/libcamera/software_isp/debayer_cpu.cpp | 807
-rw-r--r-- src/libcamera/software_isp/debayer_cpu.h | 158
-rw-r--r-- src/libcamera/software_isp/meson.build | 15
-rw-r--r-- src/libcamera/software_isp/software_isp.cpp | 357
-rw-r--r-- src/libcamera/software_isp/swstats_cpu.cpp | 432
-rw-r--r-- src/libcamera/software_isp/swstats_cpu.h | 97
-rw-r--r-- src/libcamera/source_paths.cpp | 2
-rw-r--r-- src/libcamera/stream.cpp | 23
-rw-r--r-- src/libcamera/sysfs.cpp | 2
-rw-r--r-- src/libcamera/tracepoints.cpp | 2
-rw-r--r-- src/libcamera/transform.cpp | 113
-rw-r--r-- src/libcamera/v4l2_device.cpp | 88
-rw-r--r-- src/libcamera/v4l2_pixelformat.cpp | 84
-rw-r--r-- src/libcamera/v4l2_subdevice.cpp | 1312
-rw-r--r-- src/libcamera/v4l2_videodevice.cpp | 116
-rw-r--r-- src/libcamera/version.cpp.in | 2
-rw-r--r-- src/libcamera/yaml_parser.cpp | 358
134 files changed, 14819 insertions, 4979 deletions
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
index 483492c3..0b04629c 100644
--- a/src/libcamera/base/backtrace.cpp
+++ b/src/libcamera/base/backtrace.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#include <libcamera/base/backtrace.h>
@@ -191,10 +191,22 @@ __attribute__((__noinline__))
bool Backtrace::unwindTrace()
{
#if HAVE_UNWIND
+/*
+ * unw_getcontext() for ARM32 is an inline assembly function using the stmia
+ * instruction to store SP and PC. This is considered by clang-11 as deprecated,
+ * and generates a warning.
+ */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winline-asm"
+#endif
unw_context_t uc;
int ret = unw_getcontext(&uc);
if (ret)
return false;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
unw_cursor_t cursor;
ret = unw_init_local(&cursor, &uc);
diff --git a/src/libcamera/base/bound_method.cpp b/src/libcamera/base/bound_method.cpp
index 3ecec51c..322029a8 100644
--- a/src/libcamera/base/bound_method.cpp
+++ b/src/libcamera/base/bound_method.cpp
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.cpp - Method bind and invocation
+ * Method bind and invocation
*/
#include <libcamera/base/bound_method.h>
#include <libcamera/base/message.h>
+#include <libcamera/base/object.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/thread.h>
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
index 9c2d9f21..61998398 100644
--- a/src/libcamera/base/class.cpp
+++ b/src/libcamera/base/class.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.cpp - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#include <libcamera/base/class.h>
diff --git a/src/libcamera/base/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
index 4be89e81..5f4a5cb4 100644
--- a/src/libcamera/base/event_dispatcher.cpp
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.cpp - Event dispatcher
+ * Event dispatcher
*/
#include <libcamera/base/event_dispatcher.h>
diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
index 7238a316..b737ca7a 100644
--- a/src/libcamera/base/event_dispatcher_poll.cpp
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#include <libcamera/base/event_dispatcher_poll.h>
diff --git a/src/libcamera/base/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
index fd93c087..495c281d 100644
--- a/src/libcamera/base/event_notifier.cpp
+++ b/src/libcamera/base/event_notifier.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.cpp - File descriptor event notifier
+ * File descriptor event notifier
*/
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/thread.h>
@@ -20,6 +21,8 @@
namespace libcamera {
+LOG_DECLARE_CATEGORY(Event)
+
/**
* \class EventNotifier
* \brief Notify of activity on a file descriptor
@@ -104,6 +107,9 @@ EventNotifier::~EventNotifier()
*/
void EventNotifier::setEnabled(bool enable)
{
+ if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
+ return;
+
if (enabled_ == enable)
return;
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
index fb3e276d..2b83a517 100644
--- a/src/libcamera/base/file.cpp
+++ b/src/libcamera/base/file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.cpp - File I/O operations
+ * File I/O operations
*/
#include <libcamera/base/file.h>
@@ -163,6 +163,9 @@ bool File::exists() const
* attempt to create the file with initial permissions set to 0666 (modified by
* the process' umask).
*
+ * The file is opened with the O_CLOEXEC flag, and will be closed automatically
+ * when a new binary is executed with one of the exec(3) functions.
+ *
* The error() status is updated.
*
* \return True on success, false otherwise
@@ -178,7 +181,7 @@ bool File::open(File::OpenMode mode)
if (mode & OpenModeFlag::WriteOnly)
flags |= O_CREAT;
- fd_ = UniqueFD(::open(name_.c_str(), flags, 0666));
+ fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
if (!fd_.isValid()) {
error_ = -errno;
return false;
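
The close-on-exec behaviour documented above can be illustrated with plain POSIX calls, independently of the File class; a minimal sketch (the openConfigFd() helper name is illustrative):

#include <fcntl.h>

int openConfigFd(const char *path)
{
	/*
	 * O_CLOEXEC sets the close-on-exec flag atomically at open time,
	 * so the descriptor can't leak into a child process in the window
	 * between open() and a separate fcntl(F_SETFD, FD_CLOEXEC) call.
	 */
	return open(path, O_RDONLY | O_CLOEXEC);
}
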
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
index 3e4320ac..9981f2ed 100644
--- a/src/libcamera/base/flags.cpp
+++ b/src/libcamera/base/flags.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.cpp - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#include <libcamera/base/flags.h>
diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp
index 5c359a22..3a656b8f 100644
--- a/src/libcamera/base/log.cpp
+++ b/src/libcamera/base/log.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.cpp - Logging infrastructure
+ * Logging infrastructure
*/
#include <libcamera/base/log.h>
@@ -21,6 +21,7 @@
#include <libcamera/logging.h>
#include <libcamera/base/backtrace.h>
+#include <libcamera/base/mutex.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
@@ -314,10 +315,11 @@ private:
friend LogCategory;
void registerCategory(LogCategory *category);
+ LogCategory *findCategory(const char *name) const;
static bool destroyed_;
- std::unordered_set<LogCategory *> categories_;
+ std::vector<LogCategory *> categories_;
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
@@ -568,7 +570,7 @@ void Logger::logSetLevel(const char *category, const char *level)
return;
for (LogCategory *c : categories_) {
- if (!strcmp(c->name(), category)) {
+ if (c->name() == category) {
c->setSeverity(severity);
break;
}
@@ -707,12 +709,12 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
* \brief Register a log category with the logger
* \param[in] category The log category
*
- * Log categories must have unique names. If a category with the same name
- * already exists this function performs no operation.
+ * Log categories must have unique names. It is invalid to call this function
+ * if a log category with the same name already exists.
*/
void Logger::registerCategory(LogCategory *category)
{
- categories_.insert(category);
+ categories_.push_back(category);
const std::string &name = category->name();
for (const std::pair<std::string, LogSeverity> &level : levels_) {
@@ -737,6 +739,22 @@ void Logger::registerCategory(LogCategory *category)
}
/**
+ * \brief Find an existing log category with the given name
+ * \param[in] name Name of the log category
+ * \return The pointer to the found log category or nullptr if not found
+ */
+LogCategory *Logger::findCategory(const char *name) const
+{
+ if (auto it = std::find_if(categories_.begin(), categories_.end(),
+ [name](auto c) { return c->name() == name; });
+ it != categories_.end()) {
+ return *it;
+ }
+
+ return nullptr;
+}
+
+/**
* \enum LogSeverity
* Log message severity
* \var LogDebug
@@ -761,13 +779,35 @@ void Logger::registerCategory(LogCategory *category)
*/
/**
+ * \brief Create a new LogCategory or return an existing one
+ * \param[in] name Name of the log category
+ *
+ * Create and return a new LogCategory with the given name if such a category
+ * does not yet exist, or return the existing one.
+ *
+ * \return The pointer to the LogCategory
+ */
+LogCategory *LogCategory::create(const char *name)
+{
+ static Mutex mutex_;
+ MutexLocker locker(mutex_);
+ LogCategory *category = Logger::instance()->findCategory(name);
+
+ if (!category) {
+ category = new LogCategory(name);
+ Logger::instance()->registerCategory(category);
+ }
+
+ return category;
+}
+
+/**
* \brief Construct a log category
* \param[in] name The category name
*/
LogCategory::LogCategory(const char *name)
: name_(name), severity_(LogSeverity::LogInfo)
{
- Logger::instance()->registerCategory(this);
}
/**
@@ -804,7 +844,7 @@ void LogCategory::setSeverity(LogSeverity severity)
*/
const LogCategory &LogCategory::defaultCategory()
{
- static const LogCategory *category = new LogCategory("default");
+ static const LogCategory *category = LogCategory::create("default");
return *category;
}
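
With registration moved out of the LogCategory constructor, LogCategory::create() becomes the single, mutex-protected entry point, and calling it repeatedly with the same name is safe. A minimal usage sketch, with a hypothetical category name:

LogCategory *a = LogCategory::create("MyCategory");
LogCategory *b = LogCategory::create("MyCategory");
/* a == b: the second call finds and returns the existing instance. */
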
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
index 7030ad1f..7a7fd7e4 100644
--- a/src/libcamera/base/meson.build
+++ b/src/libcamera/base/meson.build
@@ -22,8 +22,8 @@ libcamera_base_sources = files([
'utils.cpp',
])
-libdw = cc.find_library('libdw', required : false)
-libunwind = cc.find_library('libunwind', required : false)
+libdw = dependency('libdw', required : false)
+libunwind = dependency('libunwind', required : false)
if cc.has_header_symbol('execinfo.h', 'backtrace')
config_h.set('HAVE_BACKTRACE', 1)
@@ -38,9 +38,9 @@ if libunwind.found()
endif
libcamera_base_deps = [
- dependency('threads'),
libatomic,
libdw,
+ libthreads,
libunwind,
]
@@ -51,6 +51,7 @@ libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
libcamera_base_lib = shared_library('libcamera-base',
[libcamera_base_sources, libcamera_base_headers],
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
cpp_args : libcamera_base_args,
diff --git a/src/libcamera/base/message.cpp b/src/libcamera/base/message.cpp
index 2da2a7ed..098faac6 100644
--- a/src/libcamera/base/message.cpp
+++ b/src/libcamera/base/message.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Message queue support
+ * Message queue support
*/
#include <libcamera/base/message.h>
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
index e34e8618..2a4542c4 100644
--- a/src/libcamera/base/mutex.cpp
+++ b/src/libcamera/base/mutex.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.cpp - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#include <libcamera/base/mutex.h>
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
index 92cecd22..745d2565 100644
--- a/src/libcamera/base/object.cpp
+++ b/src/libcamera/base/object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.cpp - Base object
+ * Base object
*/
#include <libcamera/base/object.h>
@@ -40,8 +40,9 @@ LOG_DEFINE_CATEGORY(Object)
* Object class.
*
* Deleting an object from a thread other than the one the object is bound to is
- * unsafe, unless the caller ensures that the object isn't processing any
- * message concurrently.
+ * unsafe, unless the caller ensures that the object's thread is stopped and no
+ * parent or child of the object gets deleted concurrently. See
+ * Object::~Object() for more information.
*
* Object slots connected to signals will also run in the context of the
* object's thread, regardless of whether the signal is emitted in the same or
@@ -84,9 +85,20 @@ Object::Object(Object *parent)
* Object instances shall be destroyed from the thread they are bound to,
* otherwise undefined behaviour may occur. If deletion of an Object needs to
* be scheduled from a different thread, deleteLater() shall be used.
+ *
+ * As an exception to this rule, Object instances may be deleted from a
+ * different thread if the thread the instance is bound to is stopped through
+ * the whole duration of the object's destruction, *and* the parent and children
+ * of the object do not get deleted concurrently. The caller is responsible for
+ * fulfilling those requirements.
+ *
+ * In all cases Object instances shall be deleted before the Thread they are
+ * bound to.
*/
Object::~Object()
{
+ ASSERT(Thread::current() == thread_ || !thread_->isRunning());
+
/*
* Move signals to a private list to avoid concurrent iteration and
* deletion of items from Signal::disconnect().
@@ -116,8 +128,9 @@ Object::~Object()
* event loop that the object belongs to. This ensures the object is destroyed
* from the right context, as required by the libcamera threading model.
*
- * If this function is called before the thread's event loop is started, the
- * object will be deleted when the event loop starts.
+ * If this function is called before the thread's event loop is started or after
+ * it has stopped, the object will be deleted when the event loop (re)starts. If
+ * this never occurs, the object will be leaked.
*
* Deferred deletion can be used to control the destruction context with shared
* pointers. An object managed with shared pointers is deleted when the last
@@ -213,6 +226,35 @@ void Object::message(Message *msg)
}
/**
+ * \fn Object::assertThreadBound()
+ * \brief Check if the caller complies with thread-bound constraints
+ * \param[in] message The message to be printed on error
+ *
+ * This function verifies the calling constraints required by the \threadbound
+ * definition. It shall be called at the beginning of member functions of an
+ * Object subclass that are explicitly marked as thread-bound in their
+ * documentation.
+ *
+ * If the thread-bound constraints are not met, the function prints \a message
+ * as an error message. For debug builds, it additionally causes an assertion
+ * error.
+ *
+ * \todo Verify the thread-bound requirements for functions marked as
+ * thread-bound at the class level.
+ *
+ * \return True if the call is thread-bound compliant, false otherwise
+ */
+bool Object::assertThreadBound(const char *message)
+{
+ if (Thread::current() == thread_)
+ return true;
+
+ LOG(Object, Error) << message;
+ ASSERT(false);
+ return false;
+}
+
+/**
* \fn R Object::invokeMethod()
* \brief Invoke a method asynchronously on an Object instance
* \param[in] func The object method to invoke
@@ -259,11 +301,12 @@ void Object::message(Message *msg)
* Moving an object that has a parent is not allowed, and causes undefined
* behaviour.
*
- * \context This function is thread-bound.
+ * \context This function is \threadbound.
*/
void Object::moveToThread(Thread *thread)
{
- ASSERT(Thread::current() == thread_);
+ if (!assertThreadBound("Object can't be moved from another thread"))
+ return;
if (thread_ == thread)
return;
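
Subclasses of Object can use the new assertThreadBound() helper to guard their own thread-bound entry points, following the same pattern as moveToThread() above. A hypothetical sketch (MyObject and interval_ are illustrative, not part of the diff):

void MyObject::setInterval(int msec)
{
	/* Logs an error, and asserts in debug builds, when called off-thread. */
	if (!assertThreadBound("MyObject interval can't be set from another thread"))
		return;

	interval_ = msec;
}
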
diff --git a/src/libcamera/base/semaphore.cpp b/src/libcamera/base/semaphore.cpp
index 4fe30293..862f3b31 100644
--- a/src/libcamera/base/semaphore.cpp
+++ b/src/libcamera/base/semaphore.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.cpp - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#include <libcamera/base/semaphore.h>
@@ -56,7 +56,9 @@ unsigned int Semaphore::available()
void Semaphore::acquire(unsigned int n)
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return available_ >= n; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return available_ >= n;
+ });
available_ -= n;
}
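
The annotated lambda only tells clang's thread safety analysis that the predicate runs with mutex_ held; the semaphore's observable behaviour is unchanged. A usage sketch:

Semaphore sem(0);

/* Consumer thread: blocks until a token is available. */
sem.acquire(1);

/* Producer thread: releases a token, waking the consumer. */
sem.release(1);
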
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
index c711cf57..7afc8ca5 100644
--- a/src/libcamera/base/shared_fd.cpp
+++ b/src/libcamera/base/shared_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.cpp - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#include <libcamera/base/shared_fd.h>
diff --git a/src/libcamera/base/signal.cpp b/src/libcamera/base/signal.cpp
index a46386a0..b782e050 100644
--- a/src/libcamera/base/signal.cpp
+++ b/src/libcamera/base/signal.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal & slot implementation
+ * Signal & slot implementation
*/
#include <libcamera/base/signal.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \file base/signal.h
@@ -74,7 +75,7 @@ SignalBase::SlotList SignalBase::slots()
*
* Signals and slots are a language construct aimed at communication between
* objects through the observer pattern without the need for boilerplate code.
- * See http://doc.qt.io/qt-5/signalsandslots.html for more information.
+ * See http://doc.qt.io/qt-6/signalsandslots.html for more information.
*
* Signals model events that can be observed from objects unrelated to the event
* source. Slots are functions that are called in response to a signal. Signals
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
index 6bda9d14..72733431 100644
--- a/src/libcamera/base/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.cpp - Thread support
+ * Thread support
*/
#include <libcamera/base/thread.h>
@@ -18,6 +18,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
/**
* \page thread Thread Support
@@ -151,7 +152,7 @@ private:
friend class ThreadMain;
Thread *thread_;
- bool running_;
+ bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
pid_t tid_;
Mutex mutex_;
@@ -370,6 +371,12 @@ void Thread::run()
void Thread::finishThread()
{
+ /*
+ * Objects may have been scheduled for deletion right before the thread
+ * exited. Ensure they get deleted now, before the thread stops.
+ */
+ dispatchMessages(Message::Type::DeferredDelete);
+
data_->mutex_.lock();
data_->running_ = false;
data_->mutex_.unlock();
@@ -422,11 +429,15 @@ bool Thread::wait(utils::duration duration)
{
MutexLocker locker(data_->mutex_);
+ auto isRunning = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
+ return !data_->running_;
+ });
+
if (duration == utils::duration::max())
- data_->cv_.wait(locker, [&]() { return !data_->running_; });
+ data_->cv_.wait(locker, isRunning);
else
hasFinished = data_->cv_.wait_for(locker, duration,
- [&]() { return !data_->running_; });
+ isRunning);
}
if (thread_.joinable())
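
The added dispatchMessages() call closes a leak window: a deferred deletion queued just before the thread exits is now processed before the thread reports itself stopped. A sketch of the ordering this provides, assuming a hypothetical Worker subclass of Object:

Thread thread;
thread.start();

Worker *worker = new Worker();
worker->moveToThread(&thread);

worker->deleteLater();	/* queues a DeferredDelete message */
thread.exit();
thread.wait();		/* worker has been deleted when wait() returns */
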
diff --git a/src/libcamera/base/timer.cpp b/src/libcamera/base/timer.cpp
index 74b060af..7b0f3725 100644
--- a/src/libcamera/base/timer.cpp
+++ b/src/libcamera/base/timer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Generic timer
+ * Generic timer
*/
#include <libcamera/base/timer.h>
@@ -85,10 +85,8 @@ void Timer::start(std::chrono::milliseconds duration)
*/
void Timer::start(std::chrono::steady_clock::time_point deadline)
{
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " << can't be started from another thread";
+ if (!assertThreadBound("Timer can't be started from another thread"))
return;
- }
deadline_ = deadline;
@@ -114,13 +112,11 @@ void Timer::start(std::chrono::steady_clock::time_point deadline)
*/
void Timer::stop()
{
- if (!isRunning())
+ if (!assertThreadBound("Timer can't be stopped from another thread"))
return;
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer " << this << " can't be stopped from another thread";
+ if (!isRunning())
return;
- }
unregisterTimer();
}
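
Both start() and stop() are thus thread-bound. A thread that doesn't own the timer can still control it by bouncing the call to the timer's thread; one possible approach, using the Object API:

/* From another thread: runs Timer::stop() in the timer's own thread. */
timer->invokeMethod(&Timer::stop, ConnectionTypeQueued);
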
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
index 83d6919c..d0649e4d 100644
--- a/src/libcamera/base/unique_fd.cpp
+++ b/src/libcamera/base/unique_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.cpp - File descriptor wrapper that owns a file descriptor
+ * File descriptor wrapper that owns a file descriptor
*/
#include <libcamera/base/unique_fd.h>
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
index 6a307940..ccb31063 100644
--- a/src/libcamera/base/utils.cpp
+++ b/src/libcamera/base/utils.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * utils.cpp - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#include <libcamera/base/utils.h>
#include <iomanip>
+#include <locale.h>
#include <sstream>
#include <stdlib.h>
#include <string.h>
@@ -463,6 +464,73 @@ std::string toAscii(const std::string &str)
* \a b
*/
+#if HAVE_LOCALE_T
+
+namespace {
+
+/*
+ * RAII wrapper around locale_t instances, to support global locale instances
+ * without leaking memory.
+ */
+class Locale
+{
+public:
+ Locale(const char *locale)
+ {
+ locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
+ }
+
+ ~Locale()
+ {
+ freelocale(locale_);
+ }
+
+ locale_t locale() { return locale_; }
+
+private:
+ locale_t locale_;
+};
+
+Locale cLocale("C");
+
+} /* namespace */
+
+#endif /* HAVE_LOCALE_T */
+
+/**
+ * \brief Convert a string to a double independently of the current locale
+ * \param[in] nptr The string to convert
+ * \param[out] endptr Pointer to trailing portion of the string after conversion
+ *
+ * This function is a locale-independent version of the std::strtod() function.
+ * It behaves as the standard function, but uses the "C" locale instead of the
+ * current locale.
+ *
+ * \return The converted value, if any, or 0.0 if the conversion failed.
+ */
+double strtod(const char *__restrict nptr, char **__restrict endptr)
+{
+#if HAVE_LOCALE_T
+ return strtod_l(nptr, endptr, cLocale.locale());
+#else
+ /*
+ * If the libc implementation doesn't provide locale object support,
+ * assume that strtod() is locale-independent.
+ */
+ return ::strtod(nptr, endptr);
+#endif
+}
+
+/**
+ * \fn to_underlying(Enum e)
+ * \brief Convert an enumeration to its underlying type
+ * \param[in] e Enumeration value to convert
+ *
+ * This function is equivalent to the C++23 std::to_underlying().
+ *
+ * \return The value of e converted to its underlying type
+ */
+
} /* namespace utils */
#ifndef __DOXYGEN__
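
The practical effect of the locale-independent wrapper: "1.5" parses as 1.5 even when the process locale uses a decimal comma (e.g. de_DE.UTF-8), where the C library's strtod() would stop at the '.'. A sketch:

char *end;
double value = libcamera::utils::strtod("1.5", &end); /* always 1.5 */
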
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
index 4882707e..014f716d 100644
--- a/src/libcamera/bayer_format.cpp
+++ b/src/libcamera/bayer_format.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.cpp - Class to represent Bayer formats
+ * Class to represent Bayer formats
*/
#include "libcamera/internal/bayer_format.h"
@@ -61,6 +61,10 @@ namespace libcamera {
* \brief Format uses MIPI CSI-2 style packing
* \var BayerFormat::Packing::IPU3
* \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
*/
namespace {
@@ -140,6 +144,22 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P) } },
{ { BayerFormat::RGGB, 12, BayerFormat::Packing::CSI2 },
{ formats::SRGGB12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::None },
+ { formats::SBGGR14, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::None },
+ { formats::SGBRG14, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::None },
+ { formats::SGRBG14, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::None },
+ { formats::SRGGB14, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P) } },
{ { BayerFormat::BGGR, 16, BayerFormat::Packing::None },
{ formats::SBGGR16, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16) } },
{ { BayerFormat::GBRG, 16, BayerFormat::Packing::None },
@@ -148,12 +168,26 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
{ { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
{ formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
{ { BayerFormat::MONO, 8, BayerFormat::Packing::None },
{ formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::None },
{ formats::R10, V4L2PixelFormat(V4L2_PIX_FMT_Y10) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::CSI2 },
{ formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::None },
+ { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::None },
+ { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
};
const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
@@ -191,6 +225,8 @@ const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
{ MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y16_1X16, { BayerFormat::MONO, 16, BayerFormat::Packing::None } },
};
} /* namespace */
@@ -281,6 +317,10 @@ std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
out << "-CSI2P";
else if (f.packing == BayerFormat::Packing::IPU3)
out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
return out;
}
@@ -355,11 +395,14 @@ BayerFormat BayerFormat::fromPixelFormat(PixelFormat format)
* \brief Apply a transform to this BayerFormat
* \param[in] t The transform to apply
*
- * Appplying a transform to an image stored in a Bayer format affects the Bayer
- * order. For example, performing a horizontal flip on the Bayer pattern
- * RGGB causes the RG rows of pixels to become GR, and the GB rows to become BG.
- * The transformed image would have a GRBG order. The bit depth and modifiers
- * are not affected.
+ * Applying a transform to an image stored in a Bayer format affects the Bayer
+ * order. For example, performing a horizontal flip on the Bayer pattern RGGB
+ * causes the RG rows of pixels to become GR, and the GB rows to become BG. The
+ * transformed image would have a GRBG order. Performing a vertical flip on the
+ * Bayer pattern RGGB causes the GB rows to come before the RG ones and the
+ * transformed image would have GBRG order. Applying both vertical and
+ * horizontal flips on the Bayer pattern RGGB results in transformed images with
+ * BGGR order. The bit depth and modifiers are not affected.
*
* Horizontal and vertical flips are applied before transpose.
*
@@ -374,8 +417,11 @@ BayerFormat BayerFormat::transform(Transform t) const
/*
* Observe that flipping bit 0 of the Order enum performs a horizontal
- * mirror on the Bayer pattern (e.g. RGGB goes to GRBG). Similarly,
- * flipping bit 1 performs a vertical mirror operation on it. Hence:
+ * mirror on the Bayer pattern (e.g. RG/GB goes to GR/BG). Similarly,
+ * flipping bit 1 performs a vertical mirror operation on it (e.g. RG/GB
+ * goes to GB/RG). Applying both vertical and horizontal flips
+ * combines vertical and horizontal mirroring on the Bayer pattern
+ * (e.g. RG/GB goes to BG/GR). Hence:
*/
if (!!(t & Transform::HFlip))
result.order = static_cast<Order>(result.order ^ 1);
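
Because the Order enumerators are laid out so that bit 0 encodes the horizontal phase and bit 1 the vertical phase, the flips reduce to XOR operations, as the comment above describes. A worked sketch of the documented behaviour:

BayerFormat fmt{ BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2 };

BayerFormat h = fmt.transform(Transform::HFlip);	/* order: GRBG */
BayerFormat v = fmt.transform(Transform::VFlip);	/* order: GBRG */
BayerFormat hv = fmt.transform(Transform::Rot180);	/* order: BGGR */
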
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 881cd371..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
#include "libcamera/internal/byte_stream_buffer.h"
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index 713543fd..67f34901 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
@@ -97,6 +97,16 @@
* implemented in the above order at the hardware level. The libcamera pipeline
* handlers translate the pipeline model to the real hardware configuration.
*
+ * \subsection camera-sensor-model Camera Sensor Model
+ *
+ * By default, libcamera configures the camera sensor automatically based on the
+ * configuration of the streams. Applications may instead specify a manual
+ * configuration for the camera sensor. This allows precise control of the frame
+ * geometry and frame rate delivered by the sensor.
+ *
+ * More details about the camera sensor model implemented by libcamera are
+ * available in the libcamera camera-sensor-model documentation page.
+ *
* \subsection digital-zoom Digital Zoom
*
* Digital zoom is implemented as a combination of the cropping and scaling
@@ -112,6 +122,127 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
+ * \class SensorConfiguration
+ * \brief Camera sensor configuration
+ *
+ * The SensorConfiguration class collects parameters to control the operations
+ * of the camera sensor, according to the abstract camera sensor model
+ * implemented by libcamera.
+ *
+ * \todo Applications shall fully populate all fields of the
+ * CameraConfiguration::sensorConfig class members before validating the
+ * CameraConfiguration. If the SensorConfiguration is not fully populated, or if
+ * any of its parameters cannot be applied to the sensor in use, the
+ * CameraConfiguration validation process will fail and return
+ * CameraConfiguration::Status::Invalid.
+ *
+ * Applications that populate the SensorConfiguration class members are
+ * expected to be highly-specialized applications that know what sensor
+ * they are operating with and what parameters are valid for the sensor in use.
+ *
+ * A detailed description of the abstract camera sensor model implemented by
+ * libcamera and the description of its configuration parameters is available
+ * in the libcamera documentation camera-sensor-model file.
+ */
+
+/**
+ * \var SensorConfiguration::bitDepth
+ * \brief The sensor image format bit depth
+ *
+ * The number of bits (resolution) used to represent a pixel sample.
+ */
+
+/**
+ * \var SensorConfiguration::analogCrop
+ * \brief The analog crop rectangle
+ *
+ * The selected portion of the active pixel array used to produce the image
+ * frame.
+ */
+
+/**
+ * \var SensorConfiguration::binning
+ * \brief Sensor binning configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the binning operations. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::binX
+ * \brief Horizontal binning factor
+ *
+ * The horizontal binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::binY
+ * \brief Vertical binning factor
+ *
+ * The vertical binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::skipping
+ * \brief The sensor skipping configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the skipping operations.
+ *
+ * If no skipping is performed, all the structure fields should be
+ * set to 1. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::xOddInc
+ * \brief Horizontal increment for odd rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::xEvenInc
+ * \brief Horizontal increment for even rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yOddInc
+ * \brief Vertical increment for odd columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yEvenInc
+ * \brief Vertical increment for even columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::outputSize
+ * \brief The frame output (visible) size
+ *
+ * The size of the data frame as received by the host processor.
+ */
+
+/**
+ * \brief Check if the sensor configuration is valid
+ *
+ * A sensor configuration is valid if it's fully populated.
+ *
+ * \todo For now allow applications to populate the bitDepth and the outputSize
+ * only, as skipping and binning factors are initialized to 1 and the analog
+ * crop is ignored.
+ *
+ * \return True if the sensor configuration is valid, false otherwise
+ */
+bool SensorConfiguration::isValid() const
+{
+ if (bitDepth && binning.binX && binning.binY &&
+ skipping.xOddInc && skipping.yOddInc &&
+ skipping.xEvenInc && skipping.yEvenInc &&
+ !outputSize.isNull())
+ return true;
+
+ return false;
+}
+
+/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
@@ -160,7 +291,7 @@ LOG_DECLARE_CATEGORY(Camera)
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
- : transform(Transform::Identity), config_({})
+ : orientation(Orientation::Rotate0), config_({})
{
}
@@ -184,12 +315,12 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* This function adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
- * \todo: Define exactly when to return each status code. Should stream
+ * \todo Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
- * \todo: Handle validation of buffers count when refactoring the buffers API.
+ * \todo Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
@@ -317,17 +448,6 @@ std::size_t CameraConfiguration::size() const
return config_.size();
}
-namespace {
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- return info.isValid() &&
- info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-} /* namespace */
-
/**
* \enum CameraConfiguration::ColorSpaceFlag
* \brief Specify the behaviour of validateColorSpaces
@@ -358,8 +478,8 @@ bool isRaw(const PixelFormat &pixFmt)
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfiguration::Adjusted The configuration has been adjusted
- * and is now valid. The color space of some or all of the streams may bave
- * benn changed. The caller shall check the color spaces carefully.
+ * and is now valid. The color space of some or all of the streams may have
+ * been changed. The caller shall check the color spaces carefully.
* \retval CameraConfiguration::Valid The configuration was already valid and
* hasn't been adjusted.
*/
@@ -368,29 +488,33 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
Status status = Valid;
/*
- * Set all raw streams to the Raw color space, and make a note of the largest
- * non-raw stream with a defined color space (if there is one).
+ * Set all raw streams to the Raw color space, and make a note of the
+ * largest non-raw stream with a defined color space (if there is one).
*/
- int index = -1;
- for (auto [i, cfg] : utils::enumerate(config_)) {
- if (isRaw(cfg.pixelFormat)) {
- if (cfg.colorSpace != ColorSpace::Raw) {
- cfg.colorSpace = ColorSpace::Raw;
- status = Adjusted;
- }
- } else if (cfg.colorSpace && (index == -1 || cfg.size > config_[i].size)) {
- index = i;
+ std::optional<ColorSpace> colorSpace;
+ Size size;
+
+ for (StreamConfiguration &cfg : config_) {
+ if (!cfg.colorSpace)
+ continue;
+
+ if (cfg.colorSpace->adjust(cfg.pixelFormat))
+ status = Adjusted;
+
+ if (cfg.colorSpace != ColorSpace::Raw && cfg.size > size) {
+ colorSpace = cfg.colorSpace;
+ size = cfg.size;
}
}
- if (index < 0 || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
+ if (!colorSpace || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
return status;
/* Make all output color spaces the same, if requested. */
for (auto &cfg : config_) {
- if (!isRaw(cfg.pixelFormat) &&
- cfg.colorSpace != config_[index].colorSpace) {
- cfg.colorSpace = config_[index].colorSpace;
+ if (cfg.colorSpace != ColorSpace::Raw &&
+ cfg.colorSpace != colorSpace) {
+ cfg.colorSpace = colorSpace;
status = Adjusted;
}
}
@@ -399,17 +523,35 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
}
/**
- * \var CameraConfiguration::transform
- * \brief User-specified transform to be applied to the image
+ * \var CameraConfiguration::sensorConfig
+ * \brief The camera sensor configuration
+ *
+ * The sensorConfig member allows manual control of the configuration of the
+ * camera sensor. By default, if sensorConfig is not set, the camera will
+ * configure the sensor automatically based on the configuration of the streams.
+ * Applications can override this by manually specifying the full sensor
+ * configuration.
+ *
+ * Refer to the camera-sensor-model documentation and to the SensorConfiguration
+ * class documentation for details about the sensor configuration process.
+ *
+ * The camera sensor configuration applies to all streams produced by a camera
+ * from the same image source.
+ */
+
+/**
+ * \var CameraConfiguration::orientation
+ * \brief The desired orientation of the images produced by the camera
+ *
+ * The orientation field is a user-specified 2D plane transformation that
+ * specifies how the application wants the camera images to be rotated in
+ * the memory buffers.
*
- * The transform is a user-specified 2D plane transform that will be applied
- * to the camera images by the processing pipeline before being handed to
- * the application. This is subsequent to any transform that is already
- * required to fix up any platform-defined rotation.
+ * If the orientation requested by the application cannot be obtained, the
+ * camera will not rotate or flip the images, and the validate() function will
+ * adjust this value to the native image orientation produced by the camera.
*
- * The usual 2D plane transforms are allowed here (horizontal/vertical
- * flips, multiple of 90-degree rotations etc.), but the validate() function
- * may adjust this field at its discretion if the selection is not supported.
+ * By default the orientation field is set to Orientation::Rotate0.
*/
/**
@@ -497,7 +639,7 @@ Camera::Private::~Private()
* facilitate debugging of internal request usage.
*
* The requestSequence_ tracks the number of requests queued to a camera
- * over its lifetime.
+ * over a single capture session.
*/
static const char *const camera_state_names[] = {
@@ -508,6 +650,11 @@ static const char *const camera_state_names[] = {
"Running",
};
+bool Camera::Private::isAcquired() const
+{
+ return state_.load(std::memory_order_acquire) != CameraAvailable;
+}
+
bool Camera::Private::isRunning() const
{
return state_.load(std::memory_order_acquire) == CameraRunning;
@@ -811,7 +958,7 @@ int Camera::exportFrameBuffers(Stream *stream,
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
- * Acquiring a camera will limit usage of any other camera(s) provided by the
+ * Acquiring a camera may limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
@@ -839,7 +986,7 @@ int Camera::acquire()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!d->pipe_->lock()) {
+ if (!d->pipe_->acquire()) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
@@ -873,7 +1020,8 @@ int Camera::release()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- d->pipe_->unlock();
+ if (d->isAcquired())
+ d->pipe_->release(this);
d->setState(Private::CameraAvailable);
@@ -937,10 +1085,9 @@ const std::set<Stream *> &Camera::streams() const
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
- * null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * null pointer otherwise.
*/
-std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(Span<const StreamRole> roles)
{
Private *const d = _d();
@@ -952,7 +1099,8 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
if (roles.size() > streams().size())
return nullptr;
- CameraConfiguration *config = d->pipe_->generateConfiguration(this, roles);
+ std::unique_ptr<CameraConfiguration> config =
+ d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
@@ -969,10 +1117,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
LOG(Camera, Debug) << msg.str();
- return std::unique_ptr<CameraConfiguration>(config);
+ return config;
}
/**
+ * \fn std::unique_ptr<CameraConfiguration> \
+ * Camera::generateConfiguration(std::initializer_list<StreamRole> roles)
+ * \overload
+ */
+
+/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
@@ -1127,6 +1281,11 @@ int Camera::queueRequest(Request *request)
return -EXDEV;
}
+ if (request->status() != Request::RequestPending) {
+ LOG(Camera, Error) << request->toString() << " is not valid";
+ return -EINVAL;
+ }
+
/*
* The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
@@ -1181,6 +1340,8 @@ int Camera::start(const ControlList *controls)
LOG(Camera, Debug) << "Starting capture";
+ ASSERT(d->requestSequence_ == 0);
+
ret = d->pipe_->invokeMethod(&PipelineHandler::start,
ConnectionTypeBlocking, this, controls);
if (ret)
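
Taken together, the camera.cpp changes reshape the application-facing configuration flow: generateConfiguration() takes a Span or initializer list of roles, the transform field is replaced by orientation, and a sensor configuration may optionally be supplied. A hypothetical usage sketch (all values illustrative):

std::unique_ptr<CameraConfiguration> config =
	camera->generateConfiguration({ StreamRole::Viewfinder });

/*
 * Optional manual sensor configuration; per the \todo above, only
 * bitDepth and outputSize need to be populated for now.
 */
SensorConfiguration sensorConfig;
sensorConfig.bitDepth = 10;
sensorConfig.outputSize = { 1920, 1080 };
config->sensorConfig = sensorConfig;

/* Request the native orientation (the default). */
config->orientation = Orientation::Rotate0;

int ret = camera->configure(config.get());
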
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index cabdcf75..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
#include "libcamera/internal/camera_controls.h"
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
index b3d48199..ccc2a6a6 100644
--- a/src/libcamera/camera_lens.cpp
+++ b/src/libcamera/camera_lens.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.cpp - A camera lens
+ * A camera lens
*/
#include "libcamera/internal/camera_lens.h"
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index 70d73822..95a9e326 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,77 +2,38 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
-#include <libcamera/camera_manager.h>
-
-#include <map>
-
-#include <libcamera/camera.h>
+#include "libcamera/internal/camera_manager.h"
#include <libcamera/base/log.h>
-#include <libcamera/base/mutex.h>
-#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
+#include <libcamera/camera.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/process.h"
/**
- * \file camera_manager.h
+ * \file libcamera/camera_manager.h
* \brief The camera manager
*/
/**
+ * \file libcamera/internal/camera_manager.h
+ * \brief Internal camera manager support
+ */
+
+/**
* \brief Top-level libcamera namespace
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
-class CameraManager::Private : public Extensible::Private, public Thread
-{
- LIBCAMERA_DECLARE_PUBLIC(CameraManager)
-
-public:
- Private();
-
- int start();
- void addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums);
- void removeCamera(Camera *camera);
-
- /*
- * This mutex protects
- *
- * - initialized_ and status_ during initialization
- * - cameras_ and camerasByDevnum_ after initialization
- */
- mutable Mutex mutex_;
- std::vector<std::shared_ptr<Camera>> cameras_;
- std::map<dev_t, std::weak_ptr<Camera>> camerasByDevnum_;
-
-protected:
- void run() override;
-
-private:
- int init();
- void createPipelineHandlers();
- void cleanup();
-
- ConditionVariable cv_;
- bool initialized_;
- int status_;
-
- std::unique_ptr<DeviceEnumerator> enumerator_;
-
- IPAManager ipaManager_;
- ProcessManager processManager_;
-};
-
CameraManager::Private::Private()
: initialized_(false)
{
@@ -87,7 +48,9 @@ int CameraManager::Private::start()
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return initialized_; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return initialized_;
+ });
status = status_;
}
@@ -129,23 +92,45 @@ int CameraManager::Private::init()
return -ENODEV;
createPipelineHandlers();
+ enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
return 0;
}
void CameraManager::Private::createPipelineHandlers()
{
- CameraManager *const o = LIBCAMERA_O_PTR();
-
/*
* \todo Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+ * file and only fallback on environment variable or all handlers, if
+ * there is no configuration file.
*/
- std::vector<PipelineHandlerFactory *> &factories =
- PipelineHandlerFactory::factories();
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
+ /*
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
+ */
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
+
+ LOG(Camera, Debug)
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
+ }
+
+ return;
+ }
+
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
- for (PipelineHandlerFactory *factory : factories) {
+ /* Match all the registered pipeline handlers. */
+ for (const PipelineHandlerFactoryBase *factory : factories) {
LOG(Camera, Debug)
<< "Found registered pipeline handler '"
<< factory->name() << "'";
@@ -153,18 +138,24 @@ void CameraManager::Private::createPipelineHandlers()
* Try each pipeline handler until it exhausts
* all pipelines it can provide.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(o);
- if (!pipe->match(enumerator_.get()))
- break;
-
- LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- }
+ pipelineFactoryMatch(factory);
}
+}
- enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
+ }
}
void CameraManager::Private::cleanup()
@@ -178,18 +169,36 @@ void CameraManager::Private::cleanup()
* process deletion requests from the thread's message queue as the event
* loop is not in action here.
*/
- cameras_.clear();
+ {
+ MutexLocker locker(mutex_);
+ cameras_.clear();
+ }
+
dispatchMessages(Message::Type::DeferredDelete);
enumerator_.reset(nullptr);
}
-void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
+/**
+ * \brief Add a camera to the camera manager
+ * \param[in] camera The camera to be added
+ *
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager. Registered cameras are immediately made
+ * available to the system.
+ *
+ * Device numbers from the SystemDevices property are used by the V4L2
+ * compatibility layer to map V4L2 device nodes to Camera instances.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
- for (std::shared_ptr<Camera> c : cameras_) {
+ for (const std::shared_ptr<Camera> &c : cameras_) {
if (c->id() == camera->id()) {
LOG(Camera, Fatal)
<< "Trying to register a camera with a duplicated ID '"
@@ -201,17 +210,31 @@ void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera,
cameras_.push_back(std::move(camera));
unsigned int index = cameras_.size() - 1;
- for (dev_t devnum : devnums)
- camerasByDevnum_[devnum] = cameras_[index];
+
+ /* Report the addition to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraAdded.emit(cameras_[index]);
}
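For context, pipeline handlers reach addCamera() through PipelineHandler::registerCamera(). A minimal sketch of that registration path follows; MyPipelineHandler, MyCameraData, createCameraData() and the camera ID are hypothetical, and the Camera::create() signature is assumed from this tree.

/* MyCameraData is assumed to derive from Camera::Private. */
bool MyPipelineHandler::match(DeviceEnumerator *enumerator)
{
	/* ... acquire media devices and create the per-camera data ... */
	std::unique_ptr<MyCameraData> data = createCameraData();

	std::set<Stream *> streams{ &data->stream_ };
	std::shared_ptr<Camera> camera =
		Camera::create(std::move(data), "my-camera-id", streams);

	/* registerCamera() forwards to CameraManager::Private::addCamera(). */
	registerCamera(std::move(camera));

	return true;
}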
-void CameraManager::Private::removeCamera(Camera *camera)
+/**
+ * \brief Remove a camera from the camera manager
+ * \param[in] camera The camera to be removed
+ *
+ * This function is called by pipeline handlers to unregister cameras from the
+ * camera manager. Unregistered cameras won't be reported anymore by the
+ * cameras() and get() calls, but references may still exist in applications.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
auto iter = std::find_if(cameras_.begin(), cameras_.end(),
[camera](std::shared_ptr<Camera> &c) {
- return c.get() == camera;
+ return c.get() == camera.get();
});
if (iter == cameras_.end())
return;
@@ -219,14 +242,11 @@ void CameraManager::Private::removeCamera(Camera *camera)
LOG(Camera, Debug)
<< "Unregistering camera '" << camera->id() << "'";
- auto iter_d = std::find_if(camerasByDevnum_.begin(), camerasByDevnum_.end(),
- [camera](const std::pair<dev_t, std::weak_ptr<Camera>> &p) {
- return p.second.lock().get() == camera;
- });
- if (iter_d != camerasByDevnum_.end())
- camerasByDevnum_.erase(iter_d);
-
cameras_.erase(iter);
+
+ /* Report the removal to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraRemoved.emit(camera);
}
/**
@@ -363,35 +383,6 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id)
}
/**
- * \brief Retrieve a camera based on device number
- * \param[in] devnum Device number of camera to get
- *
- * This function is meant solely for the use of the V4L2 compatibility
- * layer, to map device nodes to Camera instances. Applications shall
- * not use it and shall instead retrieve cameras by name.
- *
- * Before calling this function the caller is responsible for ensuring that
- * the camera manager is running.
- *
- * \context This function is \threadsafe.
- *
- * \return Shared pointer to Camera object, which is empty if the camera is
- * not found
- */
-std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
-{
- Private *const d = _d();
-
- MutexLocker locker(d->mutex_);
-
- auto iter = d->camerasByDevnum_.find(devnum);
- if (iter == d->camerasByDevnum_.end())
- return nullptr;
-
- return iter->second.lock();
-}
-
-/**
* \var CameraManager::cameraAdded
* \brief Notify of a new camera added to the system
*
@@ -420,51 +411,6 @@ std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
*/
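A minimal hotplug monitoring sketch for these signals; the Monitor class is hypothetical, and since its slots are not bound to an Object they run in the camera manager's thread context.

#include <iostream>
#include <memory>

#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>

using namespace libcamera;

class Monitor
{
public:
	void added(std::shared_ptr<Camera> camera)
	{
		std::cout << "Camera added: " << camera->id() << std::endl;
	}

	void removed(std::shared_ptr<Camera> camera)
	{
		std::cout << "Camera removed: " << camera->id() << std::endl;
	}
};

int main()
{
	CameraManager cm;
	Monitor monitor;

	/* Connecting before start() also reports the initial cameras. */
	cm.cameraAdded.connect(&monitor, &Monitor::added);
	cm.cameraRemoved.connect(&monitor, &Monitor::removed);

	if (cm.start())
		return 1;

	/* ... run the application's event loop ... */

	cm.stop();
	return 0;
}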
/**
- * \brief Add a camera to the camera manager
- * \param[in] camera The camera to be added
- * \param[in] devnums The device numbers to associate with \a camera
- *
- * This function is called by pipeline handlers to register the cameras they
- * handle with the camera manager. Registered cameras are immediately made
- * available to the system.
- *
- * \a devnums are used by the V4L2 compatibility layer to map V4L2 device nodes
- * to Camera instances.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->addCamera(camera, devnums);
- cameraAdded.emit(camera);
-}
-
-/**
- * \brief Remove a camera from the camera manager
- * \param[in] camera The camera to be removed
- *
- * This function is called by pipeline handlers to unregister cameras from the
- * camera manager. Unregistered cameras won't be reported anymore by the
- * cameras() and get() calls, but references may still exist in applications.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::removeCamera(std::shared_ptr<Camera> camera)
-{
- Private *const d = _d();
-
- ASSERT(Thread::current() == d);
-
- d->removeCamera(camera.get());
- cameraRemoved.emit(camera);
-}
-
-/**
* \fn const std::string &CameraManager::version()
* \brief Retrieve the libcamera version string
* \context This function is \a threadsafe.
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
index 895e5c8e..3d1c456c 100644
--- a/src/libcamera/color_space.cpp
+++ b/src/libcamera/color_space.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.cpp - color spaces.
+ * color spaces.
*/
#include <libcamera/color_space.h>
@@ -12,6 +12,11 @@
#include <map>
#include <sstream>
#include <utility>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
/**
* \file color_space.h
@@ -29,21 +34,33 @@ namespace libcamera {
* (sometimes also referred to as the quantisation) of the color space.
*
* Certain combinations of these fields form well-known standard color
- * spaces such as "JPEG" or "REC709".
+ * spaces such as "sRGB" or "Rec709".
*
* In the strictest sense a "color space" formally only refers to the
* color primaries and white point. Here, however, the ColorSpace class
* adopts the common broader usage that includes the transfer function,
* Y'CbCr encoding method and quantisation.
*
- * For more information on the specific color spaces described here, please
- * see:
+ * More information on color spaces is available in the V4L2 documentation, see
+ * in particular
*
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-srgb">sRGB</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-jpeg">JPEG</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-smpte-170m">SMPTE 170M</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-rec709">Rec.709</a>
* - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-bt2020">Rec.2020</a>
+ *
+ * Note that there is no guarantee of a 1:1 mapping between color space names
+ * and definitions in libcamera and V4L2. Two notable differences are
+ *
+ * - The sRGB libcamera color space is defined for RGB formats only with no
+ * Y'CbCr encoding and a full quantization range, while the V4L2 SRGB color
+ * space has a Y'CbCr encoding and a limited quantization range.
+ * - The sYCC libcamera color space is called JPEG in V4L2 due to historical
+ * reasons.
+ *
+ * \todo Define the color space fully in the libcamera API to avoid referencing
+ * V4L2
*/
/**
@@ -118,11 +135,130 @@ namespace libcamera {
*/
/**
+ * \brief A constant representing a raw color space (from a sensor)
+ */
+const ColorSpace ColorSpace::Raw = {
+ Primaries::Raw,
+ TransferFunction::Linear,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sRGB color space (RGB formats only)
+ */
+const ColorSpace ColorSpace::Srgb = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sYCC color space, typically used for
+ * encoding JPEG images
+ */
+const ColorSpace ColorSpace::Sycc = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::Rec601,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the SMPTE170M color space
+ */
+const ColorSpace ColorSpace::Smpte170m = {
+ Primaries::Smpte170m,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec601,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.709 color space
+ */
+const ColorSpace ColorSpace::Rec709 = {
+ Primaries::Rec709,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec709,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.2020 color space
+ */
+const ColorSpace ColorSpace::Rec2020 = {
+ Primaries::Rec2020,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec2020,
+ Range::Limited
+};
+
+/**
+ * \var ColorSpace::primaries
+ * \brief The color primaries of this color space
+ */
+
+/**
+ * \var ColorSpace::transferFunction
+ * \brief The transfer function used by this color space
+ */
+
+/**
+ * \var ColorSpace::ycbcrEncoding
+ * \brief The Y'CbCr encoding used by this color space
+ */
+
+/**
+ * \var ColorSpace::range
+ * \brief The pixel range used by this color space
+ */
+
+namespace {
+
+const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
+ { ColorSpace::Raw, "RAW" },
+ { ColorSpace::Srgb, "sRGB" },
+ { ColorSpace::Sycc, "sYCC" },
+ { ColorSpace::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Rec709, "Rec709" },
+ { ColorSpace::Rec2020, "Rec2020" },
+} };
+
+const std::map<ColorSpace::Primaries, std::string> primariesNames = {
+ { ColorSpace::Primaries::Raw, "RAW" },
+ { ColorSpace::Primaries::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Primaries::Rec709, "Rec709" },
+ { ColorSpace::Primaries::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::TransferFunction, std::string> transferNames = {
+ { ColorSpace::TransferFunction::Linear, "Linear" },
+ { ColorSpace::TransferFunction::Srgb, "sRGB" },
+ { ColorSpace::TransferFunction::Rec709, "Rec709" },
+};
+
+const std::map<ColorSpace::YcbcrEncoding, std::string> encodingNames = {
+ { ColorSpace::YcbcrEncoding::None, "None" },
+ { ColorSpace::YcbcrEncoding::Rec601, "Rec601" },
+ { ColorSpace::YcbcrEncoding::Rec709, "Rec709" },
+ { ColorSpace::YcbcrEncoding::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::Range, std::string> rangeNames = {
+ { ColorSpace::Range::Full, "Full" },
+ { ColorSpace::Range::Limited, "Limited" },
+};
+
+} /* namespace */
+
+/**
* \brief Assemble and return a readable string representation of the
* ColorSpace
*
- * If the color space matches a standard ColorSpace (such as ColorSpace::Jpeg)
- * then the short name of the color space ("JPEG") is returned. Otherwise
+ * If the color space matches a standard ColorSpace (such as ColorSpace::Sycc)
+ * then the short name of the color space ("sYCC") is returned. Otherwise
* the four constituent parts of the ColorSpace are assembled into a longer
* string.
*
@@ -132,14 +268,6 @@ std::string ColorSpace::toString() const
{
/* Print out a brief name only for standard color spaces. */
- static const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
- { ColorSpace::Raw, "RAW" },
- { ColorSpace::Jpeg, "JPEG" },
- { ColorSpace::Srgb, "sRGB" },
- { ColorSpace::Smpte170m, "SMPTE170M" },
- { ColorSpace::Rec709, "Rec709" },
- { ColorSpace::Rec2020, "Rec2020" },
- } };
auto it = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
[this](const auto &item) {
return *this == item.first;
@@ -149,28 +277,6 @@ std::string ColorSpace::toString() const
/* Assemble a name made of the constituent fields. */
- static const std::map<Primaries, std::string> primariesNames = {
- { Primaries::Raw, "RAW" },
- { Primaries::Smpte170m, "SMPTE170M" },
- { Primaries::Rec709, "Rec709" },
- { Primaries::Rec2020, "Rec2020" },
- };
- static const std::map<TransferFunction, std::string> transferNames = {
- { TransferFunction::Linear, "Linear" },
- { TransferFunction::Srgb, "sRGB" },
- { TransferFunction::Rec709, "Rec709" },
- };
- static const std::map<YcbcrEncoding, std::string> encodingNames = {
- { YcbcrEncoding::None, "None" },
- { YcbcrEncoding::Rec601, "Rec601" },
- { YcbcrEncoding::Rec709, "Rec709" },
- { YcbcrEncoding::Rec2020, "Rec2020" },
- };
- static const std::map<Range, std::string> rangeNames = {
- { Range::Full, "Full" },
- { Range::Limited, "Limited" },
- };
-
auto itPrimaries = primariesNames.find(primaries);
std::string primariesName =
itPrimaries == primariesNames.end() ? "Invalid" : itPrimaries->second;
@@ -213,88 +319,185 @@ std::string ColorSpace::toString(const std::optional<ColorSpace> &colorSpace)
}
/**
- * \var ColorSpace::primaries
- * \brief The color primaries of this color space
- */
-
-/**
- * \var ColorSpace::transferFunction
- * \brief The transfer function used by this color space
- */
-
-/**
- * \var ColorSpace::ycbcrEncoding
- * \brief The Y'CbCr encoding used by this color space
- */
-
-/**
- * \var ColorSpace::range
- * \brief The pixel range used with by color space
- */
-
-/**
- * \brief A constant representing a raw color space (from a sensor)
- */
-const ColorSpace ColorSpace::Raw = {
- Primaries::Raw,
- TransferFunction::Linear,
- YcbcrEncoding::None,
- Range::Full
-};
-
-/**
- * \brief A constant representing the JPEG color space used for
- * encoding JPEG images
- */
-const ColorSpace ColorSpace::Jpeg = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Full
-};
-
-/**
- * \brief A constant representing the sRGB color space
+ * \brief Construct a color space from a string
+ * \param[in] str The string
*
- * This is identical to the JPEG color space except that the Y'CbCr
- * range is limited rather than full.
- */
-const ColorSpace ColorSpace::Srgb = {
- Primaries::Rec709,
- TransferFunction::Srgb,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the SMPTE170M color space
- */
-const ColorSpace ColorSpace::Smpte170m = {
- Primaries::Smpte170m,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec601,
- Range::Limited
-};
-
-/**
- * \brief A constant representing the Rec.709 color space
+ * The string \a str can contain the name of a well-known color space, or be
+ * made of the four color space components separated by a '/' character, ordered
+ * as
+ *
+ * \verbatim primaries '/' transferFunction '/' ycbcrEncoding '/' range \endverbatim
+ *
+ * Any failure to parse the string, either because it doesn't match the expected
+ * format, or because one of the names isn't recognized, will cause this
+ * function to return std::nullopt.
+ *
+ * \return The ColorSpace corresponding to the string, or std::nullopt if the
+ * string doesn't describe a known color space
*/
-const ColorSpace ColorSpace::Rec709 = {
- Primaries::Rec709,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec709,
- Range::Limited
-};
+std::optional<ColorSpace> ColorSpace::fromString(const std::string &str)
+{
+ /* First search for a standard color space name match. */
+ auto itColorSpace = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [&str](const auto &item) {
+ return str == item.second;
+ });
+ if (itColorSpace != colorSpaceNames.end())
+ return itColorSpace->first;
+
+ /*
+ * If not found, the string must contain the four color space
+ * components separated by a '/' character.
+ */
+ const auto &split = utils::split(str, "/");
+ std::vector<std::string> components{ split.begin(), split.end() };
+
+ if (components.size() != 4)
+ return std::nullopt;
+
+ ColorSpace colorSpace = ColorSpace::Raw;
+
+ /* Color primaries */
+ auto itPrimaries = std::find_if(primariesNames.begin(), primariesNames.end(),
+ [&components](const auto &item) {
+ return components[0] == item.second;
+ });
+ if (itPrimaries == primariesNames.end())
+ return std::nullopt;
+
+ colorSpace.primaries = itPrimaries->first;
+
+ /* Transfer function */
+ auto itTransfer = std::find_if(transferNames.begin(), transferNames.end(),
+ [&components](const auto &item) {
+ return components[1] == item.second;
+ });
+ if (itTransfer == transferNames.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->first;
+
+ /* YCbCr encoding */
+ auto itEncoding = std::find_if(encodingNames.begin(), encodingNames.end(),
+ [&components](const auto &item) {
+ return components[2] == item.second;
+ });
+ if (itEncoding == encodingNames.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itEncoding->first;
+
+ /* Quantization range */
+ auto itRange = std::find_if(rangeNames.begin(), rangeNames.end(),
+ [&components](const auto &item) {
+ return components[3] == item.second;
+ });
+ if (itRange == rangeNames.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->first;
+
+ return colorSpace;
+}
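A minimal sketch exercising both accepted forms of fromString(), based on the name tables above:

#include <cassert>
#include <optional>

#include <libcamera/color_space.h>

using namespace libcamera;

int main()
{
	/* A well-known color space name. */
	std::optional<ColorSpace> cs = ColorSpace::fromString("sYCC");
	assert(cs && *cs == ColorSpace::Sycc);

	/* primaries '/' transferFunction '/' ycbcrEncoding '/' range */
	cs = ColorSpace::fromString("Rec709/Rec709/Rec709/Limited");
	assert(cs && *cs == ColorSpace::Rec709);

	/* Unrecognized strings yield std::nullopt. */
	assert(!ColorSpace::fromString("NotAColorSpace"));

	return 0;
}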
/**
- * \brief A constant representing the Rec.2020 color space
+ * \brief Adjust the color space to match a pixel format
+ * \param[in] format The pixel format
+ *
+ * Not all combinations of pixel formats and color spaces make sense. For
+ * instance, nobody uses a limited quantization range with raw Bayer formats,
+ * and the YcbcrEncoding::None encoding isn't valid for YUV formats. This
+ * function adjusts the ColorSpace to make it compatible with the given \a
+ * format, by applying the following rules:
+ *
+ * - The color space for RAW formats must be Raw.
+ * - The Y'CbCr encoding and quantization range for RGB formats must be
+ * YcbcrEncoding::None and Range::Full respectively.
+ * - The Y'CbCr encoding for YUV formats must not be YcbcrEncoding::None. The
+ * best encoding is in that case guessed based on the primaries and transfer
+ * function.
+ *
+ * \return True if the color space has been adjusted, or false if it was
+ * already compatible with the format and hasn't been changed
*/
-const ColorSpace ColorSpace::Rec2020 = {
- Primaries::Rec2020,
- TransferFunction::Rec709,
- YcbcrEncoding::Rec2020,
- Range::Limited
-};
+bool ColorSpace::adjust(PixelFormat format)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+ bool adjusted = false;
+
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ /* Raw formats must use the raw color space. */
+ if (*this != ColorSpace::Raw) {
+ *this = ColorSpace::Raw;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * RGB formats can't have a Y'CbCr encoding, and must use full
+ * range quantization.
+ */
+ if (ycbcrEncoding != YcbcrEncoding::None) {
+ ycbcrEncoding = YcbcrEncoding::None;
+ adjusted = true;
+ }
+
+ if (range != Range::Full) {
+ range = Range::Full;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ if (ycbcrEncoding != YcbcrEncoding::None)
+ break;
+
+ /*
+ * YUV formats must have a Y'CbCr encoding. Infer the most
+ * probable option from the transfer function and primaries.
+ */
+ switch (transferFunction) {
+ case TransferFunction::Linear:
+ /*
+ * Linear YUV is not used in any standard color space,
+ * pick the widely supported and used Rec601 as default.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+
+ case TransferFunction::Rec709:
+ switch (primaries) {
+ /* Raw should never happen. */
+ case Primaries::Raw:
+ case Primaries::Smpte170m:
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ case Primaries::Rec709:
+ ycbcrEncoding = YcbcrEncoding::Rec709;
+ break;
+ case Primaries::Rec2020:
+ ycbcrEncoding = YcbcrEncoding::Rec2020;
+ break;
+ }
+ break;
+
+ case TransferFunction::Srgb:
+ /*
+ * Only the sYCC color space uses the sRGB transfer
+			 * function; the corresponding encoding is Rec601.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ }
+
+ adjusted = true;
+ break;
+ }
+
+ return adjusted;
+}
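A minimal sketch of adjust(), assuming the formats::RGB888 pixel format:

#include <cassert>

#include <libcamera/color_space.h>
#include <libcamera/formats.h>

using namespace libcamera;

int main()
{
	ColorSpace cs = ColorSpace::Sycc;

	/* RGB formats can't have a Y'CbCr encoding: sYCC becomes sRGB. */
	assert(cs.adjust(formats::RGB888));
	assert(cs == ColorSpace::Srgb);

	/* A second call finds the color space already compatible. */
	assert(!cs.adjust(formats::RGB888));

	return 0;
}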
/**
* \brief Compare color spaces for equality
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index 5fb1c2c3..d283c1c1 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -24,14 +24,7 @@ namespace controls {
${controls_doc}
-/**
- * \brief Namespace for libcamera draft controls
- */
-namespace draft {
-
-${draft_controls_doc}
-
-} /* namespace draft */
+${vendor_controls_doc}
#ifndef __DOXYGEN__
/*
@@ -40,11 +33,8 @@ ${draft_controls_doc}
*/
${controls_def}
-namespace draft {
-
-${draft_controls_def}
+${vendor_controls_def}
-} /* namespace draft */
#endif
/**
diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids_core.yaml
index ecab3ae9..bf1f1a83 100644
--- a/src/libcamera/control_ids.yaml
+++ b/src/libcamera/control_ids_core.yaml
@@ -2,10 +2,11 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
# Unless otherwise stated, all controls are bi-directional, i.e. they can be
# set through Request::controls() and returned out through Request::metadata().
+vendor: libcamera
controls:
- AeEnable:
type: bool
@@ -156,6 +157,79 @@ controls:
control of which features should be automatically adjusted shouldn't
better be handled through a separate AE mode control.
+ - AeFlickerMode:
+ type: int32_t
+ description: |
+ Set the flicker mode, which determines whether, and how, the AGC/AEC
+ algorithm attempts to hide flicker effects caused by the duty cycle of
+ artificial lighting.
+
+ Although implementation dependent, many algorithms for "flicker
+        avoidance" work by restricting the exposure time to integer multiples
+ of the cycle period, wherever possible.
+
+ Implementations may not support all of the flicker modes listed below.
+
+ By default the system will start in FlickerAuto mode if this is
+ supported, otherwise the flicker mode will be set to FlickerOff.
+
+ enum:
+ - name: FlickerOff
+ value: 0
+ description: No flicker avoidance is performed.
+ - name: FlickerManual
+ value: 1
+ description: Manual flicker avoidance.
+ Suppress flicker effects caused by lighting running with a period
+ specified by the AeFlickerPeriod control.
+ \sa AeFlickerPeriod
+ - name: FlickerAuto
+ value: 2
+ description: Automatic flicker period detection and avoidance.
+ The system will automatically determine the most likely value of
+ flicker period, and avoid flicker of this frequency. Once flicker
+ is being corrected, it is implementation dependent whether the
+ system is still able to detect a change in the flicker period.
+ \sa AeFlickerDetected
+
+ - AeFlickerPeriod:
+ type: int32_t
+ description: Manual flicker period in microseconds.
+ This value sets the current flicker period to avoid. It is used when
+ AeFlickerMode is set to FlickerManual.
+
+ To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
+ to 100Hz), or 8333 (120Hz) for 60Hz mains.
+
+ Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
+ set means that no flicker cancellation occurs (until the value of this
+ control is updated).
+
+ Switching to modes other than FlickerManual has no effect on the
+ value of the AeFlickerPeriod control.
+
+ \sa AeFlickerMode
+
+ - AeFlickerDetected:
+ type: int32_t
+ description: Flicker period detected in microseconds.
+ The value reported here indicates the currently detected flicker
+ period, or zero if no flicker at all is detected.
+
+ When AeFlickerMode is set to FlickerAuto, there may be a period during
+ which the value reported here remains zero. Once a non-zero value is
+ reported, then this is the flicker period that has been detected and is
+ now being cancelled.
+
+ In the case of 50Hz mains flicker, the value would be 10000
+ (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
+
+ It is implementation dependent whether the system can continue to detect
+ flicker of different periods when another frequency is already being
+ cancelled.
+
+ \sa AeFlickerMode
+
- Brightness:
type: float
description: |
@@ -275,12 +349,12 @@ controls:
type: int32_t
description: |
Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
- A larger FocusFoM value indicates a more in-focus frame. This control
- depends on the IPA to gather ISP statistics from the defined focus
- region, and combine them in a suitable way to generate a FocusFoM value.
- In this respect, it is not necessarily aimed at providing a way to
- implement a focus algorithm by the application, rather an indication of
- how in-focus a frame is.
+ A larger FocusFoM value indicates a more in-focus frame. This singular
+ value may be based on a combination of statistics gathered from
+ multiple focus regions within an image. The number of focus regions and
+ method of combination is platform dependent. In this respect, it is not
+ necessarily aimed at providing a way to implement a focus algorithm by
+ the application, rather an indication of how in-focus a frame is.
- ColourCorrectionMatrix:
type: float
@@ -291,7 +365,7 @@ controls:
transformation. The 3x3 matrix is stored in conventional reading
order in an array of 9 floating point values.
- size: [3x3]
+ size: [3,3]
- ScalerCrop:
type: Rectangle
@@ -333,8 +407,8 @@ controls:
- FrameDurationLimits:
type: int64_t
description: |
- The minimum and maximum (in that order) frame duration,
- expressed in microseconds.
+ The minimum and maximum (in that order) frame duration, expressed in
+ microseconds.
When provided by applications, the control specifies the sensor frame
duration interval the pipeline has to use. This limits the largest
@@ -343,7 +417,7 @@ controls:
the sensor will not be able to raise the exposure time above 33ms.
A fixed frame duration is achieved by setting the minimum and maximum
values to be the same. Setting both values to 0 reverts to using the
- IPA provided defaults.
+ camera defaults.
The maximum frame duration provides the absolute limit to the shutter
speed computed by the AE algorithm and it overrides any exposure mode
@@ -375,7 +449,7 @@ controls:
range of reported temperatures is device dependent.
The SensorTemperature control will only be returned in metadata if a
- themal sensor is present.
+ thermal sensor is present.
- SensorTimestamp:
type: int64_t
@@ -408,6 +482,13 @@ controls:
LensPosition control.
In this mode the AfState will always report AfStateIdle.
+
+ If the camera is started in AfModeManual, it will move the focus
+ lens to the position specified by the LensPosition control.
+
+ This mode is the recommended default value for the AfMode control.
+ External cameras (as reported by the Location property set to
+ CameraLocationExternal) may use a different default value.
- name: AfModeAuto
value: 1
description: |
@@ -591,25 +672,27 @@ controls:
AfModeManual, though the value is reported back unconditionally in all
modes.
- The units are a reciprocal distance scale like dioptres but normalised
- for the hyperfocal distance. That is, for a lens with hyperfocal
- distance H, and setting it to a focal distance D, the lens position LP,
- which is generally a non-integer, is given by
+ This value, which is generally a non-integer, is the reciprocal of the
+ focal distance in metres, also known as dioptres. That is, to set a
+ focal distance D, the lens position LP is given by
- \f$LP = \frac{H}{D}\f$
+ \f$LP = \frac{1\mathrm{m}}{D}\f$
For example:
0 moves the lens to infinity.
- 0.5 moves the lens to twice the hyperfocal distance.
- 1 moves the lens to the hyperfocal position.
- And larger values will focus the lens ever closer.
+ 0.5 moves the lens to focus on objects 2m away.
+ 2 moves the lens to focus on objects 50cm away.
+ And larger values will focus the lens closer.
- \todo Define a property to report the Hyperforcal distance of calibrated
- lenses.
+ The default value of the control should indicate a good general position
+ for the lens, often corresponding to the hyperfocal distance (the
+ closest position for which objects at infinity are still acceptably
+ sharp). The minimum will often be zero (meaning infinity), and the
+ maximum value defines the closest focus position.
- \todo Define a property to report the maximum and minimum positions of
- this lens. The minimum value will often be zero (meaning infinity).
+ \todo Define a property to report the Hyperfocal distance of calibrated
+ lenses.
- AfState:
type: int32_t
@@ -692,253 +775,94 @@ controls:
Continuous AF is paused. No further state changes or lens movements
will occur until the AfPauseResume control is sent.
- # ----------------------------------------------------------------------------
- # Draft controls section
-
- - AePrecaptureTrigger:
+ - HdrMode:
type: int32_t
- draft: true
description: |
- Control for AE metering trigger. Currently identical to
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+ Control to set the mode to be used for High Dynamic Range (HDR)
+ imaging. HDR techniques typically include multiple exposure, image
+ fusion and tone mapping techniques to improve the dynamic range of the
+ resulting images.
- Whether the camera device will trigger a precapture metering sequence
- when it processes this request.
- enum:
- - name: AePrecaptureTriggerIdle
- value: 0
- description: The trigger is idle.
- - name: AePrecaptureTriggerStart
- value: 1
- description: The pre-capture AE metering is started by the camera.
- - name: AePrecaptureTriggerCancel
- value: 2
- description: |
- The camera will cancel any active or completed metering sequence.
- The AE algorithm is reset to its initial state.
+ When using an HDR mode, images are captured with different sets of AGC
+ settings called HDR channels. Channels indicate in particular the type
+ of exposure (short, medium or long) used to capture the raw image,
+ before fusion. Each HDR image is tagged with the corresponding channel
+ using the HdrChannel control.
- - NoiseReductionMode:
- type: int32_t
- draft: true
- description: |
- Control to select the noise reduction algorithm mode. Currently
- identical to ANDROID_NOISE_REDUCTION_MODE.
+ \sa HdrChannel
- Mode of operation for the noise reduction algorithm.
enum:
- - name: NoiseReductionModeOff
+ - name: HdrModeOff
value: 0
- description: No noise reduction is applied
- - name: NoiseReductionModeFast
+ description: |
+ HDR is disabled. Metadata for this frame will not include the
+ HdrChannel control.
+ - name: HdrModeMultiExposureUnmerged
value: 1
description: |
- Noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeHighQuality
+ Multiple exposures will be generated in an alternating fashion.
+ However, they will not be merged together and will be returned to
+ the application as they are. Each image will be tagged with the
+ correct HDR channel, indicating what kind of exposure it is. The
+ tag should be the same as in the HdrModeMultiExposure case.
+
+ The expectation is that an application using this mode would merge
+ the frames to create HDR images for itself if it requires them.
+ - name: HdrModeMultiExposure
value: 2
description: |
- High quality noise reduction at the expense of frame rate.
- - name: NoiseReductionModeMinimal
+ Multiple exposures will be generated and merged to create HDR
+ images. Each image will be tagged with the HDR channel (long, medium
+ or short) that arrived and which caused this image to be output.
+
+ Systems that use two channels for HDR will return images tagged
+ alternately as the short and long channel. Systems that use three
+ channels for HDR will cycle through the short, medium and long
+ channel before repeating.
+ - name: HdrModeSingleExposure
value: 3
description: |
- Minimal noise reduction is applied without reducing the frame rate.
- - name: NoiseReductionModeZSL
- value: 4
- description: |
- Noise reduction is applied at different levels to different streams.
-
- - ColorCorrectionAberrationMode:
- type: int32_t
- draft: true
- description: |
- Control to select the color correction aberration mode. Currently
- identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
-
- Mode of operation for the chromatic aberration correction algorithm.
- enum:
- - name: ColorCorrectionAberrationOff
- value: 0
- description: No aberration correction is applied.
- - name: ColorCorrectionAberrationFast
- value: 1
- description: Aberration correction will not slow down the frame rate.
- - name: ColorCorrectionAberrationHighQuality
- value: 2
- description: |
- High quality aberration correction which might reduce the frame
- rate.
-
- - AeState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AE algorithm state. Currently identical to
- ANDROID_CONTROL_AE_STATE.
-
- Current state of the AE algorithm.
- enum:
- - name: AeStateInactive
- value: 0
- description: The AE algorithm is inactive.
- - name: AeStateSearching
- value: 1
- description: The AE algorithm has not converged yet.
- - name: AeStateConverged
- value: 2
- description: The AE algorithm has converged.
- - name: AeStateLocked
- value: 3
- description: The AE algorithm is locked.
- - name: AeStateFlashRequired
+ Multiple frames all at a single exposure will be used to create HDR
+ images. These images should be reported as all corresponding to the
+ HDR short channel.
+ - name: HdrModeNight
value: 4
- description: The AE algorithm would need a flash for good results
- - name: AeStatePrecapture
- value: 5
description: |
- The AE algorithm has started a pre-capture metering session.
- \sa AePrecaptureTrigger
-
- - AwbState:
- type: int32_t
- draft: true
- description: |
- Control to report the current AWB algorithm state. Currently identical
- to ANDROID_CONTROL_AWB_STATE.
-
- Current state of the AWB algorithm.
- enum:
- - name: AwbStateInactive
- value: 0
- description: The AWB algorithm is inactive.
- - name: AwbStateSearching
- value: 1
- description: The AWB algorithm has not converged yet.
- - name: AwbConverged
- value: 2
- description: The AWB algorithm has converged.
- - name: AwbLocked
- value: 3
- description: The AWB algorithm is locked.
-
- - SensorRollingShutterSkew:
- type: int64_t
- draft: true
- description: |
- Control to report the time between the start of exposure of the first
- row and the start of exposure of the last row. Currently identical to
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
-
- - LensShadingMapMode:
- type: int32_t
- draft: true
- description: |
- Control to report if the lens shading map is available. Currently
- identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
- enum:
- - name: LensShadingMapModeOff
- value: 0
- description: No lens shading map mode is available.
- - name: LensShadingMapModeOn
- value: 1
- description: The lens shading map mode is available.
-
- - SceneFlicker:
- type: int32_t
- draft: true
- description: |
- Control to report the detected scene light frequency. Currently
- identical to ANDROID_STATISTICS_SCENE_FLICKER.
- enum:
- - name: SceneFickerOff
- value: 0
- description: No flickering detected.
- - name: SceneFicker50Hz
- value: 1
- description: 50Hz flickering detected.
- - name: SceneFicker60Hz
- value: 2
- description: 60Hz flickering detected.
+ Multiple frames will be combined to produce "night mode" images. It
+ is up to the implementation exactly which HDR channels it uses, and
+ the images will all be tagged accordingly with the correct HDR
+ channel information.
- - PipelineDepth:
+ - HdrChannel:
type: int32_t
- draft: true
description: |
- Specifies the number of pipeline stages the frame went through from when
- it was exposed to when the final completed result was available to the
- framework. Always less than or equal to PipelineMaxDepth. Currently
- identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+ This value is reported back to the application so that it can discover
+ whether this capture corresponds to the short or long exposure image (or
+ any other image used by the HDR procedure). An application can monitor
+ the HDR channel to discover when the differently exposed images have
+ arrived.
- The typical value for this control is 3 as a frame is first exposed,
- captured and then processed in a single pass through the ISP. Any
- additional processing step performed after the ISP pass (in example face
- detection, additional format conversions etc) count as an additional
- pipeline stage.
+ This metadata is only available when an HDR mode has been enabled.
- - MaxLatency:
- type: int32_t
- draft: true
- description: |
- The maximum number of frames that can occur after a request (different
- than the previous) has been submitted, and before the result's state
- becomes synchronized. A value of -1 indicates unknown latency, and 0
- indicates per-frame control. Currently identical to
- ANDROID_SYNC_MAX_LATENCY.
+ \sa HdrMode
- - TestPatternMode:
- type: int32_t
- draft: true
- description: |
- Control to select the test pattern mode. Currently identical to
- ANDROID_SENSOR_TEST_PATTERN_MODE.
enum:
- - name: TestPatternModeOff
+ - name: HdrChannelNone
value: 0
description: |
- No test pattern mode is used. The camera device returns frames from
- the image sensor.
- - name: TestPatternModeSolidColor
+ This image does not correspond to any of the captures used to create
+ an HDR image.
+ - name: HdrChannelShort
value: 1
description: |
- Each pixel in [R, G_even, G_odd, B] is replaced by its respective
- color channel provided in test pattern data.
- \todo Add control for test pattern data.
- - name: TestPatternModeColorBars
+ This is a short exposure image.
+ - name: HdrChannelMedium
value: 2
description: |
- All pixel data is replaced with an 8-bar color pattern. The vertical
- bars (left-to-right) are as follows; white, yellow, cyan, green,
- magenta, red, blue and black. Each bar should take up 1/8 of the
- sensor pixel array width. When this is not possible, the bar size
- should be rounded down to the nearest integer and the pattern can
- repeat on the right side. Each bar's height must always take up the
- full sensor pixel array height.
- - name: TestPatternModeColorBarsFadeToGray
+ This is a medium exposure image.
+ - name: HdrChannelLong
value: 3
description: |
- The test pattern is similar to TestPatternModeColorBars,
- except that each bar should start at its specified color at the top
- and fade to gray at the bottom. Furthermore each bar is further
- subdevided into a left and right half. The left half should have a
- smooth gradient, and the right half should have a quantized
- gradient. In particular, the right half's should consist of blocks
- of the same color for 1/16th active sensor pixel array width. The
- least significant bits in the quantized gradient should be copied
- from the most significant bits of the smooth gradient. The height of
- each bar should always be a multiple of 128. When this is not the
- case, the pattern should repeat at the bottom of the image.
- - name: TestPatternModePn9
- value: 4
- description: |
- All pixel data is replaced by a pseudo-random sequence generated
- from a PN9 512-bit sequence (typically implemented in hardware with
- a linear feedback shift register). The generator should be reset at
- the beginning of each frame, and thus each subsequent raw frame with
- this test pattern should be exactly the same as the last.
- - name: TestPatternModeCustom1
- value: 256
- description: |
- The first custom test pattern. All custom patterns that are
- available only on this camera device are at least this numeric
- value. All of the custom test patterns will be static (that is the
- raw image must not vary from frame to frame).
+ This is a long exposure image.
...
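As a usage sketch for the AeFlickerMode and AeFlickerPeriod controls defined above, assuming the generated controls:: identifiers and that request points to a valid libcamera::Request:

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

void setupFlickerAvoidance(libcamera::Request *request)
{
	using namespace libcamera;

	ControlList &ctrls = request->controls();

	/* 50Hz mains lighting flickers at 100Hz, i.e. a 10000us period. */
	ctrls.set(controls::AeFlickerMode, controls::FlickerManual);
	ctrls.set(controls::AeFlickerPeriod, 10000);
}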
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
new file mode 100644
index 00000000..9bef5bf1
--- /dev/null
+++ b/src/libcamera/control_ids_draft.yaml
@@ -0,0 +1,230 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned out through Request::metadata().
+vendor: draft
+controls:
+ - AePrecaptureTrigger:
+ type: int32_t
+ description: |
+ Control for AE metering trigger. Currently identical to
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+
+ Whether the camera device will trigger a precapture metering sequence
+ when it processes this request.
+ enum:
+ - name: AePrecaptureTriggerIdle
+ value: 0
+ description: The trigger is idle.
+ - name: AePrecaptureTriggerStart
+ value: 1
+ description: The pre-capture AE metering is started by the camera.
+ - name: AePrecaptureTriggerCancel
+ value: 2
+ description: |
+ The camera will cancel any active or completed metering sequence.
+ The AE algorithm is reset to its initial state.
+
+ - NoiseReductionMode:
+ type: int32_t
+ description: |
+ Control to select the noise reduction algorithm mode. Currently
+ identical to ANDROID_NOISE_REDUCTION_MODE.
+
+ Mode of operation for the noise reduction algorithm.
+ enum:
+ - name: NoiseReductionModeOff
+ value: 0
+ description: No noise reduction is applied
+ - name: NoiseReductionModeFast
+ value: 1
+ description: |
+ Noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeHighQuality
+ value: 2
+ description: |
+ High quality noise reduction at the expense of frame rate.
+ - name: NoiseReductionModeMinimal
+ value: 3
+ description: |
+ Minimal noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeZSL
+ value: 4
+ description: |
+ Noise reduction is applied at different levels to different streams.
+
+ - ColorCorrectionAberrationMode:
+ type: int32_t
+ description: |
+ Control to select the color correction aberration mode. Currently
+ identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+ Mode of operation for the chromatic aberration correction algorithm.
+ enum:
+ - name: ColorCorrectionAberrationOff
+ value: 0
+ description: No aberration correction is applied.
+ - name: ColorCorrectionAberrationFast
+ value: 1
+ description: Aberration correction will not slow down the frame rate.
+ - name: ColorCorrectionAberrationHighQuality
+ value: 2
+ description: |
+ High quality aberration correction which might reduce the frame
+ rate.
+
+ - AeState:
+ type: int32_t
+ description: |
+ Control to report the current AE algorithm state. Currently identical to
+ ANDROID_CONTROL_AE_STATE.
+
+ Current state of the AE algorithm.
+ enum:
+ - name: AeStateInactive
+ value: 0
+ description: The AE algorithm is inactive.
+ - name: AeStateSearching
+ value: 1
+ description: The AE algorithm has not converged yet.
+ - name: AeStateConverged
+ value: 2
+ description: The AE algorithm has converged.
+ - name: AeStateLocked
+ value: 3
+ description: The AE algorithm is locked.
+ - name: AeStateFlashRequired
+ value: 4
+          description: The AE algorithm would need a flash for good results.
+ - name: AeStatePrecapture
+ value: 5
+ description: |
+ The AE algorithm has started a pre-capture metering session.
+ \sa AePrecaptureTrigger
+
+ - AwbState:
+ type: int32_t
+ description: |
+ Control to report the current AWB algorithm state. Currently identical
+ to ANDROID_CONTROL_AWB_STATE.
+
+ Current state of the AWB algorithm.
+ enum:
+ - name: AwbStateInactive
+ value: 0
+ description: The AWB algorithm is inactive.
+ - name: AwbStateSearching
+ value: 1
+ description: The AWB algorithm has not converged yet.
+ - name: AwbConverged
+ value: 2
+ description: The AWB algorithm has converged.
+ - name: AwbLocked
+ value: 3
+ description: The AWB algorithm is locked.
+
+ - SensorRollingShutterSkew:
+ type: int64_t
+ description: |
+ Control to report the time between the start of exposure of the first
+ row and the start of exposure of the last row. Currently identical to
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
+
+ - LensShadingMapMode:
+ type: int32_t
+ description: |
+ Control to report if the lens shading map is available. Currently
+ identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+ enum:
+ - name: LensShadingMapModeOff
+ value: 0
+ description: No lens shading map mode is available.
+ - name: LensShadingMapModeOn
+ value: 1
+ description: The lens shading map mode is available.
+
+ - PipelineDepth:
+ type: int32_t
+ description: |
+ Specifies the number of pipeline stages the frame went through from when
+ it was exposed to when the final completed result was available to the
+ framework. Always less than or equal to PipelineMaxDepth. Currently
+ identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+ The typical value for this control is 3 as a frame is first exposed,
+ captured and then processed in a single pass through the ISP. Any
+        additional processing step performed after the ISP pass (for example
+        face detection, additional format conversions etc.) counts as an
+        additional pipeline stage.
+
+ - MaxLatency:
+ type: int32_t
+ description: |
+ The maximum number of frames that can occur after a request (different
+ than the previous) has been submitted, and before the result's state
+ becomes synchronized. A value of -1 indicates unknown latency, and 0
+ indicates per-frame control. Currently identical to
+ ANDROID_SYNC_MAX_LATENCY.
+
+ - TestPatternMode:
+ type: int32_t
+ description: |
+ Control to select the test pattern mode. Currently identical to
+ ANDROID_SENSOR_TEST_PATTERN_MODE.
+ enum:
+ - name: TestPatternModeOff
+ value: 0
+ description: |
+ No test pattern mode is used. The camera device returns frames from
+ the image sensor.
+ - name: TestPatternModeSolidColor
+ value: 1
+ description: |
+ Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+ color channel provided in test pattern data.
+ \todo Add control for test pattern data.
+ - name: TestPatternModeColorBars
+ value: 2
+ description: |
+ All pixel data is replaced with an 8-bar color pattern. The vertical
+ bars (left-to-right) are as follows; white, yellow, cyan, green,
+ magenta, red, blue and black. Each bar should take up 1/8 of the
+ sensor pixel array width. When this is not possible, the bar size
+ should be rounded down to the nearest integer and the pattern can
+ repeat on the right side. Each bar's height must always take up the
+ full sensor pixel array height.
+ - name: TestPatternModeColorBarsFadeToGray
+ value: 3
+ description: |
+ The test pattern is similar to TestPatternModeColorBars,
+ except that each bar should start at its specified color at the top
+ and fade to gray at the bottom. Furthermore each bar is further
+ subdevided into a left and right half. The left half should have a
+          subdivided into a left and right half. The left half should have a
+          smooth gradient, and the right half should have a quantized
+          gradient. In particular, the right half should consist of blocks
+ least significant bits in the quantized gradient should be copied
+ from the most significant bits of the smooth gradient. The height of
+ each bar should always be a multiple of 128. When this is not the
+ case, the pattern should repeat at the bottom of the image.
+ - name: TestPatternModePn9
+ value: 4
+ description: |
+ All pixel data is replaced by a pseudo-random sequence generated
+ from a PN9 512-bit sequence (typically implemented in hardware with
+ a linear feedback shift register). The generator should be reset at
+ the beginning of each frame, and thus each subsequent raw frame with
+ this test pattern should be exactly the same as the last.
+ - name: TestPatternModeCustom1
+ value: 256
+ description: |
+ The first custom test pattern. All custom patterns that are
+ available only on this camera device are at least this numeric
+ value. All of the custom test patterns will be static (that is the
+ raw image must not vary from frame to frame).
+
+...
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
new file mode 100644
index 00000000..cb097f88
--- /dev/null
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Raspberry Pi (VC4 and PiSP) specific vendor controls
+vendor: rpi
+controls:
+ - StatsOutputEnable:
+ type: bool
+ description: |
+ Toggles the Raspberry Pi IPA to output a binary dump of the hardware
+ generated statistics through the Request metadata in the Bcm2835StatsOutput
+ control.
+
+ \sa Bcm2835StatsOutput
+
+ - Bcm2835StatsOutput:
+ type: uint8_t
+ size: [n]
+ description: |
+ Span of the BCM2835 ISP generated statistics for the current frame. This
+ is sent in the Request metadata if the StatsOutputEnable is set to true.
+ The statistics struct definition can be found in include/linux/bcm2835-isp.h.
+
+ \sa StatsOutputEnable
+
+...
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
new file mode 100644
index 00000000..d42447d0
--- /dev/null
+++ b/src/libcamera/control_ranges.yaml
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Specifies the control id ranges/offsets for core/draft libcamera and vendor
+# controls and properties.
+ranges:
+ # Core libcamera controls
+ libcamera: 0
+ # Draft designated libcamera controls
+ draft: 10000
+ # Raspberry Pi vendor controls
+ rpi: 20000
+ # Next range starts at 30000
+
+...
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index e87d2362..52fd714f 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
#include "libcamera/internal/control_serializer.h"
@@ -144,7 +144,7 @@ void ControlSerializer::reset()
size_t ControlSerializer::binarySize(const ControlValue &value)
{
- return value.data().size_bytes();
+ return sizeof(ControlType) + value.data().size_bytes();
}
size_t ControlSerializer::binarySize(const ControlInfo &info)
@@ -195,6 +195,8 @@ size_t ControlSerializer::binarySize(const ControlList &list)
void ControlSerializer::store(const ControlValue &value,
ByteStreamBuffer &buffer)
{
+ const ControlType type = value.type();
+ buffer.write(&type);
buffer.write(value.data());
}
@@ -379,11 +381,13 @@ int ControlSerializer::serialize(const ControlList &list,
return 0;
}
-ControlValue ControlSerializer::loadControlValue(ControlType type,
- ByteStreamBuffer &buffer,
+ControlValue ControlSerializer::loadControlValue(ByteStreamBuffer &buffer,
bool isArray,
unsigned int count)
{
+ ControlType type;
+ buffer.read(&type);
+
ControlValue value;
value.reserve(type, isArray, count);
@@ -392,15 +396,11 @@ ControlValue ControlSerializer::loadControlValue(ControlType type,
return value;
}
-ControlInfo ControlSerializer::loadControlInfo(ControlType type,
- ByteStreamBuffer &b)
+ControlInfo ControlSerializer::loadControlInfo(ByteStreamBuffer &b)
{
- if (type == ControlTypeString)
- type = ControlTypeInteger32;
-
- ControlValue min = loadControlValue(type, b);
- ControlValue max = loadControlValue(type, b);
- ControlValue def = loadControlValue(type, b);
+ ControlValue min = loadControlValue(b);
+ ControlValue max = loadControlValue(b);
+ ControlValue def = loadControlValue(b);
return ControlInfo(min, max, def);
}
@@ -513,7 +513,7 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
/* Create and store the ControlInfo. */
- ctrls.emplace(controlId, loadControlInfo(type, values));
+ ctrls.emplace(controlId, loadControlInfo(values));
}
/*
@@ -624,10 +624,8 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- ControlType type = static_cast<ControlType>(entry->type);
ctrls.set(entry->id,
- loadControlValue(type, values, entry->is_array,
- entry->count));
+ loadControlValue(values, entry->is_array, entry->count));
}
return ctrls;
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index cf08b34a..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
#include "libcamera/internal/control_validator.h"
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 03ac6345..11d35321 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
@@ -677,6 +677,9 @@ ControlInfoMap::ControlInfoMap(Map &&info, const ControlIdMap &idmap)
bool ControlInfoMap::validate()
{
+ if (!idmap_)
+ return false;
+
for (const auto &ctrl : *this) {
const ControlId *id = ctrl.first;
auto it = idmap_->find(id->id());
@@ -719,6 +722,8 @@ bool ControlInfoMap::validate()
*/
ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -729,6 +734,8 @@ ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
*/
const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
{
+ ASSERT(idmap_);
+
return at(idmap_->at(id));
}
@@ -739,6 +746,9 @@ const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
*/
ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
{
+ if (!idmap_)
+ return 0;
+
/*
* The ControlInfoMap and its idmap have a 1:1 mapping between their
* entries, we can thus just count the matching entries in idmap to
@@ -755,6 +765,9 @@ ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
*/
ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -770,6 +783,9 @@ ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
*/
ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
{
+ if (!idmap_)
+ return end();
+
auto iter = idmap_->find(id);
if (iter == idmap_->end())
return end();
@@ -892,12 +908,26 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
*/
/**
+ * \enum ControlList::MergePolicy
+ * \brief The policy used by the merge function
+ *
+ * \var ControlList::MergePolicy::KeepExisting
+ * \brief Existing controls in the target list are kept
+ *
+ * \var ControlList::MergePolicy::OverwriteExisting
+ * \brief Existing controls in the target list are updated
+ */
+
+/**
* \brief Merge the \a source into the ControlList
* \param[in] source The ControlList to merge into this object
+ * \param[in] policy Controls whether existing elements in *this shall be
+ * overwritten
*
* Merging two control lists copies elements from the \a source and inserts
* them in *this. If the \a source contains elements whose key is already
- * present in *this, then those elements are not overwritten.
+ * present in *this, then those elements are only overwritten if
+ * \a policy is MergePolicy::OverwriteExisting.
*
* Only control lists created from the same ControlIdMap or ControlInfoMap may
* be merged. Attempting to do otherwise results in undefined behaviour.
@@ -905,10 +935,10 @@ ControlList::ControlList(const ControlInfoMap &infoMap,
* \todo Reimplement or implement an overloaded version which internally uses
* std::unordered_map::merge() and accepts a non-const argument.
*/
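A minimal sketch of the two policies, using the core Brightness control; both lists derive from the same ControlIdMap as required, and the declaration is assumed to default policy to MergePolicy::KeepExisting.

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

void mergeExample()
{
	ControlList a(controls::controls);
	ControlList b(controls::controls);

	a.set(controls::Brightness, 0.5f);
	b.set(controls::Brightness, -0.5f);

	/* Default policy: the Brightness already in a is kept (0.5). */
	a.merge(b);

	/* OverwriteExisting: a now takes the value from b (-0.5). */
	a.merge(b, ControlList::MergePolicy::OverwriteExisting);
}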
-void ControlList::merge(const ControlList &source)
+void ControlList::merge(const ControlList &source, MergePolicy policy)
{
/**
- * \todo: ASSERT that the current and source ControlList are derived
+ * \todo ASSERT that the current and source ControlList are derived
* from a compatible ControlIdMap, to prevent undefined behaviour due to
* id collisions.
*
@@ -920,7 +950,7 @@ void ControlList::merge(const ControlList &source)
*/
for (const auto &ctrl : source) {
- if (contains(ctrl.first)) {
+ if (policy == MergePolicy::KeepExisting && contains(ctrl.first)) {
const ControlId *id = idmap_->at(ctrl.first);
LOG(Controls, Warning)
<< "Control " << id->name() << " not overwritten";
@@ -933,17 +963,6 @@ void ControlList::merge(const ControlList &source)
/**
* \brief Check if the list contains a control with the specified \a id
- * \param[in] id The control ID
- *
- * \return True if the list contains a matching control, false otherwise
- */
-bool ControlList::contains(const ControlId &id) const
-{
- return controls_.find(id.id()) != controls_.end();
-}
-
-/**
- * \brief Check if the list contains a control with the specified \a id
* \param[in] id The control numerical ID
*
* \return True if the list contains a matching control, false otherwise
@@ -954,22 +973,20 @@ bool ControlList::contains(unsigned int id) const
}
/**
- * \fn template<typename T> T ControlList::get(const Control<T> &ctrl) const
+ * \fn ControlList::get(const Control<T> &ctrl) const
* \brief Get the value of control \a ctrl
* \param[in] ctrl The control
*
- * The behaviour is undefined if the control \a ctrl is not present in the
- * list. Use ControlList::contains() to test for the presence of a control in
- * the list before retrieving its value.
- *
- * The control value type shall match the type T, otherwise the behaviour is
- * undefined.
+ * Beside getting the value of a control, this function can also be used to
+ * check if a control is present in the ControlList by converting the returned
+ * std::optional<T> to bool (or calling its has_value() function).
*
- * \return The control value
+ * \return A std::optional<T> containing the control value, or std::nullopt if
+ * the control \a ctrl is not present in the list
*/
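A minimal sketch of the std::optional based get(); request is assumed to point to a completed libcamera::Request.

#include <iostream>
#include <optional>

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

void printExposure(libcamera::Request *request)
{
	using namespace libcamera;

	const ControlList &metadata = request->metadata();

	/* The returned std::optional doubles as a presence check. */
	std::optional<int32_t> exposure = metadata.get(controls::ExposureTime);
	if (exposure)
		std::cout << "ExposureTime: " << *exposure << " us" << std::endl;
	else
		std::cout << "ExposureTime not reported" << std::endl;
}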
/**
- * \fn template<typename T, typename V> void ControlList::set(const Control<T> &ctrl, const V &value)
+ * \fn ControlList::set(const Control<T> &ctrl, const V &value)
* \brief Set the control \a ctrl value to \a value
* \param[in] ctrl The control
* \param[in] value The control value
@@ -983,8 +1000,7 @@ bool ControlList::contains(unsigned int id) const
*/
/**
- * \fn template<typename T, typename V> \
- * void ControlList::set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ * \fn ControlList::set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
* \copydoc ControlList::set(const Control<T> &ctrl, const V &value)
*/
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
new file mode 100644
index 00000000..d3d38c1b
--- /dev/null
+++ b/src/libcamera/converter.cpp
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#include "libcamera/internal/converter.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
+
+/**
+ * \file internal/converter.h
+ * \brief Abstract converter
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Converter)
+
+/**
+ * \class Converter
+ * \brief Abstract base class for converters
+ *
+ * The Converter class is an abstract base class defining the interface of
+ * converter implementations.
+ *
+ * Converters offer scaling and pixel format conversion services on an input
+ * stream. The converter can output multiple streams with individual conversion
+ * parameters from the same input stream.
+ */
+
+/**
+ * \brief Construct a Converter instance
+ * \param[in] media The media device implementing the converter
+ *
+ * This searches for the entity implementing the data streaming function in the
+ * media graph entities and uses its device node as the converter device node.
+ */
+Converter::Converter(MediaDevice *media)
+{
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto it = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *entity) {
+ return entity->function() == MEDIA_ENT_F_IO_V4L;
+ });
+ if (it == entities.end()) {
+ LOG(Converter, Error)
+ << "No entity suitable for implementing a converter in "
+ << media->driver() << " entities list.";
+ return;
+ }
+
+ deviceNode_ = (*it)->deviceNode();
+}
+
+Converter::~Converter()
+{
+}
+
+/**
+ * \fn Converter::loadConfiguration()
+ * \brief Load converter configuration from file
+ * \param[in] filename The file name path
+ *
+ * Load converter dependent configuration parameters to apply on the hardware.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isValid()
+ * \brief Check if the converter configuration is valid
+ * \return True if the converter is valid, false otherwise
+ */
+
+/**
+ * \fn Converter::formats()
+ * \brief Retrieve the list of supported pixel formats for an input pixel format
+ * \param[in] input Input pixel format to retrieve output pixel format list for
+ * \return The list of supported output pixel formats
+ */
+
+/**
+ * \fn Converter::sizes()
+ * \brief Retrieve the range of minimum and maximum output sizes for an input size
+ * \param[in] input Input stream size to retrieve range for
+ * \return A range of output image sizes
+ */
+
+/**
+ * \fn Converter::strideAndFrameSize()
+ * \brief Retrieve the output stride and frame size for an input configuration
+ * \param[in] pixelFormat Input stream pixel format
+ * \param[in] size Input stream size
+ * \return A tuple indicating the stride and frame size or an empty tuple on error
+ */
+
+/**
+ * \fn Converter::configure()
+ * \brief Configure a set of output stream conversions from an input stream
+ * \param[in] inputCfg Input stream configuration
+ * \param[out] outputCfgs A list of output stream configurations
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::exportBuffers()
+ * \brief Export buffers from the converter device
+ * \param[in] output Output stream index exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store allocated buffers
+ *
+ * This function operates similarly to V4L2VideoDevice::exportBuffers() on the
+ * output stream indicated by the \a output index.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+
+/**
+ * \fn Converter::start()
+ * \brief Start the converter streaming operation
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::stop()
+ * \brief Stop the converter streaming operation
+ */
+
+/**
+ * \fn Converter::queueBuffers()
+ * \brief Queue buffers to the converter device
+ * \param[in] input The frame buffer to convert
+ * \param[out] outputs The container holding the output stream indexes and
+ * their respective output frame buffers
+ *
+ * This function queues the \a input frame buffer for conversion on the output
+ * streams identified by the \a outputs map keys, and captures the results in
+ * the frame buffers given by the corresponding map values.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \var Converter::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var Converter::outputBufferReady
+ * \brief A signal emitted when a frame buffer on the output queue completes
+ */
+
+/**
+ * \fn Converter::deviceNode()
+ * \brief The converter device node attribute accessor
+ * \return The converter device node string
+ */
+
+/**
+ * \class ConverterFactoryBase
+ * \brief Base class for converter factories
+ *
+ * The ConverterFactoryBase class is the base of all specializations of the
+ * ConverterFactory class template. It implements the factory registration,
+ * maintains a registry of factories, and provides access to the registered
+ * factories.
+ */
+
+/**
+ * \brief Construct a converter factory base
+ * \param[in] name Name of the converter class
+ * \param[in] compatibles Name aliases of the converter class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used as a unique identifier. If the converter
+ * implementation fully relies on a generic framework, the name should be the
+ * same as the framework's. Otherwise, if the implementation is specialized,
+ * the factory name should match the name of the driver implementing the
+ * function.
+ *
+ * The factory \a compatibles holds a list of driver names implementing a
+ * generic subsystem without any customization.
+ */
+ConverterFactoryBase::ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles)
+ : name_(name), compatibles_(compatibles)
+{
+ registerType(this);
+}
+
+/**
+ * \fn ConverterFactoryBase::compatibles()
+ * \return The list of compatible name aliases of the converter
+ */
+
+/**
+ * \brief Create an instance of the converter corresponding to the media device
+ * \param[in] media The media device to create the converter for
+ *
+ * The converter is created by matching the factory name or any of its
+ * compatible aliases with the media device driver name.
+ *
+ * \return A new instance of the converter subclass corresponding to the media
+ * device, or null if the media device driver name doesn't match anything
+ */
+std::unique_ptr<Converter> ConverterFactoryBase::create(MediaDevice *media)
+{
+ const std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (const ConverterFactoryBase *factory : factories) {
+ const std::vector<std::string> &compatibles = factory->compatibles();
+ auto it = std::find(compatibles.begin(), compatibles.end(), media->driver());
+
+ if (it == compatibles.end() && media->driver() != factory->name_)
+ continue;
+
+ LOG(Converter, Debug)
+ << "Creating converter from "
+ << factory->name_ << " factory with "
+ << (it == compatibles.end() ? "no" : media->driver()) << " alias.";
+
+ std::unique_ptr<Converter> converter = factory->createInstance(media);
+ if (converter->isValid())
+ return converter;
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a converter factory to the registry
+ * \param[in] factory Factory to use to construct the converter class
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the converter
+ * factory name.
+ */
+void ConverterFactoryBase::registerType(ConverterFactoryBase *factory)
+{
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all converter factory names
+ * \return The list of all converter factory names
+ */
+std::vector<std::string> ConverterFactoryBase::names()
+{
+ std::vector<std::string> list;
+
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (ConverterFactoryBase *factory : factories) {
+ list.push_back(factory->name_);
+ for (const auto &alias : factory->compatibles())
+ list.push_back(alias);
+ }
+
+ return list;
+}
+
+/**
+ * \brief Retrieve the list of all converter factories
+ * \return The list of converter factories
+ */
+std::vector<ConverterFactoryBase *> &ConverterFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<ConverterFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \var ConverterFactoryBase::name_
+ * \brief The name of the factory
+ */
+
+/**
+ * \var ConverterFactoryBase::compatibles_
+ * \brief The list holding the factory compatibles
+ */
+
+/**
+ * \class ConverterFactory
+ * \brief Registration of ConverterFactory classes and creation of instances
+ * \param _Converter The converter class type for this factory
+ *
+ * To facilitate discovery and instantiation of Converter classes, the
+ * ConverterFactory class implements auto-registration of converter helpers.
+ * Each Converter subclass shall register itself using the REGISTER_CONVERTER()
+ * macro, which will create a corresponding instance of a ConverterFactory
+ * subclass and register it with the static list of factories.
+ */
+
+/**
+ * \fn ConverterFactory::ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ * \brief Construct a converter factory
+ * \details \copydetails ConverterFactoryBase::ConverterFactoryBase
+ */
+
+/**
+ * \fn ConverterFactory::createInstance() const
+ * \brief Create an instance of the Converter corresponding to the factory
+ * \param[in] media Media device pointer
+ * \return A unique pointer to a newly constructed instance of the Converter
+ * subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CONVERTER
+ * \brief Register a converter with the Converter factory
+ * \param[in] name Converter name used to register the class
+ * \param[in] converter Class name of Converter derived class to register
+ * \param[in] compatibles List of compatible names
+ *
+ * Register a Converter subclass with the factory and make it available for
+ * matching converters against media devices.
+ */
+
+} /* namespace libcamera */
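As a sketch of how the factory pieces above fit together, a hypothetical
converter would register itself as follows (MyConverter and the "my-driver"
compatible are invented names; the pure virtual Converter interface is elided):

    /* Hypothetical converter subclass built on the abstract interface above. */
    class MyConverter : public Converter
    {
    public:
    	MyConverter(MediaDevice *media)
    		: Converter(media)
    	{
    	}

    	/* ... overrides of formats(), sizes(), configure(), ... */
    };

    static std::initializer_list<std::string> myCompatibles = {
    	"my-driver",
    };

    REGISTER_CONVERTER("my_converter", MyConverter, myCompatibles)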
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
index 77c44fc8..d8929fc5 100644
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
*
- * converter.cpp - Format converter for simple pipeline handler
+ * V4L2 M2M Format converter
*/
-#include "converter.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
#include <algorithm>
#include <limits.h>
@@ -21,18 +22,23 @@
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_videodevice.h"
+/**
+ * \file internal/converter/converter_v4l2_m2m.h
+ * \brief V4L2 M2M based converter
+ */
+
namespace libcamera {
-LOG_DECLARE_CATEGORY(SimplePipeline)
+LOG_DECLARE_CATEGORY(Converter)
/* -----------------------------------------------------------------------------
- * SimpleConverter::Stream
+ * V4L2M2MConverter::Stream
*/
-SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
+V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
: converter_(converter), index_(index)
{
- m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode_);
+ m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
@@ -42,11 +48,11 @@ SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
m2m_.reset();
}
-int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
+int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
- V4L2PixelFormat::fromPixelFormat(inputCfg.pixelFormat);
+ m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
V4L2DeviceFormat format;
format.fourcc = videoFormat;
@@ -56,14 +62,14 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set input format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != inputCfg.size ||
format.planes[0].bpl != inputCfg.stride) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Input format not supported (requested "
<< inputCfg.size << "-" << videoFormat
<< ", got " << format << ")";
@@ -71,20 +77,20 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
}
/* Set the pixel format and size on the output. */
- videoFormat = V4L2PixelFormat::fromPixelFormat(outputCfg.pixelFormat);
+ videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
format = {};
format.fourcc = videoFormat;
format.size = outputCfg.size;
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set output format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != outputCfg.size) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Output format not supported";
return -EINVAL;
}
@@ -95,13 +101,13 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int SimpleConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
-int SimpleConverter::Stream::start()
+int V4L2M2MConverter::Stream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
@@ -128,7 +134,7 @@ int SimpleConverter::Stream::start()
return 0;
}
-void SimpleConverter::Stream::stop()
+void V4L2M2MConverter::Stream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
@@ -136,8 +142,7 @@ void SimpleConverter::Stream::stop()
m2m_->output()->releaseBuffers();
}
-int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
- FrameBuffer *output)
+int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
@@ -150,12 +155,12 @@ int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
return 0;
}
-std::string SimpleConverter::Stream::logPrefix() const
+std::string V4L2M2MConverter::Stream::logPrefix() const
{
return "stream" + std::to_string(index_);
}
-void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
@@ -167,32 +172,34 @@ void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
}
}
-void SimpleConverter::Stream::captureBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
/* -----------------------------------------------------------------------------
- * SimpleConverter
+ * V4L2M2MConverter
*/
-SimpleConverter::SimpleConverter(MediaDevice *media)
+/**
+ * \class libcamera::V4L2M2MConverter
+ * \brief The V4L2 M2M converter implements the converter interface based on
+ * a V4L2 M2M device.
+ */
+
+/**
+ * \fn V4L2M2MConverter::V4L2M2MConverter
+ * \brief Construct a V4L2M2MConverter instance
+ * \param[in] media The media device implementing the converter
+ */
+
+V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
+ : Converter(media)
{
- /*
- * Locate the video node. There's no need to validate the pipeline
- * further, the caller guarantees that this is a V4L2 mem2mem device.
- */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto it = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->function() == MEDIA_ENT_F_IO_V4L;
- });
- if (it == entities.end())
+ if (deviceNode().empty())
return;
- deviceNode_ = (*it)->deviceNode();
-
- m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode_);
+ m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
int ret = m2m_->open();
if (ret < 0) {
m2m_.reset();
@@ -200,7 +207,21 @@ SimpleConverter::SimpleConverter(MediaDevice *media)
}
}
-std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
+/**
+ * \fn libcamera::V4L2M2MConverter::loadConfiguration
+ * \details \copydetails libcamera::Converter::loadConfiguration
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::isValid
+ * \details \copydetails libcamera::Converter::isValid
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::formats
+ * \details \copydetails libcamera::Converter::formats
+ */
+std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
{
if (!m2m_)
return {};
@@ -210,16 +231,22 @@ std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
* enumerate the conversion capabilities on its output (V4L2 capture).
*/
V4L2DeviceFormat v4l2Format;
- v4l2Format.fourcc = V4L2PixelFormat::fromPixelFormat(input);
+ v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
v4l2Format.size = { 1, 1 };
int ret = m2m_->output()->setFormat(&v4l2Format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
+ if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
+ LOG(Converter, Debug)
+ << "Input format " << input << " not supported.";
+ return {};
+ }
+
std::vector<PixelFormat> pixelFormats;
for (const auto &format : m2m_->capture()->formats()) {
@@ -231,7 +258,10 @@ std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
return pixelFormats;
}
-SizeRange SimpleConverter::sizes(const Size &input)
+/**
+ * \copydoc libcamera::Converter::sizes
+ */
+SizeRange V4L2M2MConverter::sizes(const Size &input)
{
if (!m2m_)
return {};
@@ -246,7 +276,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -256,7 +286,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
format.size = { 1, 1 };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -266,7 +296,7 @@ SizeRange SimpleConverter::sizes(const Size &input)
format.size = { UINT_MAX, UINT_MAX };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
@@ -276,12 +306,15 @@ SizeRange SimpleConverter::sizes(const Size &input)
return sizes;
}
+/**
+ * \copydoc libcamera::Converter::strideAndFrameSize
+ */
std::tuple<unsigned int, unsigned int>
-SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
- const Size &size)
+V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
+ const Size &size)
{
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(pixelFormat);
+ format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
format.size = size;
int ret = m2m_->capture()->tryFormat(&format);
@@ -291,8 +324,11 @@ SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
}
-int SimpleConverter::configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+/**
+ * \copydoc libcamera::Converter::configure
+ */
+int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
{
int ret = 0;
@@ -303,7 +339,7 @@ int SimpleConverter::configure(const StreamConfiguration &inputCfg,
Stream &stream = streams_.emplace_back(this, i);
if (!stream.isValid()) {
- LOG(SimplePipeline, Error)
+ LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
@@ -322,8 +358,11 @@ int SimpleConverter::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+/**
+ * \copydoc libcamera::Converter::exportBuffers
+ */
+int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
if (output >= streams_.size())
return -EINVAL;
@@ -331,7 +370,10 @@ int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
return streams_[output].exportBuffers(count, buffers);
}
-int SimpleConverter::start()
+/**
+ * \copydoc libcamera::Converter::start
+ */
+int V4L2M2MConverter::start()
{
int ret;
@@ -346,14 +388,20 @@ int SimpleConverter::start()
return 0;
}
-void SimpleConverter::stop()
+/**
+ * \copydoc libcamera::Converter::stop
+ */
+void V4L2M2MConverter::stop()
{
for (Stream &stream : utils::reverse(streams_))
stream.stop();
}
-int SimpleConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+/**
+ * \copydoc libcamera::Converter::queueBuffers
+ */
+int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs)
{
unsigned int mask = 0;
int ret;
@@ -396,4 +444,11 @@ int SimpleConverter::queueBuffers(FrameBuffer *input,
return 0;
}
+static std::initializer_list<std::string> compatibles = {
+ "mtk-mdp",
+ "pxp",
+};
+
+REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
+
} /* namespace libcamera */
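On the pipeline-handler side, a converter is instantiated and driven roughly as
follows (a sketch only; media, inputBuffer and outputBuffer are assumed to come
from the surrounding pipeline code, after configure() and buffer allocation):

    std::unique_ptr<Converter> converter = ConverterFactoryBase::create(media);
    if (!converter)
    	return; /* No registered factory matched media->driver(). */

    /* Queue one conversion: input frame -> output stream 0. */
    std::map<unsigned int, FrameBuffer *> outputs{ { 0, outputBuffer } };
    converter->queueBuffers(inputBuffer, outputs);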
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
new file mode 100644
index 00000000..2aa72fe4
--- /dev/null
+++ b/src/libcamera/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'converter_v4l2_m2m.cpp'
+])
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
index 9667187e..94d0a575 100644
--- a/src/libcamera/delayed_controls.cpp
+++ b/src/libcamera/delayed_controls.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#include "libcamera/internal/delayed_controls.h"
@@ -115,8 +115,6 @@ DelayedControls::DelayedControls(V4L2Device *device,
*/
void DelayedControls::reset()
{
- running_ = false;
- firstSequence_ = 0;
queueCount_ = 1;
writeCount_ = 0;
@@ -204,8 +202,7 @@ bool DelayedControls::push(const ControlList &controls)
*/
ControlList DelayedControls::get(uint32_t sequence)
{
- uint32_t adjustedSeq = sequence - firstSequence_;
- unsigned int index = std::max<int>(0, adjustedSeq - maxDelay_);
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
ControlList out(device_->controls());
for (const auto &ctrl : values_) {
@@ -236,11 +233,6 @@ void DelayedControls::applyControls(uint32_t sequence)
{
LOG(DelayedControls, Debug) << "frame " << sequence << " started";
- if (!running_) {
- firstSequence_ = sequence;
- running_ = true;
- }
-
/*
* Create control list peeking ahead in the value queue to ensure
* values are set in time to satisfy the sensor delay.
@@ -279,7 +271,7 @@ void DelayedControls::applyControls(uint32_t sequence)
}
}
- writeCount_ = sequence - firstSequence_ + 1;
+ writeCount_ = sequence + 1;
while (writeCount_ > queueCount_) {
LOG(DelayedControls, Debug)
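The removal of firstSequence_ assumes frame sequence numbers that restart at
zero on stream start, so the bookkeeping reduces to the raw sequence number. A
usage sketch (delayed, ctrls and sequence come from the surrounding pipeline
handler code):

    delayed->reset();                 /* at stream start; sequences restart at 0 */
    delayed->push(ctrls);             /* queue controls for a future frame */

    /* On every frame-start event: */
    delayed->applyControls(sequence); /* write controls due for this frame */
    ControlList active = delayed->get(sequence); /* controls in effect for it */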
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index d1258050..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
#include "libcamera/internal/device_enumerator.h"
@@ -56,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
@@ -101,8 +101,14 @@ bool DeviceMatch::match(const MediaDevice *device) const
for (const MediaEntity *entity : device->entities()) {
if (name == entity->name()) {
- found = true;
- break;
+ if (!entity->deviceNode().empty()) {
+ found = true;
+ break;
+ } else {
+ LOG(DeviceEnumerator, Debug)
+ << "Skip " << entity->name()
+ << ": no device node";
+ }
}
}
@@ -161,7 +167,7 @@ std::unique_ptr<DeviceEnumerator> DeviceEnumerator::create()
DeviceEnumerator::~DeviceEnumerator()
{
- for (std::shared_ptr<MediaDevice> media : devices_) {
+ for (const std::shared_ptr<MediaDevice> &media : devices_) {
if (media->busy())
LOG(DeviceEnumerator, Error)
<< "Removing media device " << media->deviceNode()
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 686bb809..fc33ba52 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#include "libcamera/internal/device_enumerator_sysfs.h"
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 5317afbd..01c70b6d 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
#include "libcamera/internal/device_enumerator_udev.h"
@@ -13,6 +13,7 @@
#include <list>
#include <map>
#include <string.h>
+#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
@@ -315,6 +316,7 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
* enumerator.
*/
deps->deps_.erase(devnum);
+ devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
@@ -330,18 +332,18 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
- std::string action(udev_device_get_action(dev));
- std::string deviceNode(udev_device_get_devnode(dev));
+ std::string_view action(udev_device_get_action(dev));
+ std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
- << action << " device " << udev_device_get_devnode(dev);
+ << action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
- removeDevice(deviceNode);
+ removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
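The switch to std::string_view avoids copying the C strings returned by udev
while keeping value comparisons, e.g.:

    std::string_view action("add"); /* wraps the C string, no allocation */
    if (action == "add") {          /* character-wise comparison, as before */
    	/* ... */
    }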
diff --git a/src/libcamera/dma_heaps.cpp b/src/libcamera/dma_heaps.cpp
new file mode 100644
index 00000000..d4cb880b
--- /dev/null
+++ b/src/libcamera/dma_heaps.cpp
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-heap allocations.
+ */
+
+#include "libcamera/internal/dma_heaps.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file dma_heaps.cpp
+ * \brief dma-heap allocator
+ */
+
+namespace libcamera {
+
+/*
+ * /dev/dma_heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
+ * to only have to worry about importing.
+ *
+ * Annoyingly, should the cma heap size be specified on the kernel command line
+ * instead of DT, the heap gets named "reserved" instead.
+ */
+
+#ifndef __DOXYGEN__
+struct DmaHeapInfo {
+ DmaHeap::DmaHeapFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaHeapInfo, 3> heapInfos = { {
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/linux,cma" },
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/reserved" },
+ { DmaHeap::DmaHeapFlag::System, "/dev/dma_heap/system" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaHeap)
+
+/**
+ * \class DmaHeap
+ * \brief Helper class for dma-heap allocations
+ *
+ * DMA heaps are kernel devices that provide an API to allocate memory from
+ * different pools called "heaps", wrap each allocated piece of memory in a
+ * dmabuf object, and return the dmabuf file descriptor to userspace. Multiple
+ * heaps can be provided by the system, with different properties for the
+ * underlying memory.
+ *
+ * This class wraps a DMA heap selected at construction time, and exposes
+ * functions to manage memory allocation.
+ */
+
+/**
+ * \enum DmaHeap::DmaHeapFlag
+ * \brief Type of the dma-heap
+ * \var DmaHeap::Cma
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaHeap::System
+ * \brief Allocate from the system dma-heap, using the page allocator
+ */
+
+/**
+ * \typedef DmaHeap::DmaHeapFlags
+ * \brief A bitwise combination of DmaHeap::DmaHeapFlag values
+ */
+
+/**
+ * \brief Construct a DmaHeap of a given type
+ * \param[in] type The type(s) of the dma-heap(s) to allocate from
+ *
+ * The DMA heap type is selected with the \a type parameter, which defaults to
+ * the CMA heap. If no heap of the given type can be accessed, the constructed
+ * DmaHeap instance is invalid as indicated by the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case the
+ * constructed DmaHeap will match one of the types. If the system provides
+ * multiple heaps that match the requested types, which heap is used is
+ * undefined.
+ */
+DmaHeap::DmaHeap(DmaHeapFlags type)
+{
+ for (const auto &info : heapInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaHeap, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaHeap, Debug) << "Using " << info.deviceNodeName;
+ dmaHeapHandle_ = UniqueFD(ret);
+ break;
+ }
+
+ if (!dmaHeapHandle_.isValid())
+ LOG(DmaHeap, Error) << "Could not open any dmaHeap device";
+}
+
+/**
+ * \brief Destroy the DmaHeap instance
+ */
+DmaHeap::~DmaHeap() = default;
+
+/**
+ * \fn DmaHeap::isValid()
+ * \brief Check if the DmaHeap instance is valid
+ * \return True if the DmaHeap is valid, false otherwise
+ */
+
+/**
+ * \brief Allocate a dma-buf from the DmaHeap
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, an invalid UniqueFD is returned.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
+{
+ int ret;
+
+ if (!name)
+ return {};
+
+ struct dma_heap_allocation_data alloc = {};
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+} /* namespace libcamera */
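A usage sketch for the class above (the buffer name and size are illustrative
only):

    DmaHeap heap(DmaHeap::DmaHeapFlag::Cma);
    if (!heap.isValid())
    	return;

    /* Allocate a 1 MiB dma-buf; the name only aids debugging. */
    UniqueFD fd = heap.alloc("scratch-buffer", 1024 * 1024);
    if (fd.isValid())
    	/* wrap fd in a FrameBuffer plane, mmap it, etc. */;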
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
index 7b784778..634c74f8 100644
--- a/src/libcamera/fence.cpp
+++ b/src/libcamera/fence.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * fence.cpp - Synchronization fence
+ * Synchronization fence
*/
#include "libcamera/fence.h"
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index 283ecb3d..cf41f2c2 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
#include "libcamera/internal/formats.h"
@@ -33,7 +33,7 @@ LOG_DEFINE_CATEGORY(Formats)
* used in pipeline handlers.
*
* \var PixelFormatInfo::name
- * \brief The format name as a human-readable string, used as the test
+ * \brief The format name as a human-readable string, used as the text
* representation of the PixelFormat
*
* \var PixelFormatInfo::format
@@ -42,19 +42,16 @@ LOG_DEFINE_CATEGORY(Formats)
* \var PixelFormatInfo::v4l2Formats
* \brief The V4L2 pixel formats corresponding to the PixelFormat
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. The two entries in the array store the contiguous and
- * non-contiguous V4L2 formats respectively. If the PixelFormat isn't a
- * multiplanar format, or if no corresponding non-contiguous V4L2 format
- * exists, the second entry is invalid.
+ * Multiple V4L2 formats may exist for one PixelFormat, as V4L2 defines
+ * separate 4CCs for contiguous and non-contiguous versions of the same image
+ * format.
*
* \var PixelFormatInfo::bitsPerPixel
* \brief The average number of bits per pixel
*
- * The number per pixel averages the total number of bits for all colour
- * components over the whole image, excluding any padding bits or padding
- * pixels.
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
*
* For formats that store pixels with bit padding within words, only the
* effective bits are taken into account. For instance, 12-bit Bayer data
@@ -156,10 +153,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB565, {
.name = "RGB565",
.format = formats::RGB565,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -169,10 +163,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB565_BE, {
.name = "RGB565_BE",
.format = formats::RGB565_BE,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB565X),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -182,10 +173,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGR888, {
.name = "BGR888",
.format = formats::BGR888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGB24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -195,10 +183,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGB888, {
.name = "RGB888",
.format = formats::RGB888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGR24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -208,10 +193,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XRGB8888, {
.name = "XRGB8888",
.format = formats::XRGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XBGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -221,10 +203,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::XBGR8888, {
.name = "XBGR8888",
.format = formats::XBGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -234,10 +213,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGBX8888, {
.name = "RGBX8888",
.format = formats::RGBX8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGRX32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -247,10 +223,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRX8888, {
.name = "BGRX8888",
.format = formats::BGRX8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_XRGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -260,10 +233,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ABGR8888, {
.name = "ABGR8888",
.format = formats::ABGR8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -273,10 +243,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::ARGB8888, {
.name = "ARGB8888",
.format = formats::ARGB8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -286,10 +253,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::BGRA8888, {
.name = "BGRA8888",
.format = formats::BGRA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_ARGB32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
@@ -299,25 +263,39 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::RGBA8888, {
.name = "RGBA8888",
.format = formats::RGBA8888,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32), },
.bitsPerPixel = 32,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV packed formats. */
{ formats::YUYV, {
.name = "YUYV",
.format = formats::YUYV,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUYV), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -327,10 +305,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVYU, {
.name = "YVYU",
.format = formats::YVYU,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVYU),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVYU), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -340,10 +315,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::UYVY, {
.name = "UYVY",
.format = formats::UYVY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_UYVY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_UYVY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -353,24 +325,41 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::VYUY, {
.name = "VYUY",
.format = formats::VYUY,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_VYUY), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 2,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::AVUY8888, {
+ .name = "AVUY8888",
+ .format = formats::AVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XVUY8888, {
+ .name = "XVUY8888",
+ .format = formats::XVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV planar formats. */
{ formats::NV12, {
.name = "NV12",
.format = formats::NV12,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV12),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -382,8 +371,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV21",
.format = formats::NV21,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV21),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -395,8 +384,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV16",
.format = formats::NV16,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV16),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -408,8 +397,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "NV61",
.format = formats::NV61,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV61),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -420,10 +409,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV24, {
.name = "NV24",
.format = formats::NV24,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV24),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV24), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -433,10 +419,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::NV42, {
.name = "NV42",
.format = formats::NV42,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_NV42),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV42), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -447,8 +430,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV420",
.format = formats::YUV420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -460,8 +443,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YVU420",
.format = formats::YVU420,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
},
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -473,8 +456,8 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.name = "YUV422",
.format = formats::YUV422,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
},
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -485,10 +468,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVU422, {
.name = "YVU422",
.format = formats::YVU422,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU422M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -498,10 +478,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YUV444, {
.name = "YUV444",
.format = formats::YUV444,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -511,10 +488,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::YVU444, {
.name = "YVU444",
.format = formats::YVU444,
- .v4l2Formats = {
- .single = V4L2PixelFormat(),
- .multi = V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M), },
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -526,10 +500,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::R8, {
.name = "R8",
.format = formats::R8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_GREY),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_GREY), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
@@ -539,51 +510,59 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::R10, {
.name = "R10",
.format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::R10_CSI2P, {
+ .name = "R10_CSI2P",
+ .format = formats::R10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::R12, {
.name = "R12",
.format = formats::R12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = false,
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
- { formats::R10_CSI2P, {
- .name = "R10_CSI2P",
- .format = formats::R10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
- .multi = V4L2PixelFormat(),
- },
- .bitsPerPixel = 10,
+ { formats::R16, {
+ .name = "R16",
+ .format = formats::R16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
.packed = true,
- .pixelsPerGroup = 4,
- .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
} },
/* Bayer formats. */
{ formats::SBGGR8, {
.name = "SBGGR8",
.format = formats::SBGGR8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -593,10 +572,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG8, {
.name = "SGBRG8",
.format = formats::SGBRG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -606,10 +582,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG8, {
.name = "SGRBG8",
.format = formats::SGRBG8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -619,10 +592,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB8, {
.name = "SRGGB8",
.format = formats::SRGGB8,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8), },
.bitsPerPixel = 8,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -632,10 +602,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10, {
.name = "SBGGR10",
.format = formats::SBGGR10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -645,10 +612,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10, {
.name = "SGBRG10",
.format = formats::SGBRG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -658,10 +622,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10, {
.name = "SGRBG10",
.format = formats::SGRBG10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -671,10 +632,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10, {
.name = "SRGGB10",
.format = formats::SRGGB10,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -684,10 +642,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_CSI2P, {
.name = "SBGGR10_CSI2P",
.format = formats::SBGGR10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -697,10 +652,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_CSI2P, {
.name = "SGBRG10_CSI2P",
.format = formats::SGBRG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -710,10 +662,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_CSI2P, {
.name = "SGRBG10_CSI2P",
.format = formats::SGRBG10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -723,10 +672,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_CSI2P, {
.name = "SRGGB10_CSI2P",
.format = formats::SRGGB10_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -736,10 +682,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12, {
.name = "SBGGR12",
.format = formats::SBGGR12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -749,10 +692,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12, {
.name = "SGBRG12",
.format = formats::SGBRG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -762,10 +702,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12, {
.name = "SGRBG12",
.format = formats::SGRBG12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -775,10 +712,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12, {
.name = "SRGGB12",
.format = formats::SRGGB12,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -788,10 +722,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR12_CSI2P, {
.name = "SBGGR12_CSI2P",
.format = formats::SBGGR12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -801,10 +732,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG12_CSI2P, {
.name = "SGBRG12_CSI2P",
.format = formats::SGBRG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -814,10 +742,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG12_CSI2P, {
.name = "SGRBG12_CSI2P",
.format = formats::SGRBG12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -827,23 +752,97 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB12_CSI2P, {
.name = "SRGGB12_CSI2P",
.format = formats::SRGGB12_CSI2P,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P), },
.bitsPerPixel = 12,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 2,
.planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::SBGGR14, {
+ .name = "SBGGR14",
+ .format = formats::SBGGR14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14, {
+ .name = "SGBRG14",
+ .format = formats::SGBRG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14, {
+ .name = "SGRBG14",
+ .format = formats::SGRBG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14, {
+ .name = "SRGGB14",
+ .format = formats::SRGGB14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14_CSI2P, {
+ .name = "SBGGR14_CSI2P",
+ .format = formats::SBGGR14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14_CSI2P, {
+ .name = "SGBRG14_CSI2P",
+ .format = formats::SGBRG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14_CSI2P, {
+ .name = "SGRBG14_CSI2P",
+ .format = formats::SGRBG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14_CSI2P, {
+ .name = "SRGGB14_CSI2P",
+ .format = formats::SRGGB14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::SBGGR16, {
.name = "SBGGR16",
.format = formats::SBGGR16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -853,10 +852,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG16, {
.name = "SGBRG16",
.format = formats::SGBRG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -866,10 +862,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG16, {
.name = "SGRBG16",
.format = formats::SGRBG16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -879,10 +872,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB16, {
.name = "SRGGB16",
.format = formats::SRGGB16,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16), },
.bitsPerPixel = 16,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = false,
@@ -892,10 +882,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SBGGR10_IPU3, {
.name = "SBGGR10_IPU3",
.format = formats::SBGGR10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -906,10 +893,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGBRG10_IPU3, {
.name = "SGBRG10_IPU3",
.format = formats::SGBRG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -919,10 +903,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SGRBG10_IPU3, {
.name = "SGRBG10_IPU3",
.format = formats::SGRBG10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
@@ -932,24 +913,60 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
{ formats::SRGGB10_IPU3, {
.name = "SRGGB10_IPU3",
.format = formats::SRGGB10_IPU3,
- .v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10),
- .multi = V4L2PixelFormat(),
- },
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10), },
.bitsPerPixel = 10,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
.packed = true,
.pixelsPerGroup = 25,
.planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
} },
-
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* Compressed formats. */
{ formats::MJPEG, {
.name = "MJPEG",
.format = formats::MJPEG,
.v4l2Formats = {
- .single = V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
- .multi = V4L2PixelFormat(),
+ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
},
.bitsPerPixel = 0,
.colourEncoding = PixelFormatInfo::ColourEncodingYUV,
@@ -987,22 +1004,22 @@ const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
}
/**
- * \brief Retrieve information about a pixel format
+ * \brief Retrieve information about a V4L2 pixel format
* \param[in] format The V4L2 pixel format
* \return The PixelFormatInfo describing the V4L2 \a format if known, or an
* invalid PixelFormatInfo otherwise
*/
const PixelFormatInfo &PixelFormatInfo::info(const V4L2PixelFormat &format)
{
- const auto &info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](auto pair) {
- return pair.second.v4l2Formats.single == format ||
- pair.second.v4l2Formats.multi == format;
- });
- if (info == pixelFormatInfo.end())
+ PixelFormat pixelFormat = format.toPixelFormat(false);
+ if (!pixelFormat.isValid())
return pixelFormatInfoInvalid;
- return info->second;
+ const auto iter = pixelFormatInfo.find(pixelFormat);
+ if (iter == pixelFormatInfo.end())
+ return pixelFormatInfoInvalid;
+
+ return iter->second;
}
/**
@@ -1049,7 +1066,7 @@ unsigned int PixelFormatInfo::stride(unsigned int width, unsigned int plane,
return 0;
}
- if (plane > planes.size() || !planes[plane].bytesPerGroup) {
+ if (plane >= planes.size() || !planes[plane].bytesPerGroup) {
LOG(Formats, Warning) << "Invalid plane index, stride is zero";
return 0;
}
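
With v4l2Formats collapsed to a plain list and PixelFormatInfo::info(const V4L2PixelFormat &) rewritten to resolve through V4L2PixelFormat::toPixelFormat(), a V4L2 format now maps to its format information via the PixelFormat table instead of a linear scan over .single/.multi members. A minimal usage sketch of the new overload; the helper name isRawV4L2Format is illustrative only:

#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_pixelformat.h"

using namespace libcamera;

bool isRawV4L2Format(const V4L2PixelFormat &v4l2Format)
{
	/* Unknown formats resolve to an invalid PixelFormatInfo. */
	const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
	if (!info.isValid())
		return false;

	return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
}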
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
index 7dda0132..fe027a7c 100644
--- a/src/libcamera/formats.yaml
+++ b/src/libcamera/formats.yaml
@@ -2,7 +2,7 @@
#
# Copyright (C) 2020, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
formats:
- R8:
@@ -11,6 +11,8 @@ formats:
fourcc: DRM_FORMAT_R10
- R12:
fourcc: DRM_FORMAT_R12
+ - R16:
+ fourcc: DRM_FORMAT_R16
- RGB565:
fourcc: DRM_FORMAT_RGB565
@@ -41,6 +43,11 @@ formats:
- BGRA8888:
fourcc: DRM_FORMAT_BGRA8888
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
- YUYV:
fourcc: DRM_FORMAT_YUYV
- YVYU:
@@ -49,6 +56,10 @@ formats:
fourcc: DRM_FORMAT_UYVY
- VYUY:
fourcc: DRM_FORMAT_VYUY
+ - AVUY8888:
+ fourcc: DRM_FORMAT_AVUY8888
+ - XVUY8888:
+ fourcc: DRM_FORMAT_XVUY8888
- NV12:
fourcc: DRM_FORMAT_NV12
@@ -106,6 +117,15 @@ formats:
- SBGGR12:
fourcc: DRM_FORMAT_SBGGR12
+ - SRGGB14:
+ fourcc: DRM_FORMAT_SRGGB14
+ - SGRBG14:
+ fourcc: DRM_FORMAT_SGRBG14
+ - SGBRG14:
+ fourcc: DRM_FORMAT_SGBRG14
+ - SBGGR14:
+ fourcc: DRM_FORMAT_SBGGR14
+
- SRGGB16:
fourcc: DRM_FORMAT_SRGGB16
- SGRBG16:
@@ -145,6 +165,19 @@ formats:
fourcc: DRM_FORMAT_SBGGR12
mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SRGGB14_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG14_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG14_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR14_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
- SRGGB10_IPU3:
fourcc: DRM_FORMAT_SRGGB10
mod: IPU3_FORMAT_MOD_PACKED
@@ -157,4 +190,20 @@ formats:
- SBGGR10_IPU3:
fourcc: DRM_FORMAT_SBGGR10
mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
...
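
The new PISP entries reuse existing Bayer fourccs and are distinguished solely by the PISP_FORMAT_MOD_COMPRESS_MODE1 modifier, and the modifier takes part in PixelFormat comparisons. A small sketch, assuming the formats:: constants generated from this table:

#include <iostream>

#include <libcamera/formats.h>

using namespace libcamera;

int main()
{
	/* Same SRGGB16 fourcc; only the modifier differs, so the two
	 * constants compare unequal. */
	std::cout << formats::SRGGB16.toString() << std::endl;
	std::cout << formats::RGGB_PISP_COMP1.toString() << std::endl;

	return formats::SRGGB16 == formats::RGGB_PISP_COMP1 ? 1 : 0;
}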
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
index 7be18560..63d679cb 100644
--- a/src/libcamera/framebuffer.cpp
+++ b/src/libcamera/framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.cpp - Frame buffer handling
+ * Frame buffer handling
*/
#include <libcamera/framebuffer.h>
@@ -114,9 +114,16 @@ LOG_DEFINE_CATEGORY(Buffer)
* pipeline handlers.
*/
-FrameBuffer::Private::Private()
- : request_(nullptr), isContiguous_(true)
+/**
+ * \brief Construct a FrameBuffer::Private instance
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::Private::Private(const std::vector<Plane> &planes, uint64_t cookie)
+ : planes_(planes), cookie_(cookie), request_(nullptr),
+ isContiguous_(true)
{
+ metadata_.planes_.resize(planes_.size());
}
/**
@@ -195,6 +202,12 @@ FrameBuffer::Private::~Private()
*/
/**
+ * \fn FrameBuffer::Private::metadata()
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+
+/**
* \class FrameBuffer
* \brief Frame buffer data and its associated dynamic metadata
*
@@ -291,29 +304,22 @@ ino_t fileDescriptorInode(const SharedFD &fd)
* \param[in] cookie Cookie
*/
FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
- : FrameBuffer(std::make_unique<Private>(), planes, cookie)
+ : FrameBuffer(std::make_unique<Private>(planes, cookie))
{
}
/**
- * \brief Construct a FrameBuffer with an extensible private class and an array
- * of planes
+ * \brief Construct a FrameBuffer with an extensible private class
* \param[in] d The extensible private class
- * \param[in] planes The frame memory planes
- * \param[in] cookie Cookie
*/
-FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
- const std::vector<Plane> &planes,
- unsigned int cookie)
- : Extensible(std::move(d)), planes_(planes), cookie_(cookie)
+FrameBuffer::FrameBuffer(std::unique_ptr<Private> d)
+ : Extensible(std::move(d))
{
- metadata_.planes_.resize(planes_.size());
-
unsigned int offset = 0;
bool isContiguous = true;
ino_t inode = 0;
- for (const auto &plane : planes_) {
+ for (const auto &plane : _d()->planes_) {
ASSERT(plane.offset != Plane::kInvalidOffset);
if (plane.offset != offset) {
@@ -325,9 +331,9 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
* Two different dmabuf file descriptors may still refer to the
* same dmabuf instance. Check this using inodes.
*/
- if (plane.fd != planes_[0].fd) {
+ if (plane.fd != _d()->planes_[0].fd) {
if (!inode)
- inode = fileDescriptorInode(planes_[0].fd);
+ inode = fileDescriptorInode(_d()->planes_[0].fd);
if (fileDescriptorInode(plane.fd) != inode) {
isContiguous = false;
break;
@@ -344,10 +350,13 @@ FrameBuffer::FrameBuffer(std::unique_ptr<Private> d,
}
/**
- * \fn FrameBuffer::planes()
* \brief Retrieve the static plane descriptors
* \return Array of plane descriptors
*/
+const std::vector<FrameBuffer::Plane> &FrameBuffer::planes() const
+{
+ return _d()->planes_;
+}
/**
* \brief Retrieve the request this buffer belongs to
@@ -368,13 +377,15 @@ Request *FrameBuffer::request() const
}
/**
- * \fn FrameBuffer::metadata()
* \brief Retrieve the dynamic metadata
* \return Dynamic metadata for the frame contained in the buffer
*/
+const FrameMetadata &FrameBuffer::metadata() const
+{
+ return _d()->metadata_;
+}
/**
- * \fn FrameBuffer::cookie()
* \brief Retrieve the cookie
*
* The cookie belongs to the creator of the FrameBuffer, which controls its
@@ -384,9 +395,12 @@ Request *FrameBuffer::request() const
*
* \return The cookie
*/
+uint64_t FrameBuffer::cookie() const
+{
+ return _d()->cookie_;
+}
/**
- * \fn FrameBuffer::setCookie()
* \brief Set the cookie
* \param[in] cookie Cookie to set
*
@@ -395,6 +409,10 @@ Request *FrameBuffer::request() const
* modify the cookie value of buffers they haven't created themselves. The
* libcamera core never modifies the buffer cookie.
*/
+void FrameBuffer::setCookie(uint64_t cookie)
+{
+ _d()->cookie_ = cookie;
+}
/**
* \brief Extract the Fence associated with this Framebuffer
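
The accessors move out of line because planes_, metadata_ and cookie_ now live in FrameBuffer::Private. A condensed, generic illustration of the d-pointer pattern this relies on; the Buffer class below is illustrative and stands in for libcamera's Extensible machinery and its _d() accessor:

#include <cstdint>
#include <memory>

class Buffer
{
public:
	Buffer() : d_(std::make_unique<Private>()) {}

	uint64_t cookie() const { return d_->cookie_; }
	void setCookie(uint64_t cookie) { d_->cookie_ = cookie; }

private:
	/* All mutable state lives behind the pointer, so it can grow
	 * without changing the public class layout. */
	struct Private {
		uint64_t cookie_ = 0;
	};

	std::unique_ptr<Private> d_;
};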
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index 4df27cac..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
@@ -59,14 +59,11 @@ LOG_DEFINE_CATEGORY(Allocator)
* \param[in] camera The camera
*/
FrameBufferAllocator::FrameBufferAllocator(std::shared_ptr<Camera> camera)
- : camera_(camera)
+ : camera_(std::move(camera))
{
}
-FrameBufferAllocator::~FrameBufferAllocator()
-{
- buffers_.clear();
-}
+FrameBufferAllocator::~FrameBufferAllocator() = default;
/**
* \brief Allocate buffers for a configured stream
@@ -88,16 +85,22 @@ FrameBufferAllocator::~FrameBufferAllocator()
*/
int FrameBufferAllocator::allocate(Stream *stream)
{
- if (buffers_.count(stream)) {
+ const auto &[it, inserted] = buffers_.try_emplace(stream);
+
+ if (!inserted) {
LOG(Allocator, Error) << "Buffers already allocated for stream";
return -EBUSY;
}
- int ret = camera_->exportFrameBuffers(stream, &buffers_[stream]);
+ int ret = camera_->exportFrameBuffers(stream, &it->second);
if (ret == -EINVAL)
LOG(Allocator, Error)
<< "Stream is not part of " << camera_->id()
<< " active configuration";
+
+ if (ret < 0)
+ buffers_.erase(it);
+
return ret;
}
@@ -119,8 +122,6 @@ int FrameBufferAllocator::free(Stream *stream)
if (iter == buffers_.end())
return -EINVAL;
- std::vector<std::unique_ptr<FrameBuffer>> &buffers = iter->second;
- buffers.clear();
buffers_.erase(iter);
return 0;
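
The reworked allocate() uses try_emplace() to combine the existence check and the insertion in a single map lookup, and erases the entry again if the export fails so no empty vector is left behind. A self-contained sketch of the idiom with placeholder types; fillEntry() stands in for Camera::exportFrameBuffers():

#include <cerrno>
#include <map>
#include <vector>

/* Placeholder for the operation that populates the new entry. */
int fillEntry(std::vector<int> *entry)
{
	entry->push_back(0);
	return 0;
}

std::map<int, std::vector<int>> table;

int allocateEntry(int key)
{
	/* try_emplace() inserts a default-constructed value and reports
	 * whether the key already existed. */
	const auto &[it, inserted] = table.try_emplace(key);
	if (!inserted)
		return -EBUSY;

	int ret = fillEntry(&it->second);
	if (ret < 0)
		table.erase(it); /* Don't leave an empty entry behind. */

	return ret;
}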
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index e50b46c5..00015136 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -95,10 +95,10 @@ std::ostream &operator<<(std::ostream &out, const Point &p)
}
/**
- * \struct Size
+ * \class Size
* \brief Describe a two-dimensional size
*
- * The Size structure defines a two-dimensional size with integer precision.
+ * The Size class defines a two-dimensional size with integer precision.
*/
/**
@@ -455,7 +455,7 @@ std::ostream &operator<<(std::ostream &out, const Size &s)
}
/**
- * \struct SizeRange
+ * \class SizeRange
* \brief Describe a range of sizes
*
* A SizeRange describes a range of sizes included in the [min, max] interval
@@ -589,7 +589,7 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
}
/**
- * \struct Rectangle
+ * \class Rectangle
* \brief Describe a rectangle's position and dimensions
*
* Rectangles are used to identify an area of an image. They are specified by
diff --git a/src/libcamera/ipa/meson.build b/src/libcamera/ipa/meson.build
index 44695240..ef73b3f9 100644
--- a/src/libcamera/ipa/meson.build
+++ b/src/libcamera/ipa/meson.build
@@ -3,13 +3,10 @@
libcamera_ipa_interfaces = []
foreach file : ipa_mojom_files
- name = '@0@'.format(file).split('/')[-1].split('.')[0]
-
# {pipeline}_ipa_interface.cpp
libcamera_ipa_interfaces += \
- custom_target(name + '_ipa_interface_cpp',
- input : file,
- output : name + '_ipa_interface.cpp',
+ custom_target(input : file,
+ output : '@BASENAME@_ipa_interface.cpp',
command : [
mojom_docs_extractor,
'-o', '@OUTPUT@', '@INPUT@'
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index 870a443b..9420c889 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
#include <libcamera/ipa/ipa_controls.h>
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
index 0a259305..3e9bef08 100644
--- a/src/libcamera/ipa_data_serializer.cpp
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.cpp - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#include "libcamera/internal/ipa_data_serializer.h"
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 8ea6cbee..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#include <libcamera/ipa/ipa_interface.h>
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index ec966045..f4e0b633 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#include "libcamera/internal/ipa_manager.h"
@@ -109,6 +109,11 @@ IPAManager::IPAManager()
LOG(IPAManager, Fatal)
<< "Multiple IPAManager objects are not allowed";
+#if HAVE_IPA_PUBKEY
+ if (!pubKey_.isValid())
+ LOG(IPAManager, Warning) << "Public key not valid";
+#endif
+
unsigned int ipaCount = 0;
/* User-specified paths take precedence. */
@@ -133,7 +138,7 @@ IPAManager::IPAManager()
std::string root = utils::libcameraBuildPath();
if (!root.empty()) {
std::string ipaBuildPath = root + "src/ipa";
- constexpr int maxDepth = 1;
+ constexpr int maxDepth = 2;
LOG(IPAManager, Info)
<< "libcamera is not installed. Adding '"
@@ -274,6 +279,19 @@ IPAModule *IPAManager::module(PipelineHandler *pipe, uint32_t minVersion,
* found or if the IPA proxy fails to initialize
*/
+#if HAVE_IPA_PUBKEY
+/**
+ * \fn IPAManager::pubKey()
+ * \brief Retrieve the IPA module signing public key
+ *
+ * IPA module signature verification is normally handled internally by the
+ * IPAManager class. This function is meant to be used by utilities that need to
+ * verify signatures externally.
+ *
+ * \return The IPA module signing public key
+ */
+#endif
+
bool IPAManager::isSignatureValid([[maybe_unused]] IPAModule *ipa) const
{
#if HAVE_IPA_PUBKEY
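
A hedged sketch of the external verification use case the new pubKey() documentation refers to, assuming pubKey() is callable as shown and that the key class provides PubKey::verify(data, sig) as used by isSignatureValid(); the verifyBlob() helper is illustrative:

#include <cstdint>

#include <libcamera/base/span.h>

#include "libcamera/internal/ipa_manager.h"

using namespace libcamera;

bool verifyBlob(Span<const uint8_t> data, Span<const uint8_t> signature)
{
#if HAVE_IPA_PUBKEY
	/* pubKey() exposes the same key isSignatureValid() uses internally. */
	return IPAManager::pubKey().verify(data, signature);
#else
	return false;
#endif
}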
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index c9ff7de3..0756b691 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#include "libcamera/internal/ipa_module.h"
@@ -225,9 +225,9 @@ Span<const uint8_t> elfLoadSymbol(Span<const uint8_t> elf, const char *symbol)
* \brief The name of the IPA module
*
* The name may be used to build file system paths to IPA-specific resources.
- * It shall only contain printable characters, and may not contain '/', '*',
- * '?' or '\'. For IPA modules included in libcamera, it shall match the
- * directory of the IPA module in the source tree.
+ * It shall only contain printable characters, and may not contain '*', '?' or
+ * '\'. For IPA modules included in libcamera, it shall match the directory of
+ * the IPA module in the source tree.
*
* \todo Allow user to choose to isolate open source IPAs
*/
@@ -288,25 +288,30 @@ int IPAModule::loadIPAModuleInfo()
}
Span<const uint8_t> info = elfLoadSymbol(data, "ipaModuleInfo");
- if (info.size() != sizeof(info_)) {
+ if (info.size() < sizeof(info_)) {
LOG(IPAModule, Error) << "IPA module has no valid info";
return -EINVAL;
}
- memcpy(&info_, info.data(), info.size());
+ memcpy(&info_, info.data(), sizeof(info_));
if (info_.moduleAPIVersion != IPA_MODULE_API_VERSION) {
LOG(IPAModule, Error) << "IPA module API version mismatch";
return -EINVAL;
}
- /* Validate the IPA module name. */
+ /*
+ * Validate the IPA module name.
+ *
+ * \todo Consider module naming restrictions to avoid escaping from a
+ * base directory. Forbidding ".." may be enough, but this may be best
+ * implemented in a different layer.
+ */
std::string ipaName = info_.name;
auto iter = std::find_if_not(ipaName.begin(), ipaName.end(),
[](unsigned char c) -> bool {
- return isprint(c) && c != '/' &&
- c != '?' && c != '*' &&
- c != '\\';
+ return isprint(c) && c != '?' &&
+ c != '*' && c != '\\';
});
if (iter != ipaName.end()) {
LOG(IPAModule, Error)
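
Dropping '/' from the rejected characters allows nested module names while still blocking globbing and escape characters. The validation idiom, extracted into a self-contained sketch (the helper name is illustrative): find the first character that is not acceptable; reaching the end means the whole name passed.

#include <algorithm>
#include <cctype>
#include <string>

bool isValidIpaName(const std::string &name)
{
	auto iter = std::find_if_not(name.begin(), name.end(),
				     [](unsigned char c) -> bool {
					     return std::isprint(c) && c != '?' &&
						    c != '*' && c != '\\';
				     });

	return iter == name.end();
}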
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 3f2cc6b8..6c17c456 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#include "libcamera/internal/ipa_proxy.h"
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
index 01e5333b..5d8c92c2 100644
--- a/src/libcamera/ipa_pub_key.cpp.in
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * ipa_pub_key.cpp - IPA module signing public key
+ * IPA module signing public key
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
index 31a0ca09..548299d0 100644
--- a/src/libcamera/ipc_pipe.cpp
+++ b/src/libcamera/ipc_pipe.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.cpp - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#include "libcamera/internal/ipc_pipe.h"
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
index da2cffc3..668ec73b 100644
--- a/src/libcamera/ipc_pipe_unixsocket.cpp
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.cpp - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#include "libcamera/internal/ipc_pipe_unixsocket.h"
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 1980d374..75285b67 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#include "libcamera/internal/ipc_unixsocket.h"
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
index 6860069b..b3104e05 100644
--- a/src/libcamera/mapped_framebuffer.cpp
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.cpp - Mapped Framebuffer support
+ * Mapped Framebuffer support
*/
#include "libcamera/internal/mapped_framebuffer.h"
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 7c94da9e..bd054552 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
#include "libcamera/internal/media_device.h"
@@ -352,8 +352,9 @@ MediaEntity *MediaDevice::getEntityByName(const std::string &name) const
* entity with name \a sourceName, to the pad at index \a sinkIdx of the
* sink entity with name \a sinkName, if any.
*
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -381,8 +382,9 @@ MediaLink *MediaDevice::link(const std::string &sourceName, unsigned int sourceI
* entity \a source, to the pad at index \a sinkIdx of the sink entity \a
* sink, if any.
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -404,8 +406,10 @@ MediaLink *MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx,
* \param[in] source The source pad
* \param[in] sink The sink pad
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -473,7 +477,7 @@ int MediaDevice::open()
return -EBUSY;
}
- fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR));
+ fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR | O_CLOEXEC));
if (!fd_.isValid()) {
int ret = -errno;
LOG(MediaDevice, Error)
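
Adding O_CLOEXEC ensures the media device file descriptor is not inherited across exec() by child processes, such as the isolated IPA proxy workers the library may spawn. A minimal sketch; the device node path is illustrative:

#include <fcntl.h>

int openMediaDevice()
{
	/* O_CLOEXEC closes the descriptor automatically on exec(). */
	return open("/dev/media0", O_RDWR | O_CLOEXEC);
}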
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index c78f4758..1b191a1e 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
#include "libcamera/internal/media_object.h"
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index b57bee7e..a3b12bc1 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -7,15 +7,15 @@ libcamera_sources = files([
'camera_controls.cpp',
'camera_lens.cpp',
'camera_manager.cpp',
- 'camera_sensor.cpp',
- 'camera_sensor_properties.cpp',
'color_space.cpp',
'controls.cpp',
'control_serializer.cpp',
'control_validator.cpp',
+ 'converter.cpp',
'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
+ 'dma_heaps.cpp',
'fence.cpp',
'formats.cpp',
'framebuffer.cpp',
@@ -33,11 +33,13 @@ libcamera_sources = files([
'mapped_framebuffer.cpp',
'media_device.cpp',
'media_object.cpp',
+ 'orientation.cpp',
'pipeline_handler.cpp',
'pixel_format.cpp',
'process.cpp',
'pub_key.cpp',
'request.cpp',
+ 'shared_mem_object.cpp',
'source_paths.cpp',
'stream.cpp',
'sysfs.cpp',
@@ -57,20 +59,46 @@ includes = [
libcamera_includes,
]
+libcamera_deps = []
+
libatomic = cc.find_library('atomic', required : false)
+libthreads = dependency('threads')
subdir('base')
+subdir('converter')
subdir('ipa')
subdir('pipeline')
subdir('proxy')
+subdir('sensor')
+subdir('software_isp')
+
+null_dep = dependency('', required : false)
-libdl = cc.find_library('dl')
-libgnutls = cc.find_library('gnutls', required : true)
-libudev = dependency('libudev', required : false)
+# TODO: Use dependency('dl') when updating to meson 0.62.0 or newer.
+libdl = null_dep
+if not cc.has_function('dlopen')
+ libdl = cc.find_library('dl')
+endif
+libudev = dependency('libudev', required : get_option('udev'))
libyaml = dependency('yaml-0.1', required : false)
-if libgnutls.found()
+# Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first.
+libcrypto = dependency('gnutls', required : false)
+if libcrypto.found()
config_h.set('HAVE_GNUTLS', 1)
+else
+ libcrypto = dependency('libcrypto', required : false)
+ if libcrypto.found()
+ config_h.set('HAVE_CRYPTO', 1)
+ endif
+endif
+
+if not libcrypto.found()
+ warning('Neither gnutls nor libcrypto found, all IPA modules will be isolated')
+ summary({'IPA modules signed with': 'None (modules will run isolated)'},
+ section : 'Configuration')
+else
+ summary({'IPA modules signed with' : libcrypto.name()}, section : 'Configuration')
endif
if liblttng.found()
@@ -101,12 +129,27 @@ endif
control_sources = []
-foreach source : control_source_files
- input_files = files(source +'.yaml', source + '.cpp.in')
- control_sources += custom_target(source + '_cpp',
+controls_mode_files = {
+ 'controls' : controls_files,
+ 'properties' : properties_files,
+}
+
+foreach mode, input_files : controls_mode_files
+ input_files = files(input_files)
+
+ if mode == 'controls'
+ template_file = files('control_ids.cpp.in')
+ else
+ template_file = files('property_ids.cpp.in')
+ endif
+
+ ranges_file = files('control_ranges.yaml')
+ control_sources += custom_target(mode + '_cpp',
input : input_files,
- output : source + '.cpp',
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'])
+ output : mode + '_ids.cpp',
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'])
endforeach
libcamera_sources += control_sources
@@ -131,12 +174,12 @@ if ipa_sign_module
libcamera_sources += ipa_pub_key_cpp
endif
-libcamera_deps = [
+libcamera_deps += [
libatomic,
libcamera_base,
libcamera_base_private,
+ libcrypto,
libdl,
- libgnutls,
liblttng,
libudev,
libyaml,
@@ -150,6 +193,7 @@ libcamera_deps = [
libcamera = shared_library('libcamera',
libcamera_sources,
version : libcamera_version,
+ soversion : libcamera_soversion,
name_prefix : '',
install : true,
include_directories : includes,
@@ -179,4 +223,6 @@ pkg_mod.generate(libcamera,
description : 'Complex Camera Support Library',
subdirs : 'libcamera')
+meson.override_dependency('libcamera', libcamera_public)
+
subdir('proxy/worker')
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
new file mode 100644
index 00000000..47fd6a32
--- /dev/null
+++ b/src/libcamera/orientation.cpp
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#include <libcamera/orientation.h>
+
+#include <array>
+#include <string>
+
+/**
+ * \file libcamera/orientation.h
+ * \brief Image orientation definition
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Orientation
+ * \brief The image orientation in a memory buffer
+ *
+ * The Orientation enumeration describes the orientation of the images
+ * produced by the camera pipeline as they are received by the application
+ * inside memory buffers.
+ *
+ * The image orientation expressed by the Orientation enumeration can then be
+ * inferred by applying to a naturally oriented image a rotation by a multiple
+ * of 90 degrees in the clockwise direction, followed by an optional
+ * horizontal mirroring.
+ *
+ * The enumeration numerical values follow the ones defined by the EXIF
+ * Specification version 2.32, Tag 274 "Orientation", while the names of the
+ * enumerated values report the rotation and mirroring operations performed.
+ *
+ * For example, Orientation::Rotate90Mirror describes the orientation obtained
+ * by rotating the image 90 degrees clockwise first and then applying a
+ * horizontal mirroring.
+ *
+ * \var Orientation::Rotate0
+ * \image html rotation/rotate0.svg
+ * \var Orientation::Rotate0Mirror
+ * \image html rotation/rotate0Mirror.svg
+ * \var Orientation::Rotate180
+ * \image html rotation/rotate180.svg
+ * \var Orientation::Rotate180Mirror
+ * \image html rotation/rotate180Mirror.svg
+ * \var Orientation::Rotate90Mirror
+ * \image html rotation/rotate90Mirror.svg
+ * \var Orientation::Rotate270
+ * \image html rotation/rotate270.svg
+ * \var Orientation::Rotate270Mirror
+ * \image html rotation/rotate270Mirror.svg
+ * \var Orientation::Rotate90
+ * \image html rotation/rotate90.svg
+ */
+
+/**
+ * \brief Return the orientation representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The orientation corresponding to the rotation if \a success was set
+ * to `true`, otherwise the `Rotate0` orientation
+ */
+Orientation orientationFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Orientation::Rotate0;
+ case 90:
+ return Orientation::Rotate90;
+ case 180:
+ return Orientation::Rotate180;
+ case 270:
+ return Orientation::Rotate270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Orientation::Rotate0;
+}
+
+/**
+ * \brief Print a human-friendly name for an Orientation item
+ * \param[in] out The output stream
+ * \param[in] orientation The Orientation item
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
+{
+ constexpr std::array<const char *, 9> orientationNames = {
+ "", /* Orientation starts counting from 1. */
+ "Rotate0", "Rotate0Mirror",
+ "Rotate180", "Rotate180Mirror",
+ "Rotate90Mirror", "Rotate270",
+ "Rotate270Mirror", "Rotate90",
+ };
+
+ out << orientationNames[static_cast<unsigned int>(orientation)];
+ return out;
+}
+
+} /* namespace libcamera */
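
A short usage sketch of orientationFromRotation() as defined above, showing the angle normalization and the failure path:

#include <iostream>

#include <libcamera/orientation.h>

using namespace libcamera;

int main()
{
	bool ok;

	/* Negative angles are normalized: -90 becomes 270 clockwise. */
	Orientation o = orientationFromRotation(-90, &ok);
	if (ok)
		std::cout << o << std::endl; /* Prints "Rotate270". */

	/* Angles that are not multiples of 90 set ok to false and
	 * fall back to Rotate0. */
	orientationFromRotation(45, &ok);

	return ok ? 1 : 0;
}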
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..72aa6c75
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1117 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ int ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor, adjust it to
+ * the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+	 * map and that the sensor can produce a compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
+
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixel format.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format, the ISI cannot
+ * generate any processed pixel format, as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+ * media bus formats.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
+
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+	 * format. If the sensor can only produce RAW Bayer, we can only fail
+	 * here, as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+		/* If the stream is RAW or not supported, default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the streams size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane ? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
+ * channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+	 * successive channels together. Define a policy for channel allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+	 * \todo The sensor format selection policy could be changed to
+	 * prefer operating the sensor at full resolution to prioritize
+	 * image quality in exchange for a usually slower frame rate.
+	 * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+	 * We can't use CameraSensor::getFormat() as it might return a
+	 * format larger than our strict width limit, since that function
+	 * prioritizes formats with the same aspect ratio over formats with
+	 * less difference in size.
+	 *
+	 * Manually walk all the sizes the sensor supports, searching for
+	 * the smallest larger format without considering the aspect ratio,
+	 * as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
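
The selection loop above depends on CameraSensor::sizes() returning the
supported sizes in ascending order, so the first size that covers the request
while respecting the width cap is also the smallest. The same policy, isolated
as a minimal sketch (illustrative names, not part of the patch):

```cpp
#include <vector>

#include <libcamera/geometry.h>

using libcamera::Size;

/* Smallest size covering "wanted" whose width respects "maxWidth". */
static Size pickSensorSize(const std::vector<Size> &ascendingSizes,
			   const Size &wanted, unsigned int maxWidth)
{
	for (const Size &s : ascendingSizes) {
		/* Skip sizes too small to cover the request. */
		if (s.width < wanted.width || s.height < wanted.height)
			continue;

		/* Enforce the multi-stream input width limit. */
		if (s.width > maxWidth)
			continue;

		/* The first match in an ascending list is the smallest. */
		return s;
	}

	return {};	/* isNull() signals no suitable sensor format. */
}
```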
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for the YUV/RGB use case.
+ *
+ * Verify if the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+/*
+ * Generate a StreamConfiguration for the raw Bayer use case. Verify if the
+ * sensor can produce the requested RAW Bayer format and, if it can't, adjust
+ * it to the format with the largest bit depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat will be adjusted if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
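
The StreamFormats built by these two helpers are what applications later see
through StreamConfiguration::formats(). A brief usage sketch (assuming cfg is
one of the entries generated above):

```cpp
#include <iostream>

#include <libcamera/stream.h>

void listFormats(const libcamera::StreamConfiguration &cfg)
{
	const libcamera::StreamFormats &formats = cfg.formats();

	/* Print every advertised pixel format with its supported sizes. */
	for (const libcamera::PixelFormat &pixFmt : formats.pixelformats()) {
		for (const libcamera::Size &size : formats.sizes(pixFmt))
			std::cout << pixFmt.toString() << ": "
				  << size.toString() << std::endl;
	}
}
```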
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - ViewFinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fallback to use a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
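
For context, this is the entry point exercised by the public API. A minimal
application-side sketch (assuming the camera has already been acquired from
the CameraManager):

```cpp
#include <cerrno>
#include <memory>

#include <libcamera/camera.h>

int setupCamera(const std::shared_ptr<libcamera::Camera> &camera)
{
	/* Ask the pipeline handler for a default two-stream configuration. */
	std::unique_ptr<libcamera::CameraConfiguration> config =
		camera->generateConfiguration({ libcamera::StreamRole::Viewfinder,
						libcamera::StreamRole::Raw });
	if (!config)
		return -EINVAL;

	/* validate() may adjust entries; Invalid means they can't be satisfied. */
	if (config->validate() == libcamera::CameraConfiguration::Invalid)
		return -EINVAL;

	return camera->configure(config.get());
}
```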
+
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
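
To make the pad arithmetic concrete: on the i.MX8MP the crossbar exposes three
sink pads (two CSIS receivers plus the memory input) and two source pads, so
pads().size() is 5 and xbarFirstSource evaluates to 3. A two-stream
configuration fed from the first sensor would then install the following
routes (a sketch; pad numbers assume the i.MX8MP layout):

```cpp
/* Camera on crossbar sink pad 0, feeding both ISI pipes. */
V4L2Subdevice::Routing routing = {};
routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },	/* xbar sink 0 */
		     V4L2Subdevice::Stream{ 3, 0 },	/* source: ISI pipe 0 */
		     V4L2_SUBDEV_ROUTE_FL_ACTIVE);
routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
		     V4L2Subdevice::Stream{ 4, 0 },	/* source: ISI pipe 1 */
		     V4L2_SUBDEV_ROUTE_FL_ACTIVE);

crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
```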
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
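
The application-side counterpart of queueRequestDevice() allocates buffers and
queues requests through the public API. A condensed sketch (error handling
omitted, camera assumed configured and started; note the requests must stay
alive until they complete):

```cpp
#include <memory>
#include <vector>

#include <libcamera/framebuffer_allocator.h>
#include <libcamera/request.h>

libcamera::FrameBufferAllocator allocator(camera);
allocator.allocate(stream);

/* Create one request per allocated buffer, then queue them all. */
std::vector<std::unique_ptr<libcamera::Request>> requests;
for (const std::unique_ptr<libcamera::FrameBuffer> &buffer :
     allocator.buffers(stream)) {
	std::unique_ptr<libcamera::Request> request = camera->createRequest();
	request->addBuffer(stream, buffer.get());
	requests.push_back(std::move(request));
}

for (std::unique_ptr<libcamera::Request> &request : requests)
	camera->queueRequest(request.get());
```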
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..ffd0ce54
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index 08e254f7..81a7a8ab 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.cpp - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#include "cio2.h"
@@ -15,6 +15,7 @@
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
+#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
@@ -177,10 +178,12 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
-int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
@@ -191,7 +194,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
- ret = sensor_->setFormat(&sensorFormat);
+ ret = sensor_->setFormat(&sensorFormat, transform);
if (ret)
return ret;
@@ -199,11 +202,11 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
if (ret)
return ret;
- const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.mbus_code);
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
if (itInfo == mbusCodesToPixelFormat.end())
return -EINVAL;
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(itInfo->second);
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
outputFormat->size = sensorFormat.size;
outputFormat->planesCount = 1;
@@ -227,13 +230,13 @@ StreamConfiguration CIO2Device::generateConfiguration(Size size) const
/* Query the sensor static information for closest match. */
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
- if (!sensorFormat.mbus_code) {
+ if (!sensorFormat.code) {
LOG(IPU3, Error) << "Sensor does not support mbus code";
return {};
}
cfg.size = sensorFormat.size;
- cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
cfg.bufferCount = kBufferCount;
return cfg;
@@ -323,7 +326,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
}
V4L2SubdeviceFormat format{};
- format.mbus_code = bestCode;
+ format.code = bestCode;
format.size = bestSize;
return format;
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index 68504a2d..963c2f6b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.h - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#pragma once
@@ -26,6 +26,7 @@ class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
+enum class Transform;
class CIO2Device
{
@@ -38,7 +39,8 @@ public:
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size, V4L2DeviceFormat *outputFormat);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
index a4c3477c..88eb9d05 100644
--- a/src/libcamera/pipeline/ipu3/frames.cpp
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.cpp - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#include "frames.h"
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
index 6e3cb915..a347b66f 100644
--- a/src/libcamera/pipeline/ipu3/frames.h
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.h - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index 59305f85..7be78091 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.cpp - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#include "imgu.h"
@@ -504,7 +504,7 @@ int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputF
LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
V4L2SubdeviceFormat gdcFormat = {};
- gdcFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
gdcFormat.size = pipeConfig.gdc;
ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
@@ -543,7 +543,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
imguFormat.size = cfg.size;
int ret = imgu_->setFormat(pad, &imguFormat);
@@ -558,7 +558,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
return 0;
*outputFormat = {};
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(formats::NV12);
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
outputFormat->size = cfg.size;
outputFormat->planesCount = 2;
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 0af4dd8a..fa508316 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.h - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index b7dda282..066fd4a2 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
@@ -11,6 +11,8 @@
#include <queue>
#include <vector>
+#include <linux/intel-ipu3.h>
+
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -49,7 +51,7 @@ class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), supportsFlips_(false)
+ : Camera::Private(pipe)
{
}
@@ -71,8 +73,6 @@ public:
Stream rawStream_;
Rectangle cropRegion_;
- bool supportsFlips_;
- Transform rotationTransform_;
std::unique_ptr<DelayedControls> delayedCtrls_;
IPU3Frames frameInfos_;
@@ -134,8 +134,8 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -182,48 +182,15 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (config_.empty())
return Invalid;
- Transform combined = transform * data_->rotationTransform_;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transposition, as we can't do
- * those. In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
/*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
*/
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
- * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -data_->rotationTransform_;
- combined = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
/* Cap the number of entries to the available streams. */
if (config_.size() > kMaxStreams) {
@@ -243,6 +210,7 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
*/
unsigned int rawCount = 0;
unsigned int yuvCount = 0;
+ Size rawRequirement;
Size maxYuvSize;
Size rawSize;
@@ -251,10 +219,11 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
rawCount++;
- rawSize.expandTo(cfg.size);
+ rawSize = std::max(rawSize, cfg.size);
} else {
yuvCount++;
- maxYuvSize.expandTo(cfg.size);
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
}
}
@@ -283,17 +252,17 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
* The output YUV streams will be limited in size to the maximum frame
* size requested for the RAW stream, if present.
*
- * If no raw stream is requested generate a size as large as the maximum
- * requested YUV size aligned to the ImgU constraints and bound by the
- * sensor's maximum resolution. See
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
* https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (rawSize.isNull())
- rawSize = maxYuvSize.expandedTo({ ImgUDevice::kIFMaxCropWidth,
- ImgUDevice::kIFMaxCropHeight })
- .grownBy({ ImgUDevice::kOutputMarginWidth,
- ImgUDevice::kOutputMarginHeight })
- .boundedTo(data_->cio2_.sensor()->resolution());
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
if (!cio2Configuration_.pixelFormat.isValid())
@@ -420,11 +389,12 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config = new IPU3CameraConfiguration(data);
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
if (roles.empty())
return config;
@@ -490,7 +460,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
@@ -552,7 +521,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
@@ -568,7 +537,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
- ret = cio2->configure(sensorSize, &cio2Format);
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
@@ -577,24 +546,6 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
data->cropRegion_ = sensorInfo.analogCrop;
/*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- ControlList sensorCtrls(cio2->sensor()->controls());
- sensorCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::HFlip)));
- sensorCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::VFlip)));
-
- ret = cio2->sensor()->setControls(&sensorCtrls);
- if (ret)
- return ret;
- }
-
- /*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
@@ -1143,25 +1094,12 @@ int PipelineHandlerIPU3::registerCameras()
&IPU3CameraData::frameStart);
/* Convert the sensor rotation to a transformation */
- int32_t rotation = 0;
- if (data->properties_.contains(properties::Rotation))
- rotation = data->properties_.get(properties::Rotation);
- else
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
LOG(IPU3, Warning) << "Rotation control not exposed by "
<< cio2->sensor()->id()
<< ". Assume rotation 0";
- bool success;
- data->rotationTransform_ = transformFromRotation(rotation, &success);
- if (!success)
- LOG(IPU3, Warning) << "Invalid rotation of " << rotation
- << " degrees: ignoring";
-
- ControlList ctrls = cio2->sensor()->getControls({ V4L2_CID_HFLIP });
- if (!ctrls.empty())
- /* We assume the sensor supports VFLIP too. */
- data->supportsFlips_ = true;
-
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
@@ -1244,8 +1182,16 @@ int IPU3CameraData::loadIPA()
if (ret)
return ret;
- ret = ipa_->init(IPASettings{ "", sensor->model() }, sensorInfo,
- sensor->controls(), &ipaControls_);
+ /*
+ * The IPA tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
@@ -1289,6 +1235,8 @@ void IPU3CameraData::paramsBufferReady(unsigned int id)
imgu_->viewfinder_->queueBuffer(outbuffer);
}
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
imgu_->param_->queueBuffer(info->paramBuffer);
imgu_->stat_->queueBuffer(info->statBuffer);
imgu_->input_->queueBuffer(info->rawBuffer);
@@ -1330,8 +1278,9 @@ void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
request->metadata().set(controls::draft::PipelineDepth, 3);
/* \todo Actually apply the scaler crop region to the ImgU. */
- if (request->controls().contains(controls::ScalerCrop))
- cropRegion_ = request->controls().get(controls::ScalerCrop);
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
request->metadata().set(controls::ScalerCrop, cropRegion_);
if (frameInfos_.tryComplete(info))
@@ -1424,7 +1373,7 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
return;
}
- ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp),
+ ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
info->statBuffer->cookie(), info->effectiveSensorControls);
}
@@ -1455,14 +1404,12 @@ void IPU3CameraData::frameStart(uint32_t sequence)
Request *request = processingRequests_.front();
processingRequests_.pop();
- if (!request->controls().contains(controls::draft::TestPatternMode))
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
return;
- const int32_t testPatternMode = request->controls().get(
- controls::draft::TestPatternMode);
-
int ret = cio2_.sensor()->setTestPatternMode(
- static_cast<controls::draft::TestPatternModeEnum>(testPatternMode));
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
if (ret) {
LOG(IPU3, Error) << "Failed to set test pattern mode: "
<< ret;
@@ -1470,9 +1417,9 @@ void IPU3CameraData::frameStart(uint32_t sequence)
}
request->metadata().set(controls::draft::TestPatternMode,
- testPatternMode);
+ *testPatternMode);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
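
The recurring change across the hunks above is the ControlList::get()
migration: the old API paired a contains() check with a value-returning get(),
while the new one returns a std::optional. The idiom, condensed from this
file:

```cpp
/* Old API: separate presence check and lookup. */
if (request->controls().contains(controls::ScalerCrop))
	cropRegion_ = request->controls().get(controls::ScalerCrop);

/* New API: a single lookup returning a std::optional. */
const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
if (scalerCrop)
	cropRegion_ = *scalerCrop;

/* Or, when a default value is acceptable: */
int64_t timestamp =
	request->metadata().get(controls::SensorTimestamp).value_or(0);
```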
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..45c71c1d
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+
+ /* Delegate these functionalities to either the TPG or the CameraSensor. */
+ const std::vector<unsigned int> mbusCodes() const;
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ PixelFormat bestRawFormat() const;
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+private:
+ void initTPGData();
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+ /* If this camera is created from TPG, we return here. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+ * Register a CameraSensor if we connect to a sensor and create
+ * an entity for the connected CSI-2 receiver.
+ */
+ sensor_ = std::make_unique<CameraSensor>(entity_);
+ ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+ ret = csi_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+ return ret;
+ }
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const
+{
+ if (sensor_)
+ return sensor_->mbusCodes();
+
+ return tpgCodes_;
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+PixelFormat MaliC55CameraData::bestRawFormat() const
+{
+ unsigned int bitDepth = 0;
+ PixelFormat rawFormat;
+
+ /*
+ * Iterate over all the supported PixelFormat and find the one
+ * supported by the camera with the largest bitdepth.
+ */
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ unsigned int rawCode = maliFormat.second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.bitDepth > bitDepth) {
+ bitDepth = bayer.bitDepth;
+ rawFormat = pixFmt;
+ }
+ }
+
+ return rawFormat;
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the provided raw format is supported by the pipeline. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return bestRawFormat();
+
+ /* Now make sure the RAW mbus code is supported by the image source. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const
+{
+ /* Just make sure the format is supported. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return {};
+
+ /* Check if the size is natively supported. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &size : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = size;
+ }
+ }
+
+ return bestSize;
+}
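
The nearest-size search used here (and again in validate() below) reduces to a
small helper keeping a running minimum of the L1 distance; a standalone sketch
with illustrative names:

```cpp
#include <cstdlib>
#include <limits>
#include <vector>

#include <libcamera/geometry.h>

using libcamera::Size;

/* Return the candidate closest to "target" by L1 (Manhattan) distance. */
static Size closestSize(const std::vector<Size> &candidates, const Size &target)
{
	unsigned int best = std::numeric_limits<unsigned int>::max();
	Size bestSize;

	for (const Size &size : candidates) {
		unsigned int dist =
			std::abs(static_cast<int>(target.width) -
				 static_cast<int>(size.width)) +
			std::abs(static_cast<int>(target.height) -
				 static_cast<int>(size.height));
		if (dist < best) {
			best = dist;	/* Keep the running minimum. */
			bestSize = size;
		}
	}

	return bestSize;
}
```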
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ Size maxSize = kMaliC55MaxSize;
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+ << "RAW sizes adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /* Adjust processed streams. */
+ Size maxYuvSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (maxYuvSize < size)
+ maxYuvSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat);
+ sensorFormat_.code = it->second;
+ sensorFormat_.size = rawConfig->size;
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ const auto it = maliC55FmtToCode.find(rawFormat);
+ sensorFormat_.code = it->second;
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(it->second);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ /* Skip sensor sizes that are smaller than the max YUV size. */
+ if (maxYuvSize.width > size.width ||
+ maxYuvSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(maxYuvSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = size;
+ }
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void bufferReady(FrameBuffer *buffer);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ Stream *stream;
+ };
+
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ unsigned int rawCode = maliFormat.second;
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
+ /* Enable the debayer route to set fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* \todo Configure the resizer crop/compose rectangles. */
+ Rectangle ispCrop = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+ /* Link the graph depending on whether we operate the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the resizer: apply the fixed ISP internal format on the
+ * sink pad and the media bus code associated with the desired capture
+ * format on the source pad.
+ *
+ * Configure the crop and compose rectangles to match the desired
+ * stream output size.
+ *
+ * \todo Make the crop/scaler configurable.
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ int ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+}
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ int ret;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+
+ ret = pipe->cap->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ completeBuffer(request, buffer);
+
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ registerMaliCamera(std::move(data), name);
+
+ return true;
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+	/* \todo Init properties and controls. */
+
+ registerMaliCamera(std::move(data), sensor->name());
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the ISP subdevice and the full resolution pipe.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+ }
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+ LOG(MaliC55, Error) << "ISP sink pad error";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+	 * The last one is unsupported for now. The TPG is relatively easy: we
+	 * just register a Camera for it. If we have a CSI-2 receiver we need
+	 * to check its sink pad and register Cameras for anything connected to
+	 * it (probably... there are some complex situations in which that might
+	 * not be true, but let's pretend they don't exist until we come across
+	 * them).
+ */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
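+/* Register a factory for the Mali-C55 pipeline handler. */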
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..30fd29b9
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 30dc5b97..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,5 +1,20 @@
# SPDX-License-Identifier: CC0-1.0
+# Location of pipeline-specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+    # Don't reuse the pipeline variable below; the subdirectory may have
+    # overwritten it.
endforeach
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp b/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
deleted file mode 100644
index 69831dab..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.h b/src/libcamera/pipeline/raspberrypi/dma_heaps.h
deleted file mode 100644
index d38f41ea..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <libcamera/base/unique_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-class DmaHeap
-{
-public:
- DmaHeap();
- ~DmaHeap();
- bool isValid() const { return dmaHeapHandle_.isValid(); }
- UniqueFD alloc(const char *name, std::size_t size);
-
-private:
- UniqueFD dmaHeapHandle_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
deleted file mode 100644
index 66a84b1d..00000000
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ /dev/null
@@ -1,2200 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.
- *
- * raspberrypi.cpp - Pipeline handler for Raspberry Pi devices
- */
-#include <algorithm>
-#include <assert.h>
-#include <cmath>
-#include <fcntl.h>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <unordered_set>
-#include <utility>
-
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/camera.h>
-#include <libcamera/control_ids.h>
-#include <libcamera/formats.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
-#include <libcamera/logging.h>
-#include <libcamera/property_ids.h>
-#include <libcamera/request.h>
-
-#include <linux/bcm2835-isp.h>
-#include <linux/media-bus-format.h>
-#include <linux/videodev2.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera.h"
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/delayed_controls.h"
-#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
-#include "libcamera/internal/ipa_manager.h"
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-#include "dma_heaps.h"
-#include "rpi_stream.h"
-
-using namespace std::chrono_literals;
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(RPI)
-
-namespace {
-
-constexpr unsigned int defaultRawBitDepth = 12;
-
-/* Map of mbus codes to supported sizes reported by the sensor. */
-using SensorFormats = std::map<unsigned int, std::vector<Size>>;
-
-SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
-{
- SensorFormats formats;
-
- for (auto const mbusCode : sensor->mbusCodes())
- formats.emplace(mbusCode, sensor->sizes(mbusCode));
-
- return formats;
-}
-
-PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
- BayerFormat::Packing packingReq)
-{
- BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
-
- ASSERT(bayer.isValid());
-
- bayer.packing = packingReq;
- PixelFormat pix = bayer.toPixelFormat();
-
- /*
- * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
- * variants. So if the PixelFormat returns as invalid, use the non-packed
- * conversion instead.
- */
- if (!pix.isValid()) {
- bayer.packing = BayerFormat::Packing::None;
- pix = bayer.toPixelFormat();
- }
-
- return pix;
-}
-
-V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format,
- BayerFormat::Packing packingReq)
-{
- const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);
- V4L2DeviceFormat deviceFormat;
-
- deviceFormat.fourcc = V4L2PixelFormat::fromPixelFormat(pix);
- deviceFormat.size = format.size;
- deviceFormat.colorSpace = format.colorSpace;
- return deviceFormat;
-}
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- /*
-	 * The isRaw test might be redundant right now, as the pipeline handler only
- * supports RAW sensors. Leave it in for now, just as a sanity check.
- */
- if (!pixFmt.isValid())
- return false;
-
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- if (!info.isValid())
- return false;
-
- return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-double scoreFormat(double desired, double actual)
-{
- double score = desired - actual;
- /* Smaller desired dimensions are preferred. */
- if (score < 0.0)
- score = (-score) / 8;
- /* Penalise non-exact matches. */
- if (actual != desired)
- score *= 2;
-
- return score;
-}
-
-V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
-{
- double bestScore = std::numeric_limits<double>::max(), score;
- V4L2SubdeviceFormat bestFormat;
- bestFormat.colorSpace = ColorSpace::Raw;
-
- constexpr float penaltyAr = 1500.0;
- constexpr float penaltyBitDepth = 500.0;
-
- /* Calculate the closest/best mode from the user requested size. */
- for (const auto &iter : formatsMap) {
- const unsigned int mbusCode = iter.first;
- const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
- BayerFormat::Packing::None);
- const PixelFormatInfo &info = PixelFormatInfo::info(format);
-
- for (const Size &size : iter.second) {
- double reqAr = static_cast<double>(req.width) / req.height;
- double fmtAr = static_cast<double>(size.width) / size.height;
-
- /* Score the dimensions for closeness. */
- score = scoreFormat(req.width, size.width);
- score += scoreFormat(req.height, size.height);
- score += penaltyAr * scoreFormat(reqAr, fmtAr);
-
- /* Add any penalties... this is not an exact science! */
- score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
-
- if (score <= bestScore) {
- bestScore = score;
- bestFormat.mbus_code = mbusCode;
- bestFormat.size = size;
- }
-
- LOG(RPI, Debug) << "Format: " << size
- << " fmt " << format
- << " Score: " << score
- << " (best " << bestScore << ")";
- }
- }
-
- return bestFormat;
-}
-
-enum class Unicam : unsigned int { Image, Embedded };
-enum class Isp : unsigned int { Input, Output0, Output1, Stats };
-
-} /* namespace */
-
-class RPiCameraData : public Camera::Private
-{
-public:
- RPiCameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), state_(State::Stopped),
- supportsFlips_(false), flipsAlterBayerOrder_(false),
- dropFrameCount_(0), buffersAllocated_(false), ispOutputCount_(0)
- {
- }
-
- ~RPiCameraData()
- {
- freeBuffers();
- }
-
- void freeBuffers();
- void frameStarted(uint32_t sequence);
-
- int loadIPA(ipa::RPi::IPAInitResult *result);
- int configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result);
-
- void enumerateVideoDevices(MediaLink *link);
-
- void statsMetadataComplete(uint32_t bufferId, const ControlList &controls);
- void runIsp(uint32_t bufferId);
- void embeddedComplete(uint32_t bufferId);
- void setIspControls(const ControlList &controls);
- void setDelayedControls(const ControlList &controls);
- void setSensorControls(ControlList &controls);
- void unicamTimeout();
-
- /* bufferComplete signal handlers. */
- void unicamBufferDequeue(FrameBuffer *buffer);
- void ispInputDequeue(FrameBuffer *buffer);
- void ispOutputDequeue(FrameBuffer *buffer);
-
- void clearIncompleteRequests();
- void handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleState();
- Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
- void applyScalerCrop(const ControlList &controls);
-
- std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
-
- std::unique_ptr<CameraSensor> sensor_;
- SensorFormats sensorFormats_;
- /* Array of Unicam and ISP device streams and associated buffers/streams. */
- RPi::Device<Unicam, 2> unicam_;
- RPi::Device<Isp, 4> isp_;
- /* The vector below is just for convenience when iterating over all streams. */
- std::vector<RPi::Stream *> streams_;
- /* Stores the ids of the buffers mapped in the IPA. */
- std::unordered_set<unsigned int> ipaBuffers_;
- /*
- * Stores a cascade of Video Mux or Bridge devices between the sensor and
-	 * Unicam, together with the media links across the entities.
- */
- std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
-
- /* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
- SharedFD lsTable_;
-
- std::unique_ptr<DelayedControls> delayedCtrls_;
- bool sensorMetadata_;
-
- /*
- * All the functions in this class are called from a single calling
- * thread. So, we do not need to have any mutex to protect access to any
- * of the variables below.
- */
- enum class State { Stopped, Idle, Busy, IpaComplete };
- State state_;
-
- struct BayerFrame {
- FrameBuffer *buffer;
- ControlList controls;
- };
-
- std::queue<BayerFrame> bayerQueue_;
- std::queue<FrameBuffer *> embeddedQueue_;
- std::deque<Request *> requestQueue_;
-
- /*
- * Manage horizontal and vertical flips supported (or not) by the
- * sensor. Also store the "native" Bayer order (that is, with no
- * transforms applied).
- */
- bool supportsFlips_;
- bool flipsAlterBayerOrder_;
- BayerFormat::Order nativeBayerOrder_;
-
- /* For handling digital zoom. */
- IPACameraSensorInfo sensorInfo_;
- Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
- Rectangle scalerCrop_; /* crop in sensor native pixels */
- Size ispMinCropSize_;
-
- unsigned int dropFrameCount_;
-
- /*
-	 * If set, this stores the value that represents a gain of one for
- * the V4L2_CID_NOTIFY_GAINS control.
- */
- std::optional<int32_t> notifyGainsUnity_;
-
- /* Have internal buffers been allocated? */
- bool buffersAllocated_;
-
-private:
- void checkRequestCompleted();
- void fillRequestMetadata(const ControlList &bufferControls,
- Request *request);
- void tryRunPipeline();
- bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
-
- unsigned int ispOutputCount_;
-};
-
-class RPiCameraConfiguration : public CameraConfiguration
-{
-public:
- RPiCameraConfiguration(const RPiCameraData *data);
-
- Status validate() override;
-
- /* Cache the combinedTransform_ that will be applied to the sensor */
- Transform combinedTransform_;
-
-private:
- const RPiCameraData *data_;
-};
-
-class PipelineHandlerRPi : public PipelineHandler
-{
-public:
- PipelineHandlerRPi(CameraManager *manager);
-
- CameraConfiguration *generateConfiguration(Camera *camera, const StreamRoles &roles) override;
- int configure(Camera *camera, CameraConfiguration *config) override;
-
- int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
-
- int start(Camera *camera, const ControlList *controls) override;
- void stopDevice(Camera *camera) override;
-
- int queueRequestDevice(Camera *camera, Request *request) override;
-
- bool match(DeviceEnumerator *enumerator) override;
-
-private:
- RPiCameraData *cameraData(Camera *camera)
- {
- return static_cast<RPiCameraData *>(camera->_d());
- }
-
- int registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity);
- int queueAllBuffers(Camera *camera);
- int prepareBuffers(Camera *camera);
- void mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask);
-};
-
-RPiCameraConfiguration::RPiCameraConfiguration(const RPiCameraData *data)
- : CameraConfiguration(), data_(data)
-{
-}
-
-CameraConfiguration::Status RPiCameraConfiguration::validate()
-{
- Status status = Valid;
-
- if (config_.empty())
- return Invalid;
-
- status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
-
- /*
- * What if the platform has a non-90 degree rotation? We can't even
- * "adjust" the configuration and carry on. Alternatively, raising an
- * error means the platform can never run. Let's just print a warning
- * and continue regardless; the rotation is effectively set to zero.
- */
- int32_t rotation = data_->sensor_->properties().get(properties::Rotation);
- bool success;
- Transform rotationTransform = transformFromRotation(rotation, &success);
- if (!success)
- LOG(RPI, Warning) << "Invalid rotation of " << rotation
- << " degrees - ignoring";
- Transform combined = transform * rotationTransform;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
-	 * any combined result that includes a transpose, as we can't do those.
- * In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
- /*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
- */
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
-		 * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -rotationTransform;
- combined = Transform::Identity;
- status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
-
- unsigned int rawCount = 0, outCount = 0, count = 0, maxIndex = 0;
- std::pair<int, Size> outSize[2];
- Size maxSize;
- for (StreamConfiguration &cfg : config_) {
- if (isRaw(cfg.pixelFormat)) {
- /*
- * Calculate the best sensor mode we can use based on
- * the user request.
- */
- const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
- unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- if (info.isValid() && !info.packed)
- packing = BayerFormat::Packing::None;
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat,
- packing);
- int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
- if (ret)
- return Invalid;
-
- /*
- * Some sensors change their Bayer order when they are
- * h-flipped or v-flipped, according to the transform.
- * If this one does, we must advertise the transformed
- * Bayer order in the raw stream. Note how we must
- * fetch the "native" (i.e. untransformed) Bayer order,
- * because the sensor may currently be flipped!
- */
- V4L2PixelFormat fourcc = unicamFormat.fourcc;
- if (data_->flipsAlterBayerOrder_) {
- BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
- bayer.order = data_->nativeBayerOrder_;
- bayer = bayer.transform(combined);
- fourcc = bayer.toV4L2PixelFormat();
- }
-
- PixelFormat unicamPixFormat = fourcc.toPixelFormat();
- if (cfg.size != unicamFormat.size ||
- cfg.pixelFormat != unicamPixFormat) {
- cfg.size = unicamFormat.size;
- cfg.pixelFormat = unicamPixFormat;
- status = Adjusted;
- }
-
- cfg.stride = unicamFormat.planes[0].bpl;
- cfg.frameSize = unicamFormat.planes[0].size;
-
- rawCount++;
- } else {
- outSize[outCount] = std::make_pair(count, cfg.size);
- /* Record the largest resolution for fixups later. */
- if (maxSize < cfg.size) {
- maxSize = cfg.size;
- maxIndex = outCount;
- }
- outCount++;
- }
-
- count++;
-
- /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid number of streams requested";
- return Invalid;
- }
- }
-
- /*
- * Now do any fixups needed. For the two ISP outputs, one stream must be
- * equal or smaller than the other in all dimensions.
- */
- for (unsigned int i = 0; i < outCount; i++) {
- outSize[i].second.width = std::min(outSize[i].second.width,
- maxSize.width);
- outSize[i].second.height = std::min(outSize[i].second.height,
- maxSize.height);
-
- if (config_.at(outSize[i].first).size != outSize[i].second) {
- config_.at(outSize[i].first).size = outSize[i].second;
- status = Adjusted;
- }
-
- /*
- * Also validate the correct pixel formats here.
- * Note that Output0 and Output1 support a different
- * set of formats.
- *
- * Output 0 must be for the largest resolution. We will
- * have that fixed up in the code above.
- *
- */
- StreamConfiguration &cfg = config_.at(outSize[i].first);
- PixelFormat &cfgPixFmt = cfg.pixelFormat;
- V4L2VideoDevice *dev;
-
- if (i == maxIndex)
- dev = data_->isp_[Isp::Output0].dev();
- else
- dev = data_->isp_[Isp::Output1].dev();
-
- V4L2VideoDevice::Formats fmts = dev->formats();
-
- if (fmts.find(V4L2PixelFormat::fromPixelFormat(cfgPixFmt)) == fmts.end()) {
- /* If we cannot find a native format, use a default one. */
- cfgPixFmt = formats::NV12;
- status = Adjusted;
- }
-
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug)
- << "Try color space " << ColorSpace::toString(cfg.colorSpace);
-
- int ret = dev->tryFormat(&format);
- if (ret)
- return Invalid;
-
- if (cfg.colorSpace != format.colorSpace) {
- status = Adjusted;
- LOG(RPI, Debug)
- << "Color space changed from "
- << ColorSpace::toString(cfg.colorSpace) << " to "
- << ColorSpace::toString(format.colorSpace);
- }
-
- cfg.colorSpace = format.colorSpace;
-
- cfg.stride = format.planes[0].bpl;
- cfg.frameSize = format.planes[0].size;
-	}
-
- return status;
-}
-
-PipelineHandlerRPi::PipelineHandlerRPi(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
-CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
-{
- RPiCameraData *data = cameraData(camera);
- CameraConfiguration *config = new RPiCameraConfiguration(data);
- V4L2SubdeviceFormat sensorFormat;
- unsigned int bufferCount;
- PixelFormat pixelFormat;
- V4L2VideoDevice::Formats fmts;
- Size size;
- std::optional<ColorSpace> colorSpace;
-
- if (roles.empty())
- return config;
-
- unsigned int rawCount = 0;
- unsigned int outCount = 0;
- Size sensorSize = data->sensor_->resolution();
- for (const StreamRole role : roles) {
- switch (role) {
- case StreamRole::Raw:
- size = sensorSize;
- sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
- pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
- BayerFormat::Packing::CSI2);
- ASSERT(pixelFormat.isValid());
- colorSpace = ColorSpace::Raw;
- bufferCount = 2;
- rawCount++;
- break;
-
- case StreamRole::StillCapture:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::NV12;
- /*
- * Still image codecs usually expect the JPEG color space.
- * Even RGB codecs will be fine as the RGB we get with the
- * JPEG color space is the same as sRGB.
- */
- colorSpace = ColorSpace::Jpeg;
- /* Return the largest sensor resolution. */
- size = sensorSize;
- bufferCount = 1;
- outCount++;
- break;
-
- case StreamRole::VideoRecording:
- /*
- * The colour denoise algorithm requires the analysis
- * image, produced by the second ISP output, to be in
- * YUV420 format. Select this format as the default, to
- * maximize chances that it will be picked by
- * applications and enable usage of the colour denoise
- * algorithm.
- */
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::YUV420;
- /*
- * Choose a color space appropriate for video recording.
- * Rec.709 will be a good default for HD resolutions.
- */
- colorSpace = ColorSpace::Rec709;
- size = { 1920, 1080 };
- bufferCount = 4;
- outCount++;
- break;
-
- case StreamRole::Viewfinder:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::ARGB8888;
- colorSpace = ColorSpace::Jpeg;
- size = { 800, 600 };
- bufferCount = 4;
- outCount++;
- break;
-
- default:
- LOG(RPI, Error) << "Requested stream role not supported: "
- << role;
- delete config;
- return nullptr;
- }
-
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid stream roles requested";
- delete config;
- return nullptr;
- }
-
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- if (role == StreamRole::Raw) {
- /* Translate the MBUS codes to a PixelFormat. */
- for (const auto &format : data->sensorFormats_) {
- PixelFormat pf = mbusCodeToPixelFormat(format.first,
- BayerFormat::Packing::CSI2);
- if (pf.isValid())
- deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
- std::forward_as_tuple(format.second.begin(), format.second.end()));
- }
- } else {
- /*
- * Translate the V4L2PixelFormat to PixelFormat. Note that we
- * limit the recommended largest ISP output size to match the
- * sensor resolution.
- */
- for (const auto &format : fmts) {
- PixelFormat pf = format.first.toPixelFormat();
- if (pf.isValid()) {
- const SizeRange &ispSizes = format.second[0];
- deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
- ispSizes.hStep, ispSizes.vStep);
- }
- }
- }
-
- /* Add the stream format based on the device node used for the use case. */
- StreamFormats formats(deviceFormats);
- StreamConfiguration cfg(formats);
- cfg.size = size;
- cfg.pixelFormat = pixelFormat;
- cfg.colorSpace = colorSpace;
- cfg.bufferCount = bufferCount;
- config->addConfiguration(cfg);
- }
-
- config->validate();
-
- return config;
-}
-
-int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
-	/* Start by freeing all buffers and resetting the Unicam and ISP stream states. */
- data->freeBuffers();
- for (auto const stream : data->streams_)
- stream->setExternal(false);
-
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- Size maxSize, sensorSize;
- unsigned int maxIndex = 0;
- bool rawStream = false;
- unsigned int bitDepth = defaultRawBitDepth;
-
- /*
- * Look for the RAW stream (if given) size as well as the largest
- * ISP output size.
- */
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- /*
- * If we have been given a RAW stream, use that size
- * for setting up the sensor.
- */
- sensorSize = cfg.size;
- rawStream = true;
- /* Check if the user has explicitly set an unpacked format. */
- BayerFormat bayerFormat = BayerFormat::fromPixelFormat(cfg.pixelFormat);
- packing = bayerFormat.packing;
- bitDepth = bayerFormat.bitDepth;
- } else {
- if (cfg.size > maxSize) {
- maxSize = config->at(i).size;
- maxIndex = i;
- }
- }
- }
-
- /*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- const RPiCameraConfiguration *rpiConfig =
- static_cast<const RPiCameraConfiguration *>(config);
- ControlList controls;
-
- controls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::HFlip)));
- controls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::VFlip)));
- data->setSensorControls(controls);
- }
-
- /* First calculate the best sensor mode we can use based on the user request. */
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
- ret = data->sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
-
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat, packing);
- ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- LOG(RPI, Info) << "Sensor: " << camera->id()
- << " - Selected sensor format: " << sensorFormat
- << " - Selected unicam format: " << unicamFormat;
-
- ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- /*
- * See which streams are requested, and route the user
- * StreamConfiguration appropriately.
- */
- V4L2DeviceFormat format;
- bool output0Set = false, output1Set = false;
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- cfg.setStream(&data->unicam_[Unicam::Image]);
- data->unicam_[Unicam::Image].setExternal(true);
- continue;
- }
-
- /* The largest resolution gets routed to the ISP Output 0 node. */
- RPi::Stream *stream = i == maxIndex ? &data->isp_[Isp::Output0]
- : &data->isp_[Isp::Output1];
-
- V4L2PixelFormat fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.fourcc = fourcc;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug) << "Setting " << stream->name() << " to "
- << format;
-
- ret = stream->dev()->setFormat(&format);
- if (ret)
- return -EINVAL;
-
- if (format.size != cfg.size || format.fourcc != fourcc) {
- LOG(RPI, Error)
- << "Failed to set requested format on " << stream->name()
- << ", returned " << format;
- return -EINVAL;
- }
-
- LOG(RPI, Debug)
- << "Stream " << stream->name() << " has color space "
- << ColorSpace::toString(cfg.colorSpace);
-
- cfg.setStream(stream);
- stream->setExternal(true);
-
- if (i != maxIndex)
- output1Set = true;
- else
- output0Set = true;
- }
-
- /*
- * If ISP::Output0 stream has not been configured by the application,
- * we must allow the hardware to generate an output so that the data
- * flow in the pipeline handler remains consistent, and we still generate
- * statistics for the IPA to use. So enable the output at a very low
- * resolution for internal use.
- *
- * \todo Allow the pipeline to work correctly without Output0 and only
- * statistics coming from the hardware.
- */
- if (!output0Set) {
- maxSize = Size(320, 240);
- format = {};
- format.size = maxSize;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
- /* No one asked for output, so the color space doesn't matter. */
- format.colorSpace = ColorSpace::Jpeg;
- ret = data->isp_[Isp::Output0].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error)
- << "Failed to set default format on ISP Output0: "
- << ret;
- return -EINVAL;
- }
-
- LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
- << format;
- }
-
- /*
- * If ISP::Output1 stream has not been requested by the application, we
- * set it up for internal use now. This second stream will be used for
- * fast colour denoise, and must be a quarter resolution of the ISP::Output0
- * stream. However, also limit the maximum size to 1200 pixels in the
- * larger dimension, just to avoid being wasteful with buffer allocations
- * and memory bandwidth.
- *
- * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
- * colour denoise will not run.
- */
- if (!output1Set) {
- V4L2DeviceFormat output1Format;
- constexpr Size maxDimensions(1200, 1200);
- const Size limit = maxDimensions.boundedToAspectRatio(format.size);
-
- output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
- output1Format.colorSpace = format.colorSpace;
- output1Format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
-
- LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
- << output1Format;
-
- ret = data->isp_[Isp::Output1].dev()->setFormat(&output1Format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP Output1: "
- << ret;
- return -EINVAL;
- }
- }
-
- /* ISP statistics output format. */
- format = {};
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
- ret = data->isp_[Isp::Stats].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
- << format;
- return ret;
- }
-
- /* Figure out the smallest selection the ISP will allow. */
- Rectangle testCrop(0, 0, 1, 1);
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
- data->ispMinCropSize_ = testCrop.size();
-
- /* Adjust aspect ratio by providing crops on the input image. */
- Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
- Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
- data->ispCrop_ = crop;
-
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
-
- ipa::RPi::IPAConfigResult result;
- ret = data->configureIPA(config, &result);
- if (ret)
- LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
-
- /*
- * Set the scaler crop to the value we are using (scaled to native sensor
- * coordinates).
- */
- data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
-
- /*
- * Configure the Unicam embedded data output format only if the sensor
- * supports it.
- */
- if (data->sensorMetadata_) {
- V4L2SubdeviceFormat embeddedFormat;
-
- data->sensor_->device()->getFormat(1, &embeddedFormat);
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
- format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
-
- LOG(RPI, Debug) << "Setting embedded data format.";
- ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
- << format;
- return ret;
- }
- }
-
- /*
- * Update the ScalerCropMaximum to the correct value for this camera mode.
- * For us, it's the same as the "analogue crop".
- *
- * \todo Make this property the ScalerCrop maximum value when dynamic
- * controls are available and set it at validate() time
- */
- data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
-
- /* Store the mode sensitivity for the application. */
- data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
-
- /* Update the controls that the Raspberry Pi IPA can handle. */
- ControlInfoMap::Map ctrlMap;
- for (auto const &c : result.controlInfo)
- ctrlMap.emplace(c.first, c.second);
-
- /* Add the ScalerCrop control limits based on the current mode. */
- ctrlMap.emplace(&controls::ScalerCrop,
- ControlInfo(Rectangle(data->ispMinCropSize_), Rectangle(data->sensorInfo_.outputSize)));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
-
- /* Setup the Video Mux/Bridge entities. */
- for (auto &[device, link] : data->bridgeDevices_) {
- /*
- * Start by disabling all the sink pad links on the devices in the
- * cascade, with the exception of the link connecting the device.
- */
- for (const MediaPad *p : device->entity()->pads()) {
- if (!(p->flags() & MEDIA_PAD_FL_SINK))
- continue;
-
- for (MediaLink *l : p->links()) {
- if (l != link)
- l->setEnabled(false);
- }
- }
-
- /*
- * Next, enable the entity -> entity links, and setup the pad format.
- *
-	 * \todo Some bridge devices may change the media bus code, so we
- * ought to read the source pad format and propagate it to the sink pad.
- */
- link->setEnabled(true);
- const MediaPad *sinkPad = link->sink();
- ret = device->setFormat(sinkPad->index(), &sensorFormat);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
- << " pad " << sinkPad->index()
- << " with format " << format
- << ": " << ret;
- return ret;
- }
-
- LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
- << " on pad " << sinkPad->index();
- }
-
- return ret;
-}
-
-int PipelineHandlerRPi::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- RPi::Stream *s = static_cast<RPi::Stream *>(stream);
- unsigned int count = stream->configuration().bufferCount;
- int ret = s->dev()->exportBuffers(count, buffers);
-
- s->setExportedBuffers(buffers);
-
- return ret;
-}
-
-int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_)
- stream->resetBuffers();
-
- if (!data->buffersAllocated_) {
- /* Allocate buffers for internal pipeline usage. */
- ret = prepareBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to allocate buffers";
- data->freeBuffers();
- stop(camera);
- return ret;
- }
- data->buffersAllocated_ = true;
- }
-
- /* Check if a ScalerCrop control was specified. */
- if (controls)
- data->applyScalerCrop(*controls);
-
- /* Start the IPA. */
- ipa::RPi::StartConfig startConfig;
- data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
- &startConfig);
-
- /* Apply any gain/exposure settings that the IPA may have passed back. */
- if (!startConfig.controls.empty())
- data->setSensorControls(startConfig.controls);
-
- /* Configure the number of dropped frames required on startup. */
- data->dropFrameCount_ = startConfig.dropFrameCount;
-
- /* We need to set the dropFrameCount_ before queueing buffers. */
- ret = queueAllBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to queue buffers";
- stop(camera);
- return ret;
- }
-
- /* Enable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(true);
-
- /*
- * Reset the delayed controls with the gain and exposure values set by
- * the IPA.
- */
- data->delayedCtrls_->reset();
-
- data->state_ = RPiCameraData::State::Idle;
-
- /* Start all streams. */
- for (auto const stream : data->streams_) {
- ret = stream->dev()->streamOn();
- if (ret) {
- stop(camera);
- return ret;
- }
- }
-
- /*
- * Set the dequeue timeout to the larger of 2x the maximum possible
- * frame duration or 1 second.
- */
- utils::Duration timeout =
- std::max<utils::Duration>(1s, 2 * startConfig.maxSensorFrameLengthMs * 1ms);
- data->unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
-
- return 0;
-}
-
-void PipelineHandlerRPi::stopDevice(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
-
- data->state_ = RPiCameraData::State::Stopped;
-
- /* Disable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(false);
-
- for (auto const stream : data->streams_)
- stream->dev()->streamOff();
-
- data->clearIncompleteRequests();
- data->bayerQueue_ = {};
- data->embeddedQueue_ = {};
-
- /* Stop the IPA. */
- data->ipa_->stop();
-}
-
-int PipelineHandlerRPi::queueRequestDevice(Camera *camera, Request *request)
-{
- RPiCameraData *data = cameraData(camera);
-
- if (data->state_ == RPiCameraData::State::Stopped)
- return -EINVAL;
-
- LOG(RPI, Debug) << "queueRequestDevice: New request.";
-
- /* Push all buffers supplied in the Request to the respective streams. */
- for (auto stream : data->streams_) {
- if (!stream->isExternal())
- continue;
-
- FrameBuffer *buffer = request->findBuffer(stream);
- if (buffer && stream->getBufferId(buffer) == -1) {
- /*
- * This buffer is not recognised, so it must have been allocated
- * outside the v4l2 device. Store it in the stream buffer list
- * so we can track it.
- */
- stream->setExternalBuffer(buffer);
- }
- /*
- * If no buffer is provided by the request for this stream, we
- * queue a nullptr to the stream to signify that it must use an
- * internally allocated buffer for this capture request. This
- * buffer will not be given back to the application, but is used
- * to support the internal pipeline flow.
- *
- * The below queueBuffer() call will do nothing if there are not
- * enough internal buffers allocated, but this will be handled by
- * queuing the request for buffers in the RPiStream object.
- */
- int ret = stream->queueBuffer(buffer);
- if (ret)
- return ret;
- }
-
- /* Push the request to the back of the queue. */
- data->requestQueue_.push_back(request);
- data->handleState();
-
- return 0;
-}
-
-bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
-{
- DeviceMatch unicam("unicam");
- MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
-
- if (!unicamDevice) {
- LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
- return false;
- }
-
- DeviceMatch isp("bcm2835-isp");
- MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
-
- if (!ispDevice) {
- LOG(RPI, Debug) << "Unable to acquire ISP instance";
- return false;
- }
-
- /*
- * The loop below is used to register multiple cameras behind one or more
- * video mux devices that are attached to a particular Unicam instance.
- * Obviously these cameras cannot be used simultaneously.
- */
- unsigned int numCameras = 0;
- for (MediaEntity *entity : unicamDevice->entities()) {
- if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
- continue;
-
- int ret = registerCamera(unicamDevice, ispDevice, entity);
- if (ret)
- LOG(RPI, Error) << "Failed to register camera "
- << entity->name() << ": " << ret;
- else
- numCameras++;
- }
-
- return !!numCameras;
-}
-
-int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity)
-{
- std::unique_ptr<RPiCameraData> data = std::make_unique<RPiCameraData>(this);
-
- if (!data->dmaHeap_.isValid())
- return -ENOMEM;
-
- MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
- MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
- MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
- MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
- MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
-
- if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
- return -ENOENT;
-
- /* Locate and open the unicam video streams. */
- data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
-
- /* An embedded data node will not be present if the sensor does not support it. */
- MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
- if (unicamEmbedded) {
- data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
- data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data.get(),
- &RPiCameraData::unicamBufferDequeue);
- }
-
- /* Tag the ISP input stream as an import stream. */
- data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
- data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
- data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
- data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
-
- /* Wire up all the buffer connections. */
- data->unicam_[Unicam::Image].dev()->dequeueTimeout.connect(data.get(), &RPiCameraData::unicamTimeout);
- data->unicam_[Unicam::Image].dev()->frameStart.connect(data.get(), &RPiCameraData::frameStarted);
- data->unicam_[Unicam::Image].dev()->bufferReady.connect(data.get(), &RPiCameraData::unicamBufferDequeue);
- data->isp_[Isp::Input].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispInputDequeue);
- data->isp_[Isp::Output0].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Output1].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Stats].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-
- data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- if (!data->sensor_)
- return -EINVAL;
-
- if (data->sensor_->init())
- return -EINVAL;
-
- /*
- * Enumerate all the Video Mux/Bridge devices across the sensor -> unicam
- * chain. There may be a cascade of devices in this chain!
- */
- MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
- data->enumerateVideoDevices(link);
-
- data->sensorFormats_ = populateSensorFormats(data->sensor_);
-
- ipa::RPi::IPAInitResult result;
- if (data->loadIPA(&result)) {
- LOG(RPI, Error) << "Failed to load a suitable IPA library";
- return -EINVAL;
- }
-
- if (result.sensorConfig.sensorMetadata ^ !!unicamEmbedded) {
- LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
- result.sensorConfig.sensorMetadata = false;
- if (unicamEmbedded)
- data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
- }
-
- /*
- * Open all Unicam and ISP streams. The exception is the embedded data
- * stream, which only gets opened below if the IPA reports that the sensor
- * supports embedded data.
- *
- * The below grouping is just for convenience so that we can easily
- * iterate over all streams in one go.
- */
- data->streams_.push_back(&data->unicam_[Unicam::Image]);
- if (result.sensorConfig.sensorMetadata)
- data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
-
- for (auto &stream : data->isp_)
- data->streams_.push_back(&stream);
-
- for (auto stream : data->streams_) {
- int ret = stream->dev()->open();
- if (ret)
- return ret;
- }
-
- if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
- LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
- return -EINVAL;
- }
-
- /*
- * Setup our delayed control writer with the sensor default
- * gain and exposure delays. Mark VBLANK for priority write.
- */
- std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
- { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
- { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
- };
- data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
- data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
-
- /* Register initial controls that the Raspberry Pi IPA can handle. */
- data->controlInfo_ = std::move(result.controlInfo);
-
- /* Initialize the camera properties. */
- data->properties_ = data->sensor_->properties();
-
- /*
- * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
- * sensor of the colour gains. It is defined to be a linear gain where
- * the default value represents a gain of exactly one.
- */
- auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
- if (it != data->sensor_->controls().end())
- data->notifyGainsUnity_ = it->second.def().get<int32_t>();
-
- /*
- * Set a default value for the ScalerCropMaximum property to show
-	 * that we support its use; however, initialise it to zero because
- * it's not meaningful until a camera mode has been chosen.
- */
- data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
-
- /*
- * We cache three things about the sensor in relation to transforms
- * (meaning horizontal and vertical flips).
- *
- * Firstly, does it support them?
-	 * Secondly, if you use them, does it affect the Bayer ordering?
- * Thirdly, what is the "native" Bayer order, when no transforms are
- * applied?
- *
- * We note that the sensor's cached list of supported formats is
- * already in the "native" order, with any flips having been undone.
- */
- const V4L2Subdevice *sensor = data->sensor_->device();
- const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
- if (hflipCtrl) {
- /* We assume it will support vflips too... */
- data->supportsFlips_ = true;
- data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- }
-
- /* Look for a valid Bayer format. */
- BayerFormat bayerFormat;
- for (const auto &iter : data->sensorFormats_) {
- bayerFormat = BayerFormat::fromMbusCode(iter.first);
- if (bayerFormat.isValid())
- break;
- }
-
- if (!bayerFormat.isValid()) {
- LOG(RPI, Error) << "No Bayer format found";
- return -EINVAL;
- }
- data->nativeBayerOrder_ = bayerFormat.order;
-
- /*
- * List the available streams an application may request. At present, we
- * do not advertise Unicam Embedded and ISP Statistics streams, as there
- * is no mechanism for the application to request non-image buffer formats.
- */
- std::set<Stream *> streams;
- streams.insert(&data->unicam_[Unicam::Image]);
- streams.insert(&data->isp_[Isp::Output0]);
- streams.insert(&data->isp_[Isp::Output1]);
-
- /* Create and register the camera. */
- const std::string &id = data->sensor_->id();
- std::shared_ptr<Camera> camera =
- Camera::create(std::move(data), id, streams);
- PipelineHandler::registerCamera(std::move(camera));
-
- LOG(RPI, Info) << "Registered camera " << id
- << " to Unicam device " << unicam->deviceNode()
- << " and ISP device " << isp->deviceNode();
- return 0;
-}
-
-int PipelineHandlerRPi::queueAllBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_) {
- if (!stream->isExternal()) {
- ret = stream->queueAllBuffers();
- if (ret < 0)
- return ret;
- } else {
- /*
- * For external streams, we must queue up a set of internal
- * buffers to handle the number of drop frames requested by
- * the IPA. This is done by passing nullptr in queueBuffer().
- *
- * The below queueBuffer() call will do nothing if there
- * are not enough internal buffers allocated, but this will
- * be handled by queuing the request for buffers in the
- * RPiStream object.
- */
- unsigned int i;
- for (i = 0; i < data->dropFrameCount_; i++) {
- ret = stream->queueBuffer(nullptr);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-int PipelineHandlerRPi::prepareBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- unsigned int numRawBuffers = 0;
- int ret;
-
- for (Stream *s : camera->streams()) {
- if (isRaw(s->configuration().pixelFormat)) {
- numRawBuffers = s->configuration().bufferCount;
- break;
- }
- }
-
- /* Decide how many internal buffers to allocate. */
- for (auto const stream : data->streams_) {
- unsigned int numBuffers;
- /*
- * For Unicam, allocate a minimum of 4 buffers as we want
- * to avoid any frame drops.
- */
- constexpr unsigned int minBuffers = 4;
- if (stream == &data->unicam_[Unicam::Image]) {
- /*
- * If an application has configured a RAW stream, allocate
- * additional buffers to make up the minimum, but ensure
- * we have at least 2 sets of internal buffers to use to
- * minimise frame drops.
- */
- numBuffers = std::max<int>(2, minBuffers - numRawBuffers);
- } else if (stream == &data->isp_[Isp::Input]) {
- /*
- * ISP input buffers are imported from Unicam, so follow
- * similar logic as above to count all the RAW buffers
- * available.
- */
- numBuffers = numRawBuffers + std::max<int>(2, minBuffers - numRawBuffers);
-
- } else if (stream == &data->unicam_[Unicam::Embedded]) {
- /*
- * Embedded data buffers are (currently) for internal use,
- * so allocate the minimum required to avoid frame drops.
- */
- numBuffers = minBuffers;
- } else {
- /*
- * Since the ISP runs synchronous with the IPA and requests,
- * we only ever need one set of internal buffers. Any buffers
- * the application wants to hold onto will already be exported
- * through PipelineHandlerRPi::exportFrameBuffers().
- */
- numBuffers = 1;
- }
-
- ret = stream->prepareBuffers(numBuffers);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Pass the stats and embedded data buffers to the IPA. No other
- * buffers need to be passed.
- */
- mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), ipa::RPi::MaskStats);
- if (data->sensorMetadata_)
- mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
- ipa::RPi::MaskEmbeddedData);
-
- return 0;
-}
-
-void PipelineHandlerRPi::mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask)
-{
- RPiCameraData *data = cameraData(camera);
- std::vector<IPABuffer> ipaBuffers;
- /*
- * Link the FrameBuffers with the id (key value) in the map stored in
- * the RPi stream object - along with an identifier mask.
- *
- * This will allow us to identify buffers passed between the pipeline
- * handler and the IPA.
- */
- for (auto const &it : buffers) {
- ipaBuffers.push_back(IPABuffer(mask | it.first,
- it.second->planes()));
- data->ipaBuffers_.insert(mask | it.first);
- }
-
- data->ipa_->mapBuffers(ipaBuffers);
-}
-
-void RPiCameraData::freeBuffers()
-{
- if (ipa_) {
- /*
- * Copy the buffer ids from the unordered_set to a vector to
- * pass to the IPA.
- */
- std::vector<unsigned int> ipaBuffers(ipaBuffers_.begin(),
- ipaBuffers_.end());
- ipa_->unmapBuffers(ipaBuffers);
- ipaBuffers_.clear();
- }
-
- for (auto const stream : streams_)
- stream->releaseBuffers();
-
- buffersAllocated_ = false;
-}
-
-void RPiCameraData::frameStarted(uint32_t sequence)
-{
- LOG(RPI, Debug) << "frame start " << sequence;
-
- /* Write any controls for the next frame as soon as we can. */
- delayedCtrls_->applyControls(sequence);
-}
-
-int RPiCameraData::loadIPA(ipa::RPi::IPAInitResult *result)
-{
- ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
-
- if (!ipa_)
- return -ENOENT;
-
- ipa_->statsMetadataComplete.connect(this, &RPiCameraData::statsMetadataComplete);
- ipa_->runIsp.connect(this, &RPiCameraData::runIsp);
- ipa_->embeddedComplete.connect(this, &RPiCameraData::embeddedComplete);
- ipa_->setIspControls.connect(this, &RPiCameraData::setIspControls);
- ipa_->setDelayedControls.connect(this, &RPiCameraData::setDelayedControls);
-
- /*
- * The configuration (tuning file) is made from the sensor name unless
- * the environment variable overrides it.
- */
- std::string configurationFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0')
- configurationFile = ipa_->configurationFile(sensor_->model() + ".json");
- else
- configurationFile = std::string(configFromEnv);
-
- IPASettings settings(configurationFile, sensor_->model());
-
- return ipa_->init(settings, result);
-}
-
-int RPiCameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result)
-{
- std::map<unsigned int, IPAStream> streamConfig;
- std::map<unsigned int, ControlInfoMap> entityControls;
- ipa::RPi::IPAConfig ipaConfig;
-
- /* Inform IPA of stream configuration and sensor controls. */
- unsigned int i = 0;
- for (auto const &stream : isp_) {
- if (stream.isExternal()) {
- streamConfig[i++] = IPAStream(
- stream.configuration().pixelFormat,
- stream.configuration().size);
- }
- }
-
- entityControls.emplace(0, sensor_->controls());
- entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
-
- /* Always send the user transform to the IPA. */
- ipaConfig.transform = static_cast<unsigned int>(config->transform);
-
- /* Allocate the lens shading table via dmaHeap and pass it to the IPA. */
- if (!lsTable_.isValid()) {
- lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
- if (!lsTable_.isValid())
- return -ENOMEM;
-
- /* Allow the IPA to mmap the LS table via the file descriptor. */
- /*
- * \todo Investigate if mapping the lens shading table buffer
- * could be handled with mapBuffers().
- */
- ipaConfig.lsTableHandle = lsTable_;
- }
-
- /* We store the IPACameraSensorInfo for digital zoom calculations. */
- int ret = sensor_->sensorInfo(&sensorInfo_);
- if (ret) {
- LOG(RPI, Error) << "Failed to retrieve camera sensor info";
- return ret;
- }
-
- /* Ready the IPA - it must know about the sensor resolution. */
- ControlList controls;
- ret = ipa_->configure(sensorInfo_, streamConfig, entityControls, ipaConfig,
- &controls, result);
- if (ret < 0) {
- LOG(RPI, Error) << "IPA configuration failed!";
- return -EPIPE;
- }
-
- if (!controls.empty())
- setSensorControls(controls);
-
- return 0;
-}
-
-/*
- * enumerateVideoDevices() iterates over the Media Controller topology, starting
- * at the sensor and finishing at Unicam. For each sensor, RPiCameraData stores
- * a unique list of any intermediate video mux or bridge devices connected in a
- * cascade, together with the entity-to-entity link.
- *
- * Entity pad configuration and link enabling happen at the end of configure().
- * We first disable all pad links on each entity device in the chain, and then
- * selectively enable the specific links connecting the sensor to Unicam across
- * all intermediate muxes and bridges.
- *
- * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
- * will be disabled, and Sensor1 -> Mux1 -> Unicam links enabled. Alternatively,
- * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
- * and Sensor3 -> Mux2 -> Mux1 -> Unicam links are enabled. All other links will
- * remain unchanged.
- *
- * +----------+
- * | Unicam |
- * +-----^----+
- * |
- * +---+---+
- * | Mux1 <-------+
- * +--^----+ |
- * | |
- * +-----+---+ +---+---+
- * | Sensor1 | | Mux2 |<--+
- * +---------+ +-^-----+ |
- * | |
- * +-------+-+ +---+-----+
- * | Sensor2 | | Sensor3 |
- * +---------+ +---------+
- */
-void RPiCameraData::enumerateVideoDevices(MediaLink *link)
-{
- const MediaPad *sinkPad = link->sink();
- const MediaEntity *entity = sinkPad->entity();
- bool unicamFound = false;
-
- /* We only deal with Video Mux and Bridge devices in cascade. */
- if (entity->function() != MEDIA_ENT_F_VID_MUX &&
- entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
- return;
-
- /* Find the source pad for this Video Mux or Bridge device. */
- const MediaPad *sourcePad = nullptr;
- for (const MediaPad *pad : entity->pads()) {
- if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
- /*
- * We can only deal with devices that have a single source
- * pad. If this device has multiple source pads, ignore it
- * and this branch in the cascade.
- */
- if (sourcePad)
- return;
-
- sourcePad = pad;
- }
- }
-
- LOG(RPI, Debug) << "Found video mux device " << entity->name()
- << " linked to sink pad " << sinkPad->index();
-
- bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
- bridgeDevices_.back().first->open();
-
- /*
- * Iterate through all the sink pad links down the cascade to find any
- * other Video Mux and Bridge devices.
- */
- for (MediaLink *l : sourcePad->links()) {
- enumerateVideoDevices(l);
- /* Once we reach the Unicam entity, we are done. */
- if (l->sink()->entity()->name() == "unicam-image") {
- unicamFound = true;
- break;
- }
- }
-
- /* This identifies the end of our entity enumeration recursion. */
- if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
- /*
- * If Unicam is not at the end of this cascade, we cannot configure
- * this topology automatically, so remove all entity references.
- */
- if (!unicamFound) {
- LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
- bridgeDevices_.clear();
- }
- }
-}
-
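
For context, the bookkeeping gathered here is consumed later when the pipeline is configured. A rough sketch of that consumption, using only the MediaLink::setEnabled() call that appears elsewhere in this patch; the actual configure()-side code is outside this hunk:

	/* Sketch: enable every recorded link from the sensor towards Unicam. */
	for (auto &[subdev, link] : bridgeDevices_) {
		int ret = link->setEnabled(true);
		if (ret)
			LOG(RPI, Error) << "Failed to enable link on "
					<< subdev->entity()->name();
	}
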
-void RPiCameraData::statsMetadataComplete(uint32_t bufferId, const ControlList &controls)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(bufferId);
-
- handleStreamBuffer(buffer, &isp_[Isp::Stats]);
-
- /* Add to the Request metadata buffer what the IPA has provided. */
- Request *request = requestQueue_.front();
- request->metadata().merge(controls);
-
- /*
- * Inform the sensor of the latest colour gains if it has the
- * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
- */
- if (notifyGainsUnity_ && controls.contains(libcamera::controls::ColourGains)) {
- libcamera::Span<const float> colourGains = controls.get(libcamera::controls::ColourGains);
- /* The control wants linear gains in the order B, Gb, Gr, R. */
- ControlList ctrls(sensor_->controls());
- std::array<int32_t, 4> gains{
- static_cast<int32_t>(colourGains[1] * *notifyGainsUnity_),
- *notifyGainsUnity_,
- *notifyGainsUnity_,
- static_cast<int32_t>(colourGains[0] * *notifyGainsUnity_)
- };
- ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
-
- sensor_->setControls(&ctrls);
- }
-
- state_ = State::IpaComplete;
- handleState();
-}
-
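
To make the gain packing above concrete, here is a worked example with assumed numbers; the unity value is whatever the sensor driver reports through notifyGainsUnity_:

	/* Assumed inputs: colourGains = { 1.5 (R), 2.0 (B) }, unity = 256. */
	const float colourGains[2] = { 1.5f, 2.0f };
	const int32_t unity = 256;

	/* Packed in the B, Gb, Gr, R order the control expects. */
	const int32_t gains[4] = {
		static_cast<int32_t>(colourGains[1] * unity), /* B  = 512 */
		unity,                                        /* Gb = 256 */
		unity,                                        /* Gr = 256 */
		static_cast<int32_t>(colourGains[0] * unity), /* R  = 384 */
	};
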
-void RPiCameraData::runIsp(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Image].getBuffers().at(bufferId);
-
- LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bufferId
- << ", timestamp: " << buffer->metadata().timestamp;
-
- isp_[Isp::Input].queueBuffer(buffer);
- ispOutputCount_ = 0;
- handleState();
-}
-
-void RPiCameraData::embeddedComplete(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Embedded].getBuffers().at(bufferId);
- handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
- handleState();
-}
-
-void RPiCameraData::setIspControls(const ControlList &controls)
-{
- ControlList ctrls = controls;
-
- if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
- ControlValue &value =
- const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
- Span<uint8_t> s = value.data();
- bcm2835_isp_lens_shading *ls =
- reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
- ls->dmabuf = lsTable_.get();
- }
-
- isp_[Isp::Input].dev()->setControls(&ctrls);
- handleState();
-}
-
-void RPiCameraData::setDelayedControls(const ControlList &controls)
-{
- if (!delayedCtrls_->push(controls))
- LOG(RPI, Error) << "V4L2 DelayedControl set failed";
- handleState();
-}
-
-void RPiCameraData::setSensorControls(ControlList &controls)
-{
- /*
- * We need to ensure that if both VBLANK and EXPOSURE are present, the
- * former is written ahead of, and separately from, EXPOSURE to avoid
- * V4L2 rejecting the latter. This is identical to what DelayedControls
- * does with the priority write flag.
- *
- * As a consequence of the logic below, VBLANK gets set twice, and we
- * rely on the V4L2 framework not to pass the second control set to the
- * driver, as the actual control value has not changed.
- */
- if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
- ControlList vblank_ctrl;
-
- vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
- sensor_->setControls(&vblank_ctrl);
- }
-
- sensor_->setControls(&controls);
-}
-
-void RPiCameraData::unicamTimeout()
-{
- LOG(RPI, Error) << "Unicam has timed out!";
- LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
- LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
-}
-
-void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : unicam_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- if (stream == &unicam_[Unicam::Image]) {
- /*
- * Look up the sensor controls used for this frame sequence from
- * DelayedControls and queue them along with the frame buffer.
- */
- ControlList ctrl = delayedCtrls_->get(buffer->metadata().sequence);
- /*
- * Add the frame timestamp to the ControlList for the IPA to use
- * as it does not receive the FrameBuffer object.
- */
- ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
- bayerQueue_.push({ buffer, std::move(ctrl) });
- } else {
- embeddedQueue_.push(buffer);
- }
-
- handleState();
-}
-
-void RPiCameraData::ispInputDequeue(FrameBuffer *buffer)
-{
- if (state_ == State::Stopped)
- return;
-
- LOG(RPI, Debug) << "Stream ISP Input buffer complete"
- << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /* The ISP input buffer gets re-queued into Unicam. */
- handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
- handleState();
-}
-
-void RPiCameraData::ispOutputDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : isp_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our ISP output streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /*
- * The ISP statistics buffer must not be re-queued or sent back to the
- * application until the IPA signals that it has finished with it.
- */
- if (stream == &isp_[Isp::Stats]) {
- ipa_->signalStatReady(ipa::RPi::MaskStats | static_cast<unsigned int>(index));
- } else {
- /* Any other ISP output can be handed back to the application now. */
- handleStreamBuffer(buffer, stream);
- }
-
- /*
- * Increment the number of ISP outputs generated.
- * This is needed to track dropped frames.
- */
- ispOutputCount_++;
-
- handleState();
-}
-
-void RPiCameraData::clearIncompleteRequests()
-{
- /*
- * All outstanding requests (and associated buffers) must be returned
- * to the application.
- */
- while (!requestQueue_.empty()) {
- Request *request = requestQueue_.front();
-
- for (auto &b : request->buffers()) {
- FrameBuffer *buffer = b.second;
- /*
- * Has the buffer already been handed back to the
- * request? If not, do so now.
- */
- if (buffer->request()) {
- buffer->_d()->cancel();
- pipe()->completeBuffer(request, buffer);
- }
- }
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- }
-}
-
-void RPiCameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- /*
- * It is possible to be here without a pending request, so check
- * that we actually have one to action; otherwise we just return the
- * buffer to the stream.
- */
- Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
- if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
- /*
- * Check if this is an externally provided buffer, and if
- * so, we must stop tracking it in the pipeline handler.
- */
- handleExternalBuffer(buffer, stream);
- /*
- * Tag the buffer as completed, returning it to the
- * application.
- */
- pipe()->completeBuffer(request, buffer);
- } else {
- /*
- * This buffer was not part of the Request (which happens if an
- * internal buffer was used for an external stream, or
- * unconditionally for internal streams), or there is no pending
- * request, so we can recycle it.
- */
- stream->returnBuffer(buffer);
- }
-}
-
-void RPiCameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- unsigned int id = stream->getBufferId(buffer);
-
- if (!(id & ipa::RPi::MaskExternalBuffer))
- return;
-
- /* Stop the Stream object from tracking the buffer. */
- stream->removeExternalBuffer(buffer);
-}
-
-void RPiCameraData::handleState()
-{
- switch (state_) {
- case State::Stopped:
- case State::Busy:
- break;
-
- case State::IpaComplete:
- /* If the request is completed, we will switch to Idle state. */
- checkRequestCompleted();
- /*
- * No break here: we want to try running the pipeline again.
- * The fallthrough clause below suppresses compiler warnings.
- */
- [[fallthrough]];
-
- case State::Idle:
- tryRunPipeline();
- break;
- }
-}
-
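
The state machine above is compact enough to tabulate. A summary of the transitions, as implemented by handleState(), checkRequestCompleted() and tryRunPipeline():

	/*
	 * Stopped     -> no-op, capture is not running.
	 * Busy        -> no-op, waiting on the IPA and buffer completions.
	 * IpaComplete -> checkRequestCompleted() may move the state to Idle,
	 *                then fall through to try running the pipeline again.
	 * Idle        -> tryRunPipeline() moves the state back to Busy once a
	 *                request and matching buffers are available.
	 */
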
-void RPiCameraData::checkRequestCompleted()
-{
- bool requestCompleted = false;
- /*
- * If we are dropping this frame, do not touch the request; simply
- * change the state to Idle when ready.
- */
- if (!dropFrameCount_) {
- Request *request = requestQueue_.front();
- if (request->hasPendingBuffers())
- return;
-
- /* Must wait for metadata to be filled in before completing. */
- if (state_ != State::IpaComplete)
- return;
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- requestCompleted = true;
- }
-
- /*
- * Make sure we have three outputs completed in the case of a dropped
- * frame.
- */
- if (state_ == State::IpaComplete &&
- ((ispOutputCount_ == 3 && dropFrameCount_) || requestCompleted)) {
- state_ = State::Idle;
- if (dropFrameCount_) {
- dropFrameCount_--;
- LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
- << dropFrameCount_ << " left)";
- }
- }
-}
-
-Rectangle RPiCameraData::scaleIspCrop(const Rectangle &ispCrop) const
-{
- /*
- * Scale a crop rectangle defined in the ISP's coordinates into native sensor
- * coordinates.
- */
- Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
- sensorInfo_.outputSize);
- nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
- return nativeCrop;
-}
-
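
A worked example of that mapping, with assumed mode numbers: say the sensor's analogue crop is 2560x1920 at offset (16, 12) and the ISP output (camera mode) size is 1280x960.

	/* A crop expressed in ISP coordinates: 320x240 at (100, 50). */
	Rectangle ispCrop(100, 50, 320, 240);

	/*
	 * scaledBy(analogCrop.size(), outputSize) multiplies position and size
	 * by 2560/1280 = 2 horizontally and 1920/960 = 2 vertically, giving
	 * 640x480 at (200, 100). translateBy(analogCrop.topLeft()) then shifts
	 * the result by (16, 12): the native crop is 640x480 at (216, 112).
	 */
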
-void RPiCameraData::applyScalerCrop(const ControlList &controls)
-{
- if (controls.contains(controls::ScalerCrop)) {
- Rectangle nativeCrop = controls.get<Rectangle>(controls::ScalerCrop);
-
- if (!nativeCrop.width || !nativeCrop.height)
- nativeCrop = { 0, 0, 1, 1 };
-
- /* Create a version of the crop scaled to ISP (camera mode) pixels. */
- Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
- ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
-
- /*
- * The crop that we set must be:
- * 1. At least as big as ispMinCropSize_, once that's been
- * enlarged to the same aspect ratio.
- * 2. With the same mid-point, if possible.
- * 3. But it can't go outside the sensor area.
- */
- Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
- Size size = ispCrop.size().expandedTo(minSize);
- ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
-
- if (ispCrop != ispCrop_) {
- isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop);
- ispCrop_ = ispCrop;
-
- /*
- * Also update the ScalerCrop in the metadata with what we actually
- * used. But we must first rescale that from ISP (camera mode) pixels
- * back into sensor native pixels.
- */
- scalerCrop_ = scaleIspCrop(ispCrop_);
- }
- }
-}
-
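
A numeric walk through the three constraints listed in applyScalerCrop() above, with assumed values: ispMinCropSize_ = 48x48, a requested crop of 32x24, and a 1280x960 camera mode (the request has a 4:3 aspect ratio):

	Size minSize = Size(48, 48).expandedToAspectRatio({ 4, 3 }); /* 64x48 */
	Size size = Size(32, 24).expandedTo(minSize);                /* 64x48 */

	/*
	 * The resulting 64x48 rectangle is then re-centred on the requested
	 * crop's midpoint and, if necessary, shifted so that it stays fully
	 * inside the 1280x960 camera mode.
	 */
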
-void RPiCameraData::fillRequestMetadata(const ControlList &bufferControls,
- Request *request)
-{
- request->metadata().set(controls::SensorTimestamp,
- bufferControls.get(controls::SensorTimestamp));
-
- request->metadata().set(controls::ScalerCrop, scalerCrop_);
-}
-
-void RPiCameraData::tryRunPipeline()
-{
- FrameBuffer *embeddedBuffer;
- BayerFrame bayerFrame;
-
- /* If any of our request or buffer queues are empty, we cannot proceed. */
- if (state_ != State::Idle || requestQueue_.empty() ||
- bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
- return;
-
- if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
- return;
-
- /* Take the first request from the queue and action the IPA. */
- Request *request = requestQueue_.front();
-
- /* See if a new ScalerCrop value needs to be applied. */
- applyScalerCrop(request->controls());
-
- /*
- * Clear the request metadata and fill it with some initial non-IPA
- * related controls. We clear it first because the request metadata
- * may have been populated if we have dropped the previous frame.
- */
- request->metadata().clear();
- fillRequestMetadata(bayerFrame.controls, request);
-
- /*
- * Have the IPA process all the user controls. Once this is complete, we
- * queue the ISP output buffer listed in the request to start the HW
- * pipeline.
- */
- ipa_->signalQueueRequest(request->controls());
-
- /* Set our state to say the pipeline is active. */
- state_ = State::Busy;
-
- unsigned int bayerId = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Bayer buffer id: " << bayerId;
-
- ipa::RPi::ISPConfig ispPrepare;
- ispPrepare.bayerBufferId = ipa::RPi::MaskBayerData | bayerId;
- ispPrepare.controls = std::move(bayerFrame.controls);
-
- if (embeddedBuffer) {
- unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
-
- ispPrepare.embeddedBufferId = ipa::RPi::MaskEmbeddedData | embeddedId;
- ispPrepare.embeddedBufferPresent = true;
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Embedded buffer id: " << embeddedId;
- }
-
- ipa_->signalIspPrepare(ispPrepare);
-}
-
-bool RPiCameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
-{
- if (bayerQueue_.empty())
- return false;
-
- /* Start with the front of the bayer queue. */
- bayerFrame = std::move(bayerQueue_.front());
- bayerQueue_.pop();
-
- /*
- * Find the embedded data buffer with a matching timestamp to pass to
- * the IPA. Any embedded buffers with a timestamp lower than that of
- * the current bayer buffer will be removed and re-queued to the driver.
- */
- uint64_t ts = bayerFrame.buffer->metadata().timestamp;
- embeddedBuffer = nullptr;
- while (!embeddedQueue_.empty()) {
- FrameBuffer *b = embeddedQueue_.front();
- if (b->metadata().timestamp < ts) {
- embeddedQueue_.pop();
- unicam_[Unicam::Embedded].returnBuffer(b);
- LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
- << unicam_[Unicam::Embedded].name();
- } else if (b->metadata().timestamp == ts) {
- /* Found a match! */
- embeddedBuffer = b;
- embeddedQueue_.pop();
- break;
- } else {
- break; /* Only higher timestamps from here. */
- }
- }
-
- if (!embeddedBuffer && sensorMetadata_) {
- /* Log if there is no matching embedded data buffer found. */
- LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
- }
-
- return true;
-}
-
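
The matching loop above is easiest to follow with a concrete queue. A sketch with assumed timestamps, using the usual <deque> and <queue> headers:

	std::deque<uint64_t> pending{ 80, 90, 100, 110 };
	std::queue<uint64_t> embedded(pending);
	const uint64_t ts = 100; /* timestamp of the bayer frame */

	while (!embedded.empty()) {
		uint64_t t = embedded.front();
		if (t < ts) {
			embedded.pop(); /* 80 and 90 are stale: recycled */
		} else if (t == ts) {
			embedded.pop(); /* 100 matches the bayer frame */
			break;
		} else {
			break; /* 110 stays queued for a later frame */
		}
	}
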
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRPi)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.h b/src/libcamera/pipeline/raspberrypi/rpi_stream.h
deleted file mode 100644
index fe011100..00000000
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
- *
- * rpi_stream.h - Raspberry Pi device stream abstraction class.
- */
-
-#pragma once
-
-#include <queue>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-namespace RPi {
-
-using BufferMap = std::unordered_map<unsigned int, FrameBuffer *>;
-
-/*
- * Device stream abstraction for either an internal or external stream.
- * Used for both Unicam and the ISP.
- */
-class Stream : public libcamera::Stream
-{
-public:
- Stream()
- : id_(ipa::RPi::MaskID)
- {
- }
-
- Stream(const char *name, MediaEntity *dev, bool importOnly = false)
- : external_(false), importOnly_(importOnly), name_(name),
- dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(ipa::RPi::MaskID)
- {
- }
-
- V4L2VideoDevice *dev() const;
- std::string name() const;
- bool isImporter() const;
- void resetBuffers();
-
- void setExternal(bool external);
- bool isExternal() const;
-
- void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- const BufferMap &getBuffers() const;
- int getBufferId(FrameBuffer *buffer) const;
-
- void setExternalBuffer(FrameBuffer *buffer);
- void removeExternalBuffer(FrameBuffer *buffer);
-
- int prepareBuffers(unsigned int count);
- int queueBuffer(FrameBuffer *buffer);
- void returnBuffer(FrameBuffer *buffer);
-
- int queueAllBuffers();
- void releaseBuffers();
-
-private:
- class IdGenerator
- {
- public:
- IdGenerator(int max)
- : max_(max), id_(0)
- {
- }
-
- int get()
- {
- int id;
- if (!recycle_.empty()) {
- id = recycle_.front();
- recycle_.pop();
- } else {
- id = id_++;
- ASSERT(id_ <= max_);
- }
- return id;
- }
-
- void release(int id)
- {
- recycle_.push(id);
- }
-
- void reset()
- {
- id_ = 0;
- recycle_ = {};
- }
-
- private:
- int max_;
- int id_;
- std::queue<int> recycle_;
- };
-
- void clearBuffers();
- int queueToDevice(FrameBuffer *buffer);
-
- /*
- * Indicates that this stream is active externally, i.e. the buffers
- * might be provided by (and returned to) the application.
- */
- bool external_;
-
- /* Indicates that this stream only imports buffers, e.g. ISP input. */
- bool importOnly_;
-
- /* Stream name identifier. */
- std::string name_;
-
- /* The actual device stream. */
- std::unique_ptr<V4L2VideoDevice> dev_;
-
- /* Tracks a unique id key for the bufferMap_. */
- IdGenerator id_;
-
- /* All frame buffers associated with this device stream. */
- BufferMap bufferMap_;
-
- /*
- * List of frame buffers that we can use if none have been provided by
- * the application for external streams. This is populated by the
- * buffers exported internally.
- */
- std::queue<FrameBuffer *> availableBuffers_;
-
- /*
- * List of frame buffers that are to be queued into the device from a Request.
- * A nullptr indicates any internal buffer can be used (from availableBuffers_),
- * whereas a valid pointer indicates an external buffer to be queued.
- *
- * Ordering buffers to be queued is important here as it must match the
- * requests coming from the application.
- */
- std::queue<FrameBuffer *> requestBuffers_;
-
- /*
- * This is a list of buffers exported internally. We need to keep it
- * around as the stream must maintain ownership of these buffers.
- */
- std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
-};
-
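
The IdGenerator above always recycles released ids before minting new ones, keeping the key space dense. A usage sketch, assuming the class were reachable from outside Stream (it is private in reality) and constructed with a maximum of four ids:

	IdGenerator gen(4);

	int a = gen.get(); /* 0 */
	int b = gen.get(); /* 1 */
	gen.release(a);
	int c = gen.get(); /* 0 again: recycled before a new id is minted */
	int d = gen.get(); /* 2 */
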
-/*
- * The following class is just a convenient (and typesafe) array of device
- * streams indexed with an enum class.
- */
-template<typename E, std::size_t N>
-class Device : public std::array<class Stream, N>
-{
-private:
- constexpr auto index(E e) const noexcept
- {
- return static_cast<std::underlying_type_t<E>>(e);
- }
-public:
- Stream &operator[](E e)
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
- const Stream &operator[](E e) const
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
-};
-
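
The Device wrapper above exists so call sites can index a fixed set of streams with a scoped enum rather than a bare integer. A usage sketch with a hypothetical enumeration (the pipeline handler defines its own Unicam and Isp equivalents):

	enum class Unicam : unsigned int { Image, Embedded };

	RPi::Device<Unicam, 2> unicam;
	unicam[Unicam::Image].setExternal(true); /* no explicit cast needed */
	const RPi::Stream &embedded = unicam[Unicam::Embedded];
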
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 3dc0850c..4cbf105d 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
@@ -13,24 +13,29 @@
#include <queue>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_proxy.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -64,7 +69,8 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
void clear();
@@ -119,6 +125,7 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
bool fitsAllPaths(const StreamConfiguration &cfg);
@@ -132,6 +139,7 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
@@ -139,8 +147,8 @@ class PipelineHandlerRkISP1 : public PipelineHandler
public:
PipelineHandlerRkISP1(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -154,6 +162,8 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
RkISP1CameraData *cameraData(Camera *camera)
{
return static_cast<RkISP1CameraData *>(camera->_d());
@@ -165,7 +175,7 @@ private:
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
void bufferReady(FrameBuffer *buffer);
void paramReady(FrameBuffer *buffer);
void statReady(FrameBuffer *buffer);
@@ -178,6 +188,10 @@ private:
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
@@ -188,6 +202,8 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
@@ -195,28 +211,35 @@ RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
{
}
-RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
unsigned int frame = data->frame_;
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
- }
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
+
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
}
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
-
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
@@ -323,7 +346,7 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
/*
* The API tuning file is made from the sensor name unless the
- * environment variable overrides it. If
+ * environment variable overrides it.
*/
std::string ipaTuningFile;
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
@@ -339,7 +362,15 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
ipaTuningFile = std::string(configFromEnv);
}
- int ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
+ }
+
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &controlInfo_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
@@ -355,13 +386,15 @@ void RkISP1CameraData::paramFilled(unsigned int frame)
if (!info)
return;
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct rkisp1_params_cfg);
pipe->param_->queueBuffer(info->paramBuffer);
pipe->stat_->queueBuffer(info->statBuffer);
if (info->mainPathBuffer)
mainPath_->queueBuffer(info->mainPathBuffer);
- if (info->selfPathBuffer)
+ if (selfPath_ && info->selfPathBuffer)
selfPath_->queueBuffer(info->selfPathBuffer);
}
@@ -380,9 +413,33 @@ void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &meta
info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe()->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -393,14 +450,15 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
+ const CameraSensor *sensor = data_->sensor_.get();
StreamConfiguration config;
config = cfg;
- if (data_->mainPath_->validate(&config) != Valid)
+ if (data_->mainPath_->validate(sensor, &config) != Valid)
return false;
config = cfg;
- if (data_->selfPath_->validate(&config) != Valid)
+ if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid)
return false;
return true;
@@ -409,20 +467,38 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
- Status status = Valid;
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
status = Adjusted;
}
- /* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
}
/*
@@ -438,14 +514,14 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
std::reverse(order.begin(), order.end());
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Valid) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -455,7 +531,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Valid) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -466,7 +542,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -477,7 +553,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -486,86 +562,147 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
}
}
- /* All paths rejected configuraiton. */
+ /* All paths rejected configuration. */
LOG(RkISP1, Debug) << "Camera configuration not supported "
<< cfg.toString();
return Invalid;
}
/* Select the sensor format. */
+ PixelFormat rawFormat;
Size maxSize;
- for (const StreamConfiguration &cfg : config_)
+
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
maxSize = std::max(maxSize, cfg.size);
+ }
+
+ std::vector<unsigned int> mbusCodes;
+
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
+
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize);
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- maxSize);
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
return status;
}
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+
for (const StreamRole role : roles) {
- bool useMainPath;
+ Size size;
switch (role) {
- case StreamRole::StillCapture: {
- useMainPath = mainPathAvailable;
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
break;
- }
+
case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- useMainPath = !selfPathAvailable;
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
break;
- }
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
default:
LOG(RkISP1, Warning)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
- StreamConfiguration cfg;
- if (useMainPath) {
- cfg = data->mainPath_->generateConfiguration(
- data->sensor_->resolution());
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+ * RGB (only available on the self path) in the streams formats
+ * exposed to applications. This likely calls for a better API
+ * to expose streams capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
mainPathAvailable = false;
} else {
- cfg = data->selfPath_->generateConfiguration(
- data->sensor_->resolution());
- selfPathAvailable = false;
+ path = data->selfPath_;
}
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
config->addConfiguration(cfg);
}
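
Seen from an application, the effect of the new role handling can be sketched as follows; camera acquisition boilerplate is omitted, and the shared color space is the behaviour this hunk introduces:

	std::vector<libcamera::StreamRole> roles{
		libcamera::StreamRole::StillCapture,
		libcamera::StreamRole::Viewfinder,
	};

	std::unique_ptr<libcamera::CameraConfiguration> config =
		camera->generateConfiguration(roles);

	/*
	 * Both entries now carry the sYCC color space picked for the first
	 * (still capture) role.
	 */
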
@@ -593,12 +730,18 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
V4L2SubdeviceFormat format = config->sensorFormat();
LOG(RkISP1, Debug) << "Configuring sensor with " << format;
- ret = sensor->setFormat(&format);
+ ret = sensor->setFormat(&format, config->combinedTransform());
if (ret < 0)
return ret;
LOG(RkISP1, Debug) << "Sensor configured with " << format;
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
+
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
@@ -612,8 +755,14 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
<< "ISP input pad configured with " << format
<< " crop " << rect;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+
LOG(RkISP1, Debug)
<< "Configuring ISP output pad with " << format
<< " crop " << rect;
@@ -622,6 +771,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
if (ret < 0)
return ret;
+ format.colorSpace = config->at(0).colorSpace;
ret = isp_->setFormat(2, &format);
if (ret < 0)
return ret;
@@ -637,10 +787,12 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
- } else {
+ } else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
cfg.size);
+ } else {
+ return -ENODEV;
}
if (ret)
@@ -660,19 +812,15 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
- IPACameraSensorInfo sensorInfo = {};
- ret = data->sensor_->sensorInfo(&sensorInfo);
- if (ret) {
- /* \todo Turn this into a hard failure. */
- LOG(RkISP1, Warning) << "Camera sensor information not available";
- sensorInfo = {};
- ret = 0;
- }
+ ipa::rkisp1::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
- std::map<uint32_t, ControlInfoMap> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ ipaConfig.sensorControls = data->sensor_->controls();
- ret = data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
@@ -688,7 +836,7 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S
if (stream == &data->mainPathStream_)
return mainPath_.exportBuffers(count, buffers);
- else if (stream == &data->selfPathStream_)
+ else if (hasSelfPath_ && stream == &data->selfPathStream_)
return selfPath_.exportBuffers(count, buffers);
return -EINVAL;
@@ -705,13 +853,15 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
data->selfPathStream_.configuration().bufferCount,
});
- ret = param_->allocateBuffers(maxCount, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(maxCount, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
@@ -787,23 +937,25 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
data->frame_ = 0;
- ret = param_->streamOn();
- if (ret) {
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->id();
- return ret;
- }
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->id();
- return ret;
+ ret = stat_->streamOn();
+ if (ret) {
+ param_->streamOff();
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
}
if (data->mainPath_->isEnabled()) {
@@ -817,7 +969,7 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
}
}
- if (data->selfPath_->isEnabled()) {
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
if (ret) {
mainPath_.stop();
@@ -844,18 +996,21 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera)
data->ipa_->stop();
- selfPath_.stop();
+ if (hasSelfPath_)
+ selfPath_.stop();
mainPath_.stop();
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics for " << camera->id();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters for " << camera->id();
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+ }
ASSERT(data->queuedRequests_.empty());
data->frameInfo_.clear();
@@ -869,12 +1024,21 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- RkISP1FrameInfo *info = data->frameInfo_.create(data, request);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
data->ipa_->queueRequest(data->frame_, request->controls());
- data->ipa_->fillParamsBuffer(data->frame_, info->paramBuffer->cookie());
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->fillParamsBuffer(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -900,8 +1064,7 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
* Configure the sensor links: enable the link corresponding to this
* camera.
*/
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (link->source()->entity() != sensor->entity())
continue;
@@ -915,10 +1078,18 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return ret;
}
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
- else if (cfg.stream() == &data->selfPathStream_)
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
ret = data->selfPath_->setEnabled(true);
else
return -EINVAL;
@@ -935,15 +1106,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
int ret;
std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this, &mainPath_, &selfPath_);
-
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrls),
- controls::controls);
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
data->sensor_ = std::make_unique<CameraSensor>(sensor);
ret = data->sensor_->init();
@@ -991,9 +1155,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -1008,11 +1170,29 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
return false;
}
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
+ return false;
+
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
@@ -1026,11 +1206,12 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!mainPath_.init(media_))
return false;
- if (!selfPath_.init(media_))
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
@@ -1038,12 +1219,8 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
bool registered = false;
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (!createCamera(link->source()->entity()))
registered = true;
}
@@ -1055,12 +1232,10 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -1068,7 +1243,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
@@ -1078,19 +1253,38 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
+
+ const FrameMetadata &metadata = buffer->metadata();
Request *request = buffer->request();
- /*
- * Record the sensor's timestamp in the request metadata.
- *
- * \todo The sensor timestamp should be better estimated by connecting
- * to the V4L2Device::frameStart signal.
- */
- request->metadata().set(controls::SensorTimestamp,
- buffer->metadata().timestamp);
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStatsBuffer(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
completeBuffer(request, buffer);
- tryCompleteRequest(request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
@@ -1103,7 +1297,7 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
@@ -1117,7 +1311,7 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
return;
}
@@ -1128,6 +1322,6 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 6f175758..c49017d1 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.cpp - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
@@ -12,6 +12,7 @@
#include <libcamera/formats.h>
#include <libcamera/stream.h>
+#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -20,6 +21,39 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(RkISP1)
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
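
This table is what makes the later .at() lookups in this file safe: populateFormats() below only admits pixel formats that appear here. A minimal sketch of such a lookup, with an assumed format:

	/* Assumed: the driver enumerated NV12, so it was admitted. */
	uint32_t code = formatToMediaBus.at(formats::NV12);
	/* code == MEDIA_BUS_FMT_YUYV8_1_5X8, per the table above. */
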
RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
const Size &minResolution, const Size &maxResolution)
: name_(name), running_(false), formats_(formats),
@@ -41,6 +75,8 @@ bool RkISP1Path::init(MediaDevice *media)
if (video_->open() < 0)
return false;
+ populateFormats();
+
link_ = media->link("rkisp1_isp", 2, resizer, 0);
if (!link_)
return false;
@@ -48,40 +84,227 @@ bool RkISP1Path::init(MediaDevice *media)
return true;
}
-StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
+void RkISP1Path::populateFormats()
{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
+ /* Min and max resolutions to populate the available stream formats. */
Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
.boundedTo(resolution);
Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
- for (const PixelFormat &format : formats_)
- streamFormats[format] = { { minResolution, maxResolution } };
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
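+
+	/*
+	 * Illustration with a hypothetical sensor: if streamFormats_ contains
+	 * SBGGR10 and SBGGR12 and the sensor advertises both matching media
+	 * bus codes, the loop above leaves rawFormat == formats::SBGGR12, so
+	 * a Raw role defaults to the deepest raw format shared by the sensor
+	 * and the ISP.
+	 */
+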
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
- cfg.pixelFormat = formats::NV12;
- cfg.size = maxResolution;
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
cfg.bufferCount = RKISP1_BUFFER_COUNT;
return cfg;
}
-CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
+CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg)
{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
- if (std::find(formats_.begin(), formats_.end(), cfg->pixelFormat) ==
- formats_.end())
- cfg->pixelFormat = formats::NV12;
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * Store the raw format with the highest bits per pixel
+ * for later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, cfg->size);
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
- cfg->size.boundTo(maxResolution_);
- cfg->size.expandTo(minResolution_);
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
cfg->bufferCount = RKISP1_BUFFER_COUNT;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg->pixelFormat);
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
format.size = cfg->size;
int ret = video_->tryFormat(&format);
@@ -112,7 +335,18 @@ int RkISP1Path::configure(const StreamConfiguration &config,
if (ret < 0)
return ret;
- Rectangle rect(0, 0, ispFormat.size);
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+ * \todo The alignment to a multiple of 2 pixels is required but may
+ * change the aspect ratio very slightly. A more advanced algorithm to
+ * compute the resizer input crop rectangle is needed, and it should
+ * also take into account the need to crop away the edge pixels affected
+ * by the ISP processing blocks.
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
@@ -127,15 +361,11 @@ int RkISP1Path::configure(const StreamConfiguration &config,
<< "Configuring " << name_ << " resizer output pad with "
<< ispFormat;
- switch (config.pixelFormat) {
- case formats::NV12:
- case formats::NV21:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- break;
- default:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- }
+ /*
+ * The configuration has been validated, the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
ret = resizer_->setFormat(1, &ispFormat);
if (ret < 0)
@@ -147,7 +377,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
V4L2DeviceFormat outputFormat;
- outputFormat.fourcc = V4L2PixelFormat::fromPixelFormat(config.pixelFormat);
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
outputFormat.size = config.size;
outputFormat.planesCount = info.numPlanes();
@@ -156,7 +386,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
return ret;
if (outputFormat.size != config.size ||
- outputFormat.fourcc != V4L2PixelFormat::fromPixelFormat(config.pixelFormat)) {
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
LOG(RkISP1, Error)
<< "Unable to configure capture in " << config.toString();
return -EINVAL;
@@ -204,17 +434,32 @@ void RkISP1Path::stop()
running_ = false;
}
+/*
+ * \todo Remove the hardcoded resolutions and formats once all users have
+ * migrated to a recent enough kernel.
+ */
namespace {
constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
-constexpr std::array<PixelFormat, 6> RKISP1_RSZ_MP_FORMATS{
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
- /* \todo Add support for RAW formats. */
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
};
constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index f3f1ae39..08edefec 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.h - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#pragma once
#include <memory>
+#include <set>
#include <vector>
#include <libcamera/base/signal.h>
@@ -22,6 +23,7 @@
namespace libcamera {
+class CameraSensor;
class MediaDevice;
class V4L2Subdevice;
struct StreamConfiguration;
@@ -38,8 +40,11 @@ public:
int setEnabled(bool enable) { return link_->setEnabled(enable); }
bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
- StreamConfiguration generateConfiguration(const Size &resolution);
- CameraConfiguration::Status validate(StreamConfiguration *cfg);
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat);
@@ -57,14 +62,17 @@ public:
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
private:
+ void populateFormats();
+
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
const char *name_;
bool running_;
const Span<const PixelFormat> formats_;
- const Size minResolution_;
- const Size maxResolution_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the pipeline buffer depth that the
+ * application needs to maintain so that it also covers controls. Just as with
+ * buffer depth, if the application keeps the number of requests queued above
+ * the control depth, the controls are guaranteed to take effect for the
+ * correct request. The control depth is determined by the control with the
+ * greatest delay.
+ */
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that fall outside the
+ * bounds set by the current vertical blanking, but fit within the bounds the
+ * new blanking value will allow.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise a delay (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of, the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. To mix delayed
+ * controls with controls that take effect immediately, the immediate controls
+ * must be listed in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
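+
+/*
+ * A minimal usage sketch. The delay values and the sensorDevice variable are
+ * illustrative assumptions, not taken from a real sensor driver:
+ *
+ *     std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ *         { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ *         { V4L2_CID_EXPOSURE, { 2, false } },
+ *         { V4L2_CID_VBLANK, { 2, true } },
+ *     };
+ *     DelayedControls delayed(sensorDevice, params);
+ *
+ *     delayed.push(controls, cookie);     // once per queued request
+ *     delayed.applyControls(sequence);    // from the frame start event
+ *     auto [effective, reqCookie] = delayed.get(sequence);
+ */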
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie User-supplied value to associate with the initial state,
+ * as later returned by get()
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+	/* Retrieve control values as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+		 * Do not mark this control value as updated, it does not need
+		 * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ * \param[in] cookie Opaque value associated with this set of controls,
+ * returned alongside them by get()
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist.
+ * It's the caller's responsibility not to read sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of entries pushed.
+ *
+ * \return A pair of the controls in effect at \a sequence and the cookie that
+ * was pushed with them
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
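+
+/*
+ * Index arithmetic, with illustrative numbers: assuming maxDelay_ == 2, a
+ * call to get(10) reads ring slot (10 - 2) % 16 == 8, the entry queued for
+ * frame 8, whose controls had taken effect by the time frame 10 started.
+ * Sequences 0 and 1 clamp to slot 0, the state seeded by reset().
+ */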
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
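+
+/*
+ * Peek-ahead arithmetic, with illustrative delays: assuming maxDelay_ == 2,
+ * a request's delay-2 control (delayDiff == 0) is written in the frame that
+ * matches its queue slot, while its delay-1 control (delayDiff == 1) is
+ * written one frame later, so both take effect on the same target frame,
+ * two frames after the slot.
+ */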
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
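+	/*
+	 * Fixed-size ring buffer: operator[] wraps modulo listSize, so with
+	 * listSize == 16 the indices 0, 16 and 32 all alias slot 0. Callers
+	 * index it directly with monotonically increasing frame and queue
+	 * counters.
+	 */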
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
index f1a2f5ee..8fb7e823 100644
--- a/src/libcamera/pipeline/raspberrypi/meson.build
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'dma_heaps.cpp',
- 'raspberrypi.cpp',
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
'rpi_stream.cpp',
])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..289af516
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+ * variants. So if the PixelFormat returns as invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
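+
+/*
+ * As an example of the fallback above (hedged on the BayerFormat tables):
+ * requesting CSI2 packing for an 8-bit code such as MEDIA_BUS_FMT_SBGGR8_1X8
+ * yields an invalid PixelFormat, since 8-bit Bayer has no packed variant,
+ * and the function returns the unpacked formats::SBGGR8 instead.
+ */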
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+	for (auto &cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+	for (auto &cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
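+
+/*
+ * A worked example, with an illustrative configuration: given a YUV420 stream
+ * requesting Rec709 and an XRGB8888 stream with no colour space set, the
+ * shared yuvColorSpace_ becomes Rec709 and the RGB stream receives the same
+ * primaries and transfer function with YcbcrEncoding::None and full range.
+ * Only requests that explicitly conflict are reported as Adjusted.
+ */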
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+
+ for (const auto &[index, cfg] : utils::enumerate(config_)) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(index, &cfg);
+ else
+ outStreams_.emplace_back(index, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+ if (stream->colorSpace != format.colorSpace) {
+		LOG(RPI, Debug)
+			<< "Color space changed from "
+			<< ColorSpace::toString(stream->colorSpace) << " to "
+			<< ColorSpace::toString(format.colorSpace);
+
+		stream->colorSpace = format.colorSpace;
+		adjusted = true;
+ }
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Start by freeing all buffers and reset the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+ * If the application has provided a sensor configuration apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ /* Add the ScalerCrop control limits based on the current mode. */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+ /*
+ * Start by disabling all the sink pad links on the devices in the
+ * cascade, with the exception of the link connecting the device.
+ */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+ /*
+ * Next, enable the entity -> entity links, and setup the pad format.
+ *
+		 * \todo Some bridge devices may change the media bus code, so we
+ * ought to read the source pad format and propagate it to the sink pad.
+ */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ if (data->sensor_->init())
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+ /*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor ->
+	 * frontend chain. There may be a cascade of devices in this chain!
+ */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+ /*
+ * Set a default value for the ScalerCropMaximum property to show
+ * that we support its use, however, initialise it to zero because
+ * it's not meaningful until a camera mode has been chosen.
+ */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+ /*
+ * Link the FrameBuffers with the id (key value) in the map stored in
+ * the RPi stream object - along with an identifier mask.
+ *
+ * This will allow us to identify buffers passed between the pipeline
+ * handler and the IPA.
+ */
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+ unsigned int i;
+ for (i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+	/* A mode larger than the request is penalised less than a smaller one. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
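+
+/*
+ * Illustrative numbers for the scoring above: for a requested width of 1920,
+ * a 2028-wide mode scores |1920 - 2028| / 8 * 2 = 27, while a 1640-wide mode
+ * scores (1920 - 1640) * 2 = 560, so modes larger than the request are
+ * strongly preferred over smaller ones.
+ */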
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity to entity link.
+ *
+ * Entity pad configuration and link enabling happens at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enable the specific links connecting the sensor to the frontend
+ * across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ *                +----------+
+ *                |    FE    |
+ *                +-----^----+
+ *                      |
+ *                  +---+---+
+ *                  |  Mux1 |<------+
+ *                  +--^----+       |
+ *                     |            |
+ *               +-----+---+    +---+---+
+ *               | Sensor1 |    |  Mux2 |<--+
+ *               +---------+    +-^-----+   |
+ *                                |         |
+ *                        +-------+-+   +---+-----+
+ *                        | Sensor2 |   | Sensor3 |
+ *                        +---------+   +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+				  << ver.value_or(0.0);
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
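+
+/*
+ * A minimal configuration file sketch matching the keys read above; the
+ * values shown are the defaults, not recommendations:
+ *
+ *     version: 1.0
+ *     pipeline_handler:
+ *         disable_startup_frame_drops: false
+ *         camera_timeout_value_ms: 0
+ */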
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
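+
+/*
+ * Example usage (illustrative): overriding the tuning file from the shell,
+ * with a hypothetical path:
+ *
+ *   export LIBCAMERA_RPI_TUNING_FILE=/path/to/imx219.json
+ */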
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
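+	/*
+	 * Illustrative example: a requested orientation of Rotate180 against
+	 * the Rotate0 reference yields Transform::Rot180 here.
+	 */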
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/*
+	 * Add to the Request metadata buffer what the IPA has provided.
+	 * This is the last thing to do before the request can complete.
+	 */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
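+		/*
+		 * Worked example (illustrative): with ColourGains = { 2.0, 1.5 }
+		 * (red, blue) and notifyGainsUnity_ = 256, the array below
+		 * becomes { 384, 256, 256, 512 }.
+		 */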
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+	/*
+	 * If both VBLANK and EXPOSURE are present, VBLANK must be written
+	 * ahead of, and separately from, EXPOSURE to avoid V4L2 rejecting
+	 * the latter. This is identical to what DelayedControls does with
+	 * the priority write flag.
+	 *
+	 * As a consequence of the logic below, VBLANK gets set twice, and
+	 * we rely on the V4L2 framework to not pass the second control set
+	 * to the driver as the actual control value has not changed.
+	 */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
+ */
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
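+
+/*
+ * Worked example (illustrative): with analogCrop = (1000, 750)/2000x1500 and
+ * outputSize = 1000x750, an ispCrop of (250, 250)/500x375 scales by a factor
+ * of two to (500, 500)/1000x750 and is then translated to
+ * (1500, 1250)/1000x750 in native sensor coordinates.
+ */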
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (scalerCrop) {
+ Rectangle nativeCrop = *scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != ispCrop_) {
+ ispCrop_ = ispCrop;
+ platformSetIspCrop();
+
+ /*
+ * Also update the ScalerCrop in the metadata with what we actually
+ * used. But we must first rescale that from ISP (camera mode) pixels
+ * back into sensor native pixels.
+ */
+ scalerCrop_ = scaleIspCrop(ispCrop_);
+ }
+ }
+}
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+	/*
+	 * It is possible to be here without a pending request, so check
+	 * that we actually have one to action, otherwise we simply return
+	 * the buffer back to the stream.
+	 */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+	/*
+	 * If we are dropping this frame, do not touch the request, simply
+	 * change the state to Idle when ready.
+	 */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+	/*
+	 * Make sure we have all the expected ISP outputs completed in the
+	 * case of a dropped frame.
+	 */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ request->metadata().set(controls::ScalerCrop, scalerCrop_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..f9cecf70
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop() = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+	/*
+	 * Stores a cascade of Video Mux or Bridge devices between the sensor
+	 * and Unicam, together with the media links across the entities.
+	 */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+	/*
+	 * All the functions in this class are called from a single calling
+	 * thread, so no mutex is needed to protect access to any of the
+	 * variables below.
+	 */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+ Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
+ Rectangle scalerCrop_; /* crop in sensor native pixels */
+ Size ispMinCropSize_;
+
+ unsigned int dropFrameCount_;
+
+	/*
+	 * If set, this stores the value that represents a gain of one for
+	 * the V4L2_CID_NOTIFY_GAINS control.
+	 */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+ /*
+ * Store the colour spaces that all our streams will have. RGB format streams
+ * will have the same colorspace as YUV streams, with YCbCr field cleared and
+ * range set to full.
+ */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
index 7a93efaa..accf59eb 100644
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -1,14 +1,19 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.cpp - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
#include <libcamera/base/log.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
+static constexpr unsigned int maxV4L2BufferCount = 32;
namespace libcamera {
@@ -16,40 +21,64 @@ LOG_DEFINE_CATEGORY(RPISTREAM)
namespace RPi {
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
V4L2VideoDevice *Stream::dev() const
{
return dev_.get();
}
-std::string Stream::name() const
+const std::string &Stream::name() const
{
return name_;
}
-void Stream::resetBuffers()
+unsigned int Stream::swDownscale() const
{
- /* Add all internal buffers to the queue of usable buffers. */
- availableBuffers_ = {};
- for (auto const &buffer : internalBuffers_)
- availableBuffers_.push(buffer.get());
+ return swDownscale_;
}
-void Stream::setExternal(bool external)
+void Stream::setSwDownscale(unsigned int swDownscale)
{
- /* Import streams cannot be external. */
- ASSERT(!external || !importOnly_);
- external_ = external;
+ swDownscale_ = swDownscale;
}
-bool Stream::isExternal() const
+void Stream::resetBuffers()
{
- return external_;
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
}
void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
for (auto const &buffer : *buffers)
- bufferMap_.emplace(id_.get(), buffer.get());
+ bufferEmplace(++id_, buffer.get());
}
const BufferMap &Stream::getBuffers() const
@@ -57,68 +86,42 @@ const BufferMap &Stream::getBuffers() const
return bufferMap_;
}
-int Stream::getBufferId(FrameBuffer *buffer) const
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
{
- if (importOnly_)
- return -1;
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
/* Find the buffer in the map, and return the buffer id. */
auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
- [&buffer](auto const &p) { return p.second == buffer; });
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
if (it == bufferMap_.end())
- return -1;
+ return 0;
return it->first;
}
-void Stream::setExternalBuffer(FrameBuffer *buffer)
+void Stream::setExportedBuffer(FrameBuffer *buffer)
{
- bufferMap_.emplace(ipa::RPi::MaskExternalBuffer | id_.get(), buffer);
-}
-
-void Stream::removeExternalBuffer(FrameBuffer *buffer)
-{
- int id = getBufferId(buffer);
-
- /* Ensure we have this buffer in the stream, and it is marked external. */
- ASSERT(id != -1 && (id & ipa::RPi::MaskExternalBuffer));
- bufferMap_.erase(id);
+ bufferEmplace(++id_, buffer);
}
int Stream::prepareBuffers(unsigned int count)
{
int ret;
- if (!importOnly_) {
- if (count) {
- /* Export some frame buffers for internal use. */
- ret = dev_->exportBuffers(count, &internalBuffers_);
- if (ret < 0)
- return ret;
-
- /* Add these exported buffers to the internal/external buffer list. */
- setExportedBuffers(&internalBuffers_);
- resetBuffers();
- }
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
- /* We must import all internal/external exported buffers. */
- count = bufferMap_.size();
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
}
- /*
- * If this is an external stream, we must allocate slots for buffers that
- * might be externally allocated. We have no indication of how many buffers
- * may be used, so this might overallocate slots in the buffer cache.
- * Similarly, if this stream is only importing buffers, we do the same.
- *
- * \todo Find a better heuristic, or, even better, an exact solution to
- * this issue.
- */
- if (isExternal() || importOnly_)
- count = count * 2;
-
- return dev_->importBuffers(count);
+ return dev_->importBuffers(maxV4L2BufferCount);
}
int Stream::queueBuffer(FrameBuffer *buffer)
@@ -162,7 +165,7 @@ int Stream::queueBuffer(FrameBuffer *buffer)
void Stream::returnBuffer(FrameBuffer *buffer)
{
- if (!external_) {
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
/* For internal buffers, simply requeue back to the device. */
queueToDevice(buffer);
return;
@@ -171,9 +174,6 @@ void Stream::returnBuffer(FrameBuffer *buffer)
/* Push this buffer back into the queue to be used again. */
availableBuffers_.push(buffer);
- /* Allow the buffer id to be reused. */
- id_.release(getBufferId(buffer));
-
/*
* Do we have any Request buffers that are waiting to be queued?
* If so, do it now as availableBuffers_ will not be empty.
@@ -202,11 +202,32 @@ void Stream::returnBuffer(FrameBuffer *buffer)
}
}
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+ auto const &it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
+
int Stream::queueAllBuffers()
{
int ret;
- if (external_)
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
return 0;
while (!availableBuffers_.empty()) {
@@ -226,13 +247,23 @@ void Stream::releaseBuffers()
clearBuffers();
}
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+ if (flags_ & StreamFlag::RequiresMmap)
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, true));
+ else
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, false));
+}
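+
+/*
+ * Note (illustrative): for a stream created with StreamFlag::RequiresMmap,
+ * each BufferObject carries a MappedFrameBuffer, giving the pipeline handler
+ * CPU access to the buffer planes, e.g. for the software format conversion
+ * indicated by StreamFlag::Needs32bitConv.
+ */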
+
void Stream::clearBuffers()
{
availableBuffers_ = std::queue<FrameBuffer *>{};
requestBuffers_ = std::queue<FrameBuffer *>{};
internalBuffers_.clear();
bufferMap_.clear();
- id_.reset();
+ id_ = 0;
}
int Stream::queueToDevice(FrameBuffer *buffer)
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
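+
+/*
+ * Illustrative example: a stats buffer with id 3 is advertised to the IPA as
+ * (3 | MaskStats), and the plain buffer id is recovered on return with
+ * (id & MaskID).
+ */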
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+ * Indicates that the stream buffers need to be mmaped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
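+
+	/*
+	 * Illustrative note: flags combine with the usual bitwise operators,
+	 * e.g. Stream("ISP Input", entity, StreamFlag::ImportOnly |
+	 * StreamFlag::RequiresMmap). RequiresMmap can only be set at
+	 * construction time, as setFlags() asserts against it.
+	 */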
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+	/* Tracks a unique id key for the bufferMap_. */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+ /*
+ * This is a list of buffers exported internally. Need to keep this around
+ * as the stream needs to maintain ownership of these buffers.
+ */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
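+
+/*
+ * Example usage (illustrative):
+ *
+ *   enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+ *   RPi::Device<Isp, 4> isp;
+ *   isp[Isp::Stats].dev()->setFormat(&format);
+ */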
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+        # Note that the pipeline handler might override this value and
+        # not allocate any internal buffers if it knows they will never
+        # be used, for example if the RAW stream is marked as mandatory
+        # and there are no dropped frames signalled for algorithm
+        # convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
+ #
+ # "min_total_unicam_buffers": 4,
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+ # Custom timeout value (in ms) for camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..386e2296
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..37fb310f
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_heaps.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+ /* Array of Unicam and ISP device streams and associated buffers/streams. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaHeap dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop() override
+ {
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+	/*
+	 * Loop over all Unicam instances, but return as soon as a match is
+	 * found. This is to ensure we correctly enumerate the camera when an
+	 * instance of Unicam has registered with the media controller, but
+	 * has not registered device nodes due to a sensor subdevice failure.
+	 */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+			 * The lifetimes of these buffers are shorter than those
+			 * of the raw buffers, so allocating a fixed number will
+			 * still suffice if the application requests a greater
+			 * number of raw buffers, as they will be recycled more
+			 * quickly.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronous with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
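+
+		/*
+		 * Worked example (illustrative): with the default configuration
+		 * (minUnicamBuffers = 2, minTotalUnicamBuffers = 4) and an
+		 * application that allocated 6 RAW buffers, numBuffers evaluates
+		 * to max(2, 4 - 6) = 2 for Unicam Image and 6 + 2 = 8 for ISP
+		 * Input.
+		 */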
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+	/* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+	/* Can only output 1 RAW stream and up to 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+			status = CameraConfiguration::Status::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+ * quality and is usually undesired ?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must be for the largest resolution. We will
+ * have that fixed up in the code above.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+ LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+ << *target;
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If ISP::Output1 stream has not been requested by the application, we
+ * set it up for internal use now. This second stream will be used for
+ * fast colour denoise, and must be a quarter resolution of the ISP::Output0
+ * stream. However, also limit the maximum size to 1200 pixels in the
+ * larger dimension, just to avoid being wasteful with buffer allocations
+ * and memory bandwidth.
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
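+
+		/*
+		 * Worked example (illustrative): if Output0 is 1920x1080, the
+		 * limit becomes 1200x675, so Output1 is set to 1920x1080 / 2 =
+		 * 960x540, which is already within the limit and 2-pixel
+		 * aligned.
+		 */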
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+ ispMinCropSize_ = testCrop.size();
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+ platformSetIspCrop();
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Look up the sensor controls used for this frame sequence from
+ * DelayedControls and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * The ISP statistics buffer must not be re-queued or sent back to the
+ * application until the IPA signals that it is done with it.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+ buffer = unicam_[Unicam::Image].getBuffers().at(bayer).buffer;
+ LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bayer
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+ buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
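+ /*
+ * The lens shading control value embeds a dmabuf handle, so patch in
+ * the fd of our lens shading table buffer before applying the control
+ * to the ISP.
+ */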
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+ * Set the dequeue timeout to 5x the maximum frame length advertised
+ * by the IPA over a number of frames, with a minimum timeout value
+ * of 1s.
+ */
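+ /* E.g. a reported 33ms frame length gives max(1s, 5 * 33ms) = 1s. */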
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+ /* Take the first request from the queue and action the IPA. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
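+ /*
+ * Buffer ids handed to the IPA are tagged with a stream mask in the
+ * high bits (e.g. RPi::MaskBayerData); RPi::MaskID recovers the
+ * underlying buffer index.
+ */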
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffers with a timestamp lower than the
+ * current bayer buffer will be removed and re-queued to the driver.
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+ /* Log that no matching embedded data buffer was found. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
deleted file mode 100644
index f0ebe2e0..00000000
--- a/src/libcamera/pipeline/simple/converter.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.h - Format converter for simple pipeline handler
- */
-
-#pragma once
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include <libcamera/pixel_format.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-
-namespace libcamera {
-
-class FrameBuffer;
-class MediaDevice;
-class Size;
-class SizeRange;
-struct StreamConfiguration;
-class V4L2M2MDevice;
-
-class SimpleConverter
-{
-public:
- SimpleConverter(MediaDevice *media);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- std::vector<PixelFormat> formats(PixelFormat input);
- SizeRange sizes(const Size &input);
-
- std::tuple<unsigned int, unsigned int>
- strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
-
- int configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int ouput, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
-
- Signal<FrameBuffer *> inputBufferReady;
- Signal<FrameBuffer *> outputBufferReady;
-
-private:
- class Stream : protected Loggable
- {
- public:
- Stream(SimpleConverter *converter, unsigned int index);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- int configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input, FrameBuffer *output);
-
- protected:
- std::string logPrefix() const override;
-
- private:
- void captureBufferReady(FrameBuffer *buffer);
- void outputBufferReady(FrameBuffer *buffer);
-
- SimpleConverter *converter_;
- unsigned int index_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- unsigned int inputBufferCount_;
- unsigned int outputBufferCount_;
- };
-
- std::string deviceNode_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- std::vector<Stream> streams_;
- std::map<FrameBuffer *, unsigned int> queue_;
-};
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
index 9c99b32f..42b0896d 100644
--- a/src/libcamera/pipeline/simple/meson.build
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'converter.cpp',
'simple.cpp',
])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index bc0cb1a0..db3575c3 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
- * simple.cpp - Pipeline handler for simple pipelines
+ * Pipeline handler for simple pipelines
*/
#include <algorithm>
@@ -30,13 +30,14 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/converter.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
-#include "converter.h"
namespace libcamera {
@@ -100,8 +101,14 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
*
* During the breadth-first search, the pipeline is traversed from entity to
* entity, by following media graph links from source to sink, starting at the
- * camera sensor. When reaching an entity (on its sink side), all its source
- * pads are considered to continue the graph traversal.
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers only the default internal routes and doesn't attempt
+ * to set up custom routes. This can be extended if needed.
*
* The shortest path between the camera sensor and a video node is stored in
* SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
@@ -179,14 +186,24 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Whether the software ISP is enabled for this driver.
+ *
+ * The software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "imx7-csi", { { "pxp", 1 } } },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "j721e-csi2rx", {}, false },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -204,7 +221,8 @@ public:
int init();
int setupLinks();
int setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence);
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
void bufferReady(FrameBuffer *buffer);
unsigned int streamIndex(const Stream *stream) const
@@ -216,6 +234,11 @@ public:
/* The media entity, always valid. */
MediaEntity *entity;
/*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
@@ -254,16 +277,22 @@ public:
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
- std::unique_ptr<SimpleConverter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady();
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -279,6 +308,7 @@ public:
}
bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
private:
/*
@@ -291,6 +321,7 @@ private:
const SimpleCameraData::Configuration *pipeConfig_;
bool needConversion_;
+ Transform combinedTransform_;
};
class SimplePipelineHandler : public PipelineHandler
@@ -298,8 +329,8 @@ class SimplePipelineHandler : public PipelineHandler
public:
SimplePipelineHandler(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -313,6 +344,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -332,6 +364,7 @@ private:
}
std::vector<MediaEntity *> locateSensors();
+ static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
@@ -340,6 +373,7 @@ private:
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -386,17 +420,40 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
break;
}
- /* The actual breadth-first search algorithm. */
visited.insert(entity);
- for (MediaPad *pad : entity->pads()) {
- if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
- continue;
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
for (MediaLink *link : pad->links()) {
MediaEntity *next = link->sink()->entity();
if (visited.find(next) == visited.end()) {
queue.push({ next, link->sink() });
- parents.insert({ next, { entity, sinkPad, pad, link } });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
}
}
}
@@ -410,7 +467,7 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
* to the sensor. Store all the entities in the pipeline, from the
* camera sensor to the video node, in entities_.
*/
- entities_.push_front({ entity, sinkPad, nullptr, nullptr });
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
for (auto it = parents.find(entity); it != parents.end();
it = parents.find(entity)) {
@@ -455,14 +512,45 @@ int SimpleCameraData::init()
/* Open the converter, if any. */
MediaDevice *converter = pipe->converter();
if (converter) {
- converter_ = std::make_unique<SimpleConverter>(converter);
- if (!converter_->isValid()) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
LOG(SimplePipeline, Warning)
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+ * Instantiate the software ISP if it is enabled for this driver and no
+ * converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get());
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
@@ -520,13 +608,13 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
* corresponding possible V4L2 pixel formats on the video node.
*/
V4L2SubdeviceFormat format{};
- format.mbus_code = code;
+ format.code = code;
format.size = size;
int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
if (ret < 0) {
/* Pipeline configuration failed, skip this configuration. */
- format.mbus_code = code;
+ format.code = code;
format.size = size;
LOG(SimplePipeline, Debug)
<< "Sensor format " << format
@@ -534,7 +622,7 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
return;
}
- V4L2VideoDevice::Formats videoFormats = video_->formats(format.mbus_code);
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
LOG(SimplePipeline, Debug)
<< "Adding configuration for " << format.size
@@ -556,12 +644,20 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
config.captureFormat = pixelFormat;
config.captureSize = format.size;
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
+ if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+ /* Do not use the software ISP for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
}
configs_.push_back(config);
@@ -577,15 +673,32 @@ int SimpleCameraData::setupLinks()
* multiple sink links to be enabled together, even on different sink
* pads. We must thus start by disabling all sink links (but the one we
* want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
*/
+ MediaLink *sinkLink = nullptr;
+
for (SimpleCameraData::Entity &e : entities_) {
- if (!e.sourceLink)
- break;
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
- MediaEntity *remote = e.sourceLink->sink()->entity();
- for (MediaPad *pad : remote->pads()) {
for (MediaLink *link : pad->links()) {
- if (link == e.sourceLink)
+ if (link == sinkLink)
continue;
if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
@@ -597,18 +710,21 @@ int SimpleCameraData::setupLinks()
}
}
- if (!(e.sourceLink->flags() & MEDIA_LNK_FL_ENABLED)) {
- ret = e.sourceLink->setEnabled(true);
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
if (ret < 0)
return ret;
}
+
+ sinkLink = e.sourceLink;
}
return 0;
}
int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence)
+ V4L2Subdevice::Whence whence,
+ Transform transform)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
@@ -617,7 +733,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
- ret = sensor_->setFormat(format);
+ ret = sensor_->setFormat(format, transform);
if (ret < 0)
return ret;
@@ -644,7 +760,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
if (ret < 0)
return ret;
- if (format->mbus_code != sourceFormat.mbus_code ||
+ if (format->code != sourceFormat.code ||
format->size != sourceFormat.size) {
LOG(SimplePipeline, Debug)
<< "Source '" << source->entity()->name()
@@ -678,7 +794,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -687,23 +803,23 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
+ for (auto &item : conversionQueue_.front()) {
FrameBuffer *outputBuffer = item.second;
request = outputBuffer->request();
pipe->completeBuffer(request, outputBuffer);
}
- converterQueue_.pop();
+ conversionQueue_.pop();
if (request)
pipe->completeRequest(request);
@@ -720,9 +836,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
+ if (useConversion_ && !conversionQueue_.empty()) {
const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -735,18 +851,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front());
+ else
+ swIsp_->queueBuffers(buffer, conversionQueue_.front());
+
+ conversionQueue_.pop();
return;
}
@@ -755,13 +875,13 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -771,6 +891,56 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady()
+{
+ /* \todo Use the DelayedControls class */
+ swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE }));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+ if (!pad) {
+ LOG(SimplePipeline, Warning)
+ << "Entity " << entity->name()
+ << " has invalid route source pad "
+ << route.source.pad;
+ continue;
+ }
+
+ pads.push_back(pad);
+ }
+
+ return pads;
+}
+
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
@@ -782,17 +952,45 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
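+
+/*
+ * As a worked example with hypothetical numbers: with supportedSizes of
+ * min 32x32, max 4096x4096, hStep 16 and vStep 8, a requested 1013x779 is
+ * bounded to the range, shrunk by the minimum to 981x747, aligned down to
+ * 976x744, and grown back to 1008x776.
+ */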
+
+} /* namespace */
+
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
+ const CameraSensor *sensor = data_->sensor_.get();
Status status = Valid;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
/* Cap the number of entries to the available streams. */
if (config_.size() > data_->streams_.size()) {
@@ -897,10 +1095,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+ * the smaller valid output size closest to the requested size.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
- << " to " << pipeConfig_->captureSize;
- cfg.size = pipeConfig_->captureSize;
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -912,13 +1119,16 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -944,12 +1154,12 @@ SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
{
}
-CameraConfiguration *SimplePipelineHandler::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
SimpleCameraData *data = cameraData(camera);
- CameraConfiguration *config =
- new SimpleCameraConfiguration(camera, data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
if (roles.empty())
return config;
@@ -1020,15 +1230,16 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
V4L2SubdeviceFormat format{};
- format.mbus_code = pipeConfig->code;
+ format.code = pipeConfig->code;
format.size = pipeConfig->sensorSize;
- ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat);
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
if (ret < 0)
return ret;
/* Configure the video node. */
- V4L2PixelFormat videoFormat = V4L2PixelFormat::fromPixelFormat(pipeConfig->captureFormat);
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = videoFormat;
@@ -1055,14 +1266,14 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
@@ -1075,7 +1286,10 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ return data->converter_
+ ? data->converter_->configure(inputCfg, outputCfgs)
+ : data->swIsp_->configure(inputCfg, outputCfgs,
+ data->sensor_->controls());
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -1088,9 +1302,12 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(data->streamIndex(stream),
+ count, buffers)
+ : data->swIsp_->exportBuffers(data->streamIndex(stream),
+ count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1109,13 +1326,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1134,15 +1351,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1154,15 +1377,19 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
- data->converterBuffers_.clear();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1180,7 +1407,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
+ if (data->useConversion_) {
buffers.emplace(data->streamIndex(stream), buffer);
} else {
ret = data->video_->queueBuffer(buffer);
@@ -1189,8 +1416,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_)
+ data->conversionQueue_.push(std::move(buffers));
return 0;
}
@@ -1260,6 +1487,37 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
return sensors;
}
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
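+ /*
+ * This relies on the try state of a newly opened subdev being
+ * initialized to the driver's default configuration, so that the
+ * try routing table reflects the default routes (an assumption
+ * about kernel behaviour).
+ */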
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * If the routing table is empty, we won't be able to meaningfully use
+ * the subdev.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
@@ -1286,6 +1544,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
std::vector<MediaEntity *> sensors = locateSensors();
if (sensors.empty()) {
@@ -1352,6 +1612,23 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
<< ": " << strerror(-ret);
return false;
}
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
break;
default:
@@ -1455,6 +1732,6 @@ void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
}
}
-REGISTER_PIPELINE_HANDLER(SimplePipelineHandler)
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index 53b2f23a..8a7409fc 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
@@ -46,8 +46,16 @@ public:
ControlInfoMap::Map *ctrls);
void bufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
+
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
@@ -66,8 +74,8 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -81,8 +89,6 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
- std::string generateId(const UVCCameraData *data);
-
int processControl(ControlList *controls, unsigned int id,
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
@@ -105,8 +111,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -149,7 +155,7 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -159,6 +165,11 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -167,24 +178,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- V4L2VideoDevice::Formats v4l2Formats = data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- for (const auto &format : v4l2Formats) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat.isValid())
- deviceFormats[pixelFormat] = format.second;
- }
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -205,7 +210,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
int ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -213,7 +218,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
cfg.setStream(&data->stream_);
@@ -340,12 +345,8 @@ int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
{
ControlList controls(data->video_->controls());
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
+ for (const auto &[id, value] : request->controls())
processControl(&controls, id, value);
- }
for (const auto &ctrl : controls)
LOG(UVC, Debug)
@@ -383,69 +384,6 @@ int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request)
return 0;
}
-std::string PipelineHandlerUVC::generateId(const UVCCameraData *data)
-{
- const std::string path = data->video_->devicePath();
-
- /* Create a controller ID from first device described in firmware. */
- std::string controllerId;
- std::string searchPath = path;
- while (true) {
- std::string::size_type pos = searchPath.rfind('/');
- if (pos <= 1) {
- LOG(UVC, Error) << "Can not find controller ID";
- return {};
- }
-
- searchPath = searchPath.substr(0, pos);
-
- controllerId = sysfs::firmwareNodePath(searchPath);
- if (!controllerId.empty())
- break;
- }
-
- /*
- * Create a USB ID from the device path which has the known format:
- *
- * path = bus, "-", ports, ":", config, ".", interface ;
- * bus = number ;
- * ports = port, [ ".", ports ] ;
- * port = number ;
- * config = number ;
- * interface = number ;
- *
- * Example: 3-2.4:1.0
- *
- * The bus is not guaranteed to be stable and needs to be stripped from
- * the USB ID. The final USB ID is built up of the ports, config and
- * interface properties.
- *
- * Example 2.4:1.0.
- */
- std::string usbId = utils::basename(path.c_str());
- usbId = usbId.substr(usbId.find('-') + 1);
-
- /* Creata a device ID from the USB devices vendor and product ID. */
- std::string deviceId;
- for (const char *name : { "idVendor", "idProduct" }) {
- std::ifstream file(path + "/../" + name);
-
- if (!file.is_open())
- return {};
-
- std::string value;
- std::getline(file, value);
- file.close();
-
- if (!deviceId.empty())
- deviceId += ":";
-
- deviceId += value;
- }
-
- return controllerId + "-" + usbId + "-" + deviceId;
-}
-
bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
{
MediaDevice *media;
@@ -461,12 +399,7 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return false;
/* Create and register the camera. */
- std::string id = generateId(data.get());
- if (id.empty()) {
- LOG(UVC, Error) << "Failed to generate camera ID";
- return false;
- }
-
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
@@ -501,6 +434,39 @@ int UVCCameraData::init(MediaDevice *media)
video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
+
+ /*
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
+ */
+ Size resolution;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
+ for (const SizeRange &sizeRange : sizeRanges) {
+ if (sizeRange.max > resolution)
+ resolution = sizeRange.max;
+ }
+ }
+
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
properties_.set(properties::Model, utils::toAscii(media->model()));
/*
@@ -531,19 +497,6 @@ int UVCCameraData::init(MediaDevice *media)
properties_.set(properties::Location, location);
- /*
- * Get the current format in order to initialize the sensor array
- * properties.
- */
- Size resolution;
- for (const auto &it : video_->formats()) {
- const std::vector<SizeRange> &sizeRanges = it.second;
- for (const SizeRange &sizeRange : sizeRanges) {
- if (sizeRange.max > resolution)
- resolution = sizeRange.max;
- }
- }
-
properties_.set(properties::PixelArraySize, resolution);
properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
@@ -562,6 +515,70 @@ int UVCCameraData::init(MediaDevice *media)
return 0;
}
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+ /* Create a controller ID from first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+ LOG(UVC, Error) << "Cannot find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
+ break;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+ * Example 2.4:1.0.
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+ /* Create a device ID from the USB device's vendor and product ID. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
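+ /*
+ * The resulting ID takes the form <controller>-<usb id>-<device id>,
+ * e.g. "\_SB_.PCI0.XHC_.RHUB.HS05-2.4:1.0-046d:0825" (hypothetical
+ * values).
+ */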
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ControlInfoMap::Map *ctrls)
{
@@ -692,6 +709,6 @@ void UVCCameraData::bufferReady(FrameBuffer *buffer)
pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 3379ac5c..c7650432 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
@@ -54,7 +54,7 @@ public:
int init();
int allocateMockIPABuffers();
void bufferReady(FrameBuffer *buffer);
- void paramsBufferReady(unsigned int id);
+ void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
@@ -84,8 +84,8 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -128,8 +128,8 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -171,7 +171,7 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -189,11 +189,13 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
VimcCameraData *data = cameraData(camera);
- CameraConfiguration *config = new VimcCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
@@ -242,7 +244,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
/* The scaler hardcodes a x3 scale-up ratio. */
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
ret = data->sensor_->setFormat(&subformat);
@@ -253,7 +255,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = pixelformats.find(cfg.pixelFormat)->second;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -275,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -283,7 +285,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
/*
@@ -378,7 +380,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
unsigned int offset;
uint32_t cid;
@@ -471,7 +473,15 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
std::string conf = data->ipa_->configurationFile("vimc.conf");
- data->ipa_->init(IPASettings{ conf, data->sensor_->model() });
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
@@ -598,7 +608,7 @@ int VimcCameraData::allocateMockIPABuffers()
constexpr unsigned int kBufCount = 2;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::BGR888);
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
format.size = Size (160, 120);
int ret = video_->setFormat(&format);
@@ -608,10 +618,11 @@ int VimcCameraData::allocateMockIPABuffers()
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
-void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id)
+void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 7ebd76ad..5ea2ca78 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#include "libcamera/internal/pipeline_handler.h"
#include <chrono>
+#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <libcamera/base/log.h>
@@ -15,10 +16,11 @@
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
@@ -64,11 +66,10 @@ LOG_DEFINE_CATEGORY(Pipeline)
*
* In order to honour the std::enable_shared_from_this<> contract,
* PipelineHandler instances shall never be constructed manually, but always
- * through the PipelineHandlerFactory::create() function implemented by the
- * respective factories.
+ * through the PipelineHandlerFactoryBase::create() function.
*/
PipelineHandler::PipelineHandler(CameraManager *manager)
- : manager_(manager), lockOwner_(false)
+ : manager_(manager), useCount_(0)
{
}
@@ -143,58 +144,89 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
}
/**
- * \brief Lock all media devices acquired by the pipeline
+ * \brief Acquire exclusive access to the pipeline handler for the process
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * This function locks all the media devices used by the pipeline to ensure
+ * that no other process can access them concurrently.
+ *
+ * Access to a pipeline handler may be acquired recursively from within the
+ * same process. Every successful acquire() call shall be matched with a
+ * release() call. This allows concurrent access to the same pipeline handler
+ * from different cameras within the same process.
+ *
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
*
* \context This function is \threadsafe.
*
- * \return True if the devices could be locked, false otherwise
- * \sa unlock()
- * \sa MediaDevice::lock()
+ * \return True if the pipeline handler was acquired, false if another process
+ * has already acquired it
+ * \sa release()
*/
-bool PipelineHandler::lock()
+bool PipelineHandler::acquire()
{
MutexLocker locker(lock_);
- /* Do not allow nested locking in the same libcamera instance. */
- if (lockOwner_)
- return false;
+ if (useCount_) {
+ ++useCount_;
+ return true;
+ }
for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
if (!media->lock()) {
- unlock();
+ unlockMediaDevices();
return false;
}
}
- lockOwner_ = true;
-
+ ++useCount_;
return true;
}
/**
- * \brief Unlock all media devices acquired by the pipeline
+ * \brief Release exclusive access to the pipeline handler
+ * \param[in] camera The camera for which to release data
+ *
+ * This function releases access to the pipeline handler previously acquired by
+ * a call to acquire(). Every release() call shall match a previous successful
+ * acquire() call. Calling this function on a pipeline handler that hasn't been
+ * acquired results in undefined behaviour.
*
- * This function shall not be called from pipeline handler implementation, as
- * the Camera class handles locking directly.
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
*
* \context This function is \threadsafe.
*
- * \sa lock()
+ * \sa acquire()
*/
-void PipelineHandler::unlock()
+void PipelineHandler::release(Camera *camera)
{
MutexLocker locker(lock_);
- if (!lockOwner_)
- return;
+ ASSERT(useCount_);
+
+ unlockMediaDevices();
+ releaseDevice(camera);
+
+ --useCount_;
+}
+
+/**
+ * \brief Release resources associated with this camera
+ * \param[in] camera The camera for which to release resources
+ *
+ * Pipeline handlers may override this in order to perform cleanup operations
+ * when a camera is released, such as freeing memory.
+ */
+void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+void PipelineHandler::unlockMediaDevices()
+{
for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->unlock();
-
- lockOwner_ = false;
}
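The reference counting above lets two cameras served by the same pipeline handler be used concurrently within one process. A minimal sketch of the matched acquire/release pattern, as seen through the public Camera API (the camera IDs are hypothetical):

	std::shared_ptr<Camera> cam0 = cm->get("/base/i2c@0/sensor@10");  /* hypothetical ID */
	std::shared_ptr<Camera> cam1 = cm->get("/base/i2c@1/sensor@36");  /* hypothetical ID */
	cam0->acquire();  /* first acquire() locks the media devices */
	cam1->acquire();  /* nested acquire() only increments useCount_ */
	cam1->release();  /* matched with the second acquire() */
	cam0->release();  /* last release() unlocks the media devices */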
/**
@@ -217,8 +249,7 @@ void PipelineHandler::unlock()
* handler.
*
* \return A valid CameraConfiguration if the requested roles can be satisfied,
- * or a null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * or a null pointer otherwise.
*/
/**
@@ -312,6 +343,8 @@ void PipelineHandler::stop(Camera *camera)
/* Make sure no requests are pending. */
Camera::Private *data = camera->_d();
ASSERT(data->queuedRequests_.empty());
+
+ data->requestSequence_ = 0;
}
/**
@@ -504,6 +537,62 @@ void PipelineHandler::completeRequest(Request *request)
}
/**
+ * \brief Retrieve the absolute path to a platform configuration file
+ * \param[in] subdir The pipeline handler specific subdirectory name
+ * \param[in] name The configuration file name
+ *
+ * This function locates a named platform configuration file and returns
+ * its absolute path to the pipeline handler. It searches the following
+ * directories, in order:
+ *
+ * - If libcamera is not installed, the src/libcamera/pipeline/\<subdir\>/data/
+ *   directory within the source tree; otherwise
+ * - The system data (share/libcamera/pipeline/\<subdir\>) directory.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * \return The full path to the pipeline handler configuration file, or an empty
+ * string if no configuration file can be found
+ */
+std::string PipelineHandler::configurationFile(const std::string &subdir,
+ const std::string &name) const
+{
+ std::string confPath;
+ struct stat statbuf;
+ int ret;
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding pipeline handler.
+ */
+ std::string confDir = root + "src/libcamera/pipeline/";
+ confPath = confDir + subdir + "/data/" + name;
+
+ LOG(Pipeline, Info)
+ << "libcamera is not installed. Loading platform configuration file from '"
+ << confPath << "'";
+ } else {
+ /* Else look in the system locations. */
+ confPath = std::string(LIBCAMERA_DATA_DIR)
+ + "/pipeline/" + subdir + '/' + name;
+ }
+
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+
+ return std::string();
+}
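As an illustration, a pipeline handler would call this helper with its subdirectory and a per-platform file name, both placeholders in this sketch:

	std::string path = configurationFile("example", "sensor-tuning.yaml");  /* hypothetical names */
	if (path.empty())
		return -ENOENT;  /* not found; an error has already been logged */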
+
+/**
* \brief Register a camera to the camera manager and pipeline handler
* \param[in] camera The camera to be added
*
@@ -524,7 +613,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
* Walk the entity list and map the devnums of all capture video nodes
* to the camera.
*/
- std::vector<dev_t> devnums;
+ std::vector<int64_t> devnums;
for (const std::shared_ptr<MediaDevice> &media : mediaDevices_) {
for (const MediaEntity *entity : media->entities()) {
if (entity->pads().size() == 1 &&
@@ -536,7 +625,14 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
}
}
- manager_->addCamera(std::move(camera), devnums);
+ /*
+ * Store the associated devices as a property of the camera to allow
+ * systems to identify which devices are managed by libcamera.
+ */
+ Camera::Private *data = camera->_d();
+ data->properties_.set(properties::SystemDevices, devnums);
+
+ manager_->_d()->addCamera(std::move(camera));
}
/**
@@ -553,7 +649,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
*/
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
- media->disconnected.connect(this, [=]() { mediaDeviceDisconnected(media); });
+ media->disconnected.connect(this, [this, media] { mediaDeviceDisconnected(media); });
}
/**
@@ -597,13 +693,13 @@ void PipelineHandler::disconnect()
*/
std::vector<std::weak_ptr<Camera>> cameras{ std::move(cameras_) };
- for (std::weak_ptr<Camera> ptr : cameras) {
+ for (const std::weak_ptr<Camera> &ptr : cameras) {
std::shared_ptr<Camera> camera = ptr.lock();
if (!camera)
continue;
camera->disconnect();
- manager_->removeCamera(camera);
+ manager_->_d()->removeCamera(camera);
}
}
@@ -624,27 +720,25 @@ void PipelineHandler::disconnect()
*/
/**
- * \class PipelineHandlerFactory
- * \brief Registration of PipelineHandler classes and creation of instances
+ * \class PipelineHandlerFactoryBase
+ * \brief Base class for pipeline handler factories
*
- * To facilitate discovery and instantiation of PipelineHandler classes, the
- * PipelineHandlerFactory class maintains a registry of pipeline handler
- * classes. Each PipelineHandler subclass shall register itself using the
- * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
- * instance of a PipelineHandlerFactory subclass and register it with the
- * static list of factories.
+ * The PipelineHandlerFactoryBase class is the base of all specializations of
+ * the PipelineHandlerFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a pipeline handler factory
+ * \brief Construct a pipeline handler factory base
* \param[in] name Name of the pipeline handler class
*
- * Creating an instance of the factory registers is with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
* The factory \a name is used for debugging purposes and shall be unique.
*/
-PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+PipelineHandlerFactoryBase::PipelineHandlerFactoryBase(const char *name)
: name_(name)
{
registerType(this);
@@ -657,15 +751,15 @@ PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
* \return A shared pointer to a new instance of the PipelineHandler subclass
* corresponding to the factory
*/
-std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
+std::shared_ptr<PipelineHandler> PipelineHandlerFactoryBase::create(CameraManager *manager) const
{
- PipelineHandler *handler = createInstance(manager);
+ std::unique_ptr<PipelineHandler> handler = createInstance(manager);
handler->name_ = name_.c_str();
- return std::shared_ptr<PipelineHandler>(handler);
+ return std::shared_ptr<PipelineHandler>(std::move(handler));
}
/**
- * \fn PipelineHandlerFactory::name()
+ * \fn PipelineHandlerFactoryBase::name()
* \brief Retrieve the factory name
* \return The factory name
*/
@@ -677,9 +771,10 @@ std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *m
* The caller is responsible for guaranteeing the uniqueness of the pipeline
* handler name.
*/
-void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
+void PipelineHandlerFactoryBase::registerType(PipelineHandlerFactoryBase *factory)
{
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
+ std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
factories.push_back(factory);
}
@@ -688,34 +783,77 @@ void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
* \brief Retrieve the list of all pipeline handler factories
* \return the list of pipeline handler factories
*/
-std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
+std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories()
{
/*
* The static factories map is defined inside the function to ensure
* it gets initialized on first use, without any dependency on
* link order.
*/
- static std::vector<PipelineHandlerFactory *> factories;
+ static std::vector<PipelineHandlerFactoryBase *> factories;
return factories;
}
/**
- * \fn PipelineHandlerFactory::createInstance()
- * \brief Create an instance of the PipelineHandler corresponding to the factory
- * \param[in] manager The camera manager
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
+ * \return The factory of the pipeline with name \a name, or nullptr if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
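For example, a factory registered under a given name can be retrieved and used to create a handler; a sketch, assuming manager is a valid CameraManager pointer:

	const PipelineHandlerFactoryBase *factory =
		PipelineHandlerFactoryBase::getFactoryByName("vimc");
	if (factory)
		std::shared_ptr<PipelineHandler> handler = factory->create(manager);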
+
+/**
+ * \class PipelineHandlerFactory
+ * \brief Registration of PipelineHandler classes and creation of instances
+ * \tparam _PipelineHandler The pipeline handler class type for this factory
*
- * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
- * macro. It creates a pipeline handler instance associated with the camera
- * \a manager.
+ * To facilitate discovery and instantiation of PipelineHandler classes, the
+ * PipelineHandlerFactory class implements auto-registration of pipeline
+ * handlers. Each PipelineHandler subclass shall register itself using the
+ * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
+ * instance of a PipelineHandlerFactory and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+ * \brief Construct a pipeline handler factory
+ * \param[in] name Name of the pipeline handler class
*
- * \return a pointer to a newly constructed instance of the PipelineHandler
- * subclass corresponding to the factory
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used for debugging purposes and shall be unique.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::createInstance() const
+ * \brief Create an instance of the PipelineHandler corresponding to the factory
+ * \param[in] manager The camera manager
+ * \return A unique pointer to a newly constructed instance of the
+ * PipelineHandler subclass corresponding to the factory
*/
/**
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
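With the added name argument, a hypothetical handler implemented in src/libcamera/pipeline/example/ would register itself as:

	REGISTER_PIPELINE_HANDLER(PipelineHandlerExample, "example")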
diff --git a/src/libcamera/pixel_format.cpp b/src/libcamera/pixel_format.cpp
index 80c22072..314179a8 100644
--- a/src/libcamera/pixel_format.cpp
+++ b/src/libcamera/pixel_format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.cpp - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#include <libcamera/formats.h>
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 0e6b4e1d..86d27b2d 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
#include "libcamera/internal/process.h"
@@ -263,7 +263,9 @@ int Process::start(const std::string &path,
closeAllFdsExcept(fds);
- unsetenv("LIBCAMERA_LOG_FILE");
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (file && strcmp(file, "syslog"))
+ unsetenv("LIBCAMERA_LOG_FILE");
const char **argv = new const char *[args.size() + 2];
unsigned int len = args.size();
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
index f917e334..8b274c38 100644
--- a/src/libcamera/property_ids.cpp.in
+++ b/src/libcamera/property_ids.cpp.in
@@ -23,14 +23,7 @@ namespace properties {
${controls_doc}
-/**
- * \brief Namespace for libcamera draft properties
- */
-namespace draft {
-
-${draft_controls_doc}
-
-} /* namespace draft */
+${vendor_controls_doc}
#ifndef __DOXYGEN__
/*
@@ -39,11 +32,8 @@ ${draft_controls_doc}
*/
${controls_def}
-namespace draft {
-
-${draft_controls_def}
+${vendor_controls_def}
-} /* namespace draft */
#endif
/**
diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids_core.yaml
index 11b7ebdc..834454a4 100644
--- a/src/libcamera/property_ids.yaml
+++ b/src/libcamera/property_ids_core.yaml
@@ -2,8 +2,9 @@
#
# Copyright (C) 2019, Google Inc.
#
-%YAML 1.2
+%YAML 1.1
---
+vendor: libcamera
controls:
- Location:
type: int32_t
@@ -29,10 +30,10 @@ controls:
- Rotation:
type: int32_t
description: |
- The camera rotation is expressed as the angular difference in degrees
- between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected
- on the image sensor pixel array.
+ The camera physical mounting rotation. It is expressed as the angular
+ difference in degrees between two reference systems, one relative to the
+ camera module, and one defined on the external world scene to be
+ captured when projected on the image sensor pixel array.
A camera sensor has a 2-dimensional reference system 'Rc' defined by
its pixel array read-out order. The origin is set to the first pixel
@@ -690,37 +691,14 @@ controls:
that is twice that of the full resolution mode. This value will be valid
after the configure method has returned successfully.
- # ----------------------------------------------------------------------------
- # Draft properties section
-
- - ColorFilterArrangement:
- type: int32_t
- draft: true
+ - SystemDevices:
+ type: int64_t
+ size: [n]
description: |
- The arrangement of color filters on sensor; represents the colors in the
- top-left 2x2 section of the sensor, in reading order. Currently
- identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
- enum:
- - name: RGGB
- value: 0
- description: RGGB Bayer pattern
- - name: GRBG
- value: 1
- description: GRBG Bayer pattern
- - name: GBRG
- value: 2
- description: GBRG Bayer pattern
- - name: BGGR
- value: 3
- description: BGGR Bayer pattern
- - name: RGB
- value: 4
- description: |
- Sensor is not Bayer; output has 3 16-bit values for each pixel,
- instead of just 1 16-bit value per pixel.
- - name: MONO
- value: 5
- description: |
- Sensor is not Bayer; output consists of a single colour channel.
+ A list of integer values of type dev_t denoting the major and minor
+ device numbers of the underlying devices used in the operation of this
+ camera.
+
+ Different cameras may report identical devices.
...
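Since each value is a dev_t stored in an int64_t, applications can recover the major and minor numbers with the standard macros; a minimal sketch:

	#include <sys/sysmacros.h>
	#include <iostream>

	const auto devices = camera->properties().get(properties::SystemDevices);
	if (devices) {
		for (int64_t dev : *devices)
			std::cout << major(dev) << ":" << minor(dev) << std::endl;
	}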
diff --git a/src/libcamera/property_ids_draft.yaml b/src/libcamera/property_ids_draft.yaml
new file mode 100644
index 00000000..62f0e242
--- /dev/null
+++ b/src/libcamera/property_ids_draft.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: draft
+controls:
+ - ColorFilterArrangement:
+ type: int32_t
+ vendor: draft
+ description: |
+ The arrangement of color filters on sensor; represents the colors in the
+ top-left 2x2 section of the sensor, in reading order. Currently
+ identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+ enum:
+ - name: RGGB
+ value: 0
+ description: RGGB Bayer pattern
+ - name: GRBG
+ value: 1
+ description: GRBG Bayer pattern
+ - name: GBRG
+ value: 2
+ description: GBRG Bayer pattern
+ - name: BGGR
+ value: 3
+ description: BGGR Bayer pattern
+ - name: RGB
+ value: 4
+ description: |
+ Sensor is not Bayer; output has 3 16-bit values for each pixel,
+ instead of just 1 16-bit value per pixel.
+ - name: MONO
+ value: 5
+ description: |
+ Sensor is not Bayer; output consists of a single colour channel.
+
+...
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index 70c8760a..aa4d9cd7 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-proxy_install_dir = get_option('libexecdir') / 'libcamera'
+proxy_install_dir = libcamera_libexecdir
# generate {pipeline}_ipa_proxy_worker.cpp
foreach mojom : ipa_mojoms
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
index 9bb08fda..f1d73a5c 100644
--- a/src/libcamera/pub_key.cpp
+++ b/src/libcamera/pub_key.cpp
@@ -2,12 +2,17 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.cpp - Public key signature verification
+ * Public key signature verification
*/
#include "libcamera/internal/pub_key.h"
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+#include <openssl/evp.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+#include <openssl/x509.h>
+#elif HAVE_GNUTLS
#include <gnutls/abstract.h>
#endif
@@ -33,7 +38,14 @@ namespace libcamera {
PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
: valid_(false)
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ const uint8_t *data = key.data();
+ pubkey_ = d2i_PUBKEY(nullptr, &data, key.size());
+ if (!pubkey_)
+ return;
+
+ valid_ = true;
+#elif HAVE_GNUTLS
int ret = gnutls_pubkey_init(&pubkey_);
if (ret < 0)
return;
@@ -52,7 +64,9 @@ PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
PubKey::~PubKey()
{
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ EVP_PKEY_free(pubkey_);
+#elif HAVE_GNUTLS
gnutls_pubkey_deinit(pubkey_);
#endif
}
@@ -76,7 +90,35 @@ PubKey::~PubKey()
bool PubKey::verify([[maybe_unused]] Span<const uint8_t> data,
[[maybe_unused]] Span<const uint8_t> sig) const
{
-#if HAVE_GNUTLS
+ if (!valid_)
+ return false;
+
+#if HAVE_CRYPTO
+ /*
+ * Create and initialize a public key algorithm context for signature
+ * verification.
+ */
+ EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pubkey_, nullptr);
+ if (!ctx)
+ return false;
+
+ if (EVP_PKEY_verify_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0 ||
+ EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
+ return false;
+ }
+
+ /* Calculate the SHA256 digest of the data. */
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+ SHA256(data.data(), data.size(), digest);
+
+ /* Decrypt the signature and verify it matches the digest. */
+ int ret = EVP_PKEY_verify(ctx, sig.data(), sig.size(), digest,
+ SHA256_DIGEST_LENGTH);
+ EVP_PKEY_CTX_free(ctx);
+ return ret == 1;
+#elif HAVE_GNUTLS
const gnutls_datum_t gnuTlsData{
const_cast<unsigned char *>(data.data()),
static_cast<unsigned int>(data.size())
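Whichever backend is compiled in, callers are unaffected; verification reduces to a short sketch, with pubKeyDer, data and sig standing in for Span<const uint8_t> buffers assumed to be populated:

	PubKey key(pubKeyDer);  /* DER-encoded public key bytes (assumed) */
	if (!key.isValid() || !key.verify(data, sig))
		return false;  /* reject unsigned or tampered payloads */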
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index 51d74b29..cfb451e9 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
#include "libcamera/internal/request.h"
@@ -158,9 +158,12 @@ void Request::Private::cancel()
}
/**
- * \copydoc Request::reuse()
+ * \brief Reset the request internal data to default values
+ *
+ * After calling this function, all request internal data will have default
+ * values as if the Request::Private instance had just been constructed.
*/
-void Request::Private::reuse()
+void Request::Private::reset()
{
sequence_ = 0;
cancelled_ = false;
@@ -349,7 +352,7 @@ Request::Request(Camera *camera, uint64_t cookie)
camera->_d()->validator());
/**
- * \todo: Add a validator for metadata controls.
+ * \todo Add a validator for metadata controls.
*/
metadata_ = new ControlList(controls::controls);
@@ -380,7 +383,7 @@ void Request::reuse(ReuseFlag flags)
{
LIBCAMERA_TRACEPOINT(request_reuse, this);
- _d()->reuse();
+ _d()->reset();
if (flags & ReuseBuffers) {
for (auto pair : bufferMap_) {
@@ -526,8 +529,8 @@ FrameBuffer *Request::findBuffer(const Stream *stream) const
*
* When requests are queued, they are given a sequential number to track the
* order in which requests are queued to a camera. This number counts all
- * requests given to a camera through its lifetime, and is not reset to zero
- * between camera stop/start sequences.
+ * requests given to a camera and is reset to zero between camera stop/start
+ * sequences.
*
* It can be used to support debugging and identifying the flow of requests
* through a pipeline, but does not guarantee to represent the sequence number
diff --git a/src/libcamera/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
index d055c16a..c6d7f801 100644
--- a/src/libcamera/camera_sensor.cpp
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.cpp - A camera sensor
+ * A camera sensor
*/
#include "libcamera/internal/camera_sensor.h"
@@ -15,6 +15,8 @@
#include <math.h>
#include <string.h>
+#include <libcamera/camera.h>
+#include <libcamera/orientation.h>
#include <libcamera/property_ids.h>
#include <libcamera/base/utils.h>
@@ -55,7 +57,8 @@ LOG_DEFINE_CATEGORY(CameraSensor)
*/
CameraSensor::CameraSensor(const MediaEntity *entity)
: entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
- bayerFormat_(nullptr), properties_(properties::properties)
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
{
}
@@ -152,7 +155,12 @@ int CameraSensor::init()
*/
if (entity_->device()->driver() == "vimc") {
initVimcDefaultProperties();
- return initProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
}
/* Get the color filter array pattern (only for RAW sensors). */
@@ -176,9 +184,48 @@ int CameraSensor::init()
if (ret)
return ret;
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
}
+int CameraSensor::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+ *
+ * Verify it's a platform device and construct ID from the device path
+ * and model of sensor.
+ */
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
+
int CameraSensor::validateSensorDriver()
{
int err = 0;
@@ -217,6 +264,26 @@ int CameraSensor::validateSensorDriver()
}
/*
+ * Verify if sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
* Make sure the required selection targets are supported.
*
* Failures in reading any of the targets are not deemed to be fatal,
@@ -275,6 +342,7 @@ int CameraSensor::validateSensorDriver()
* required by the CameraSensor class.
*/
static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
V4L2_CID_EXPOSURE,
V4L2_CID_HBLANK,
V4L2_CID_PIXEL_RATE,
@@ -384,18 +452,18 @@ int CameraSensor::initProperties()
/* Retrieve and register properties from the kernel interface. */
const ControlInfoMap &controls = subdev_->controls();
- int32_t propertyValue;
const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
if (orientation != controls.end()) {
int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
switch (v4l2Orientation) {
default:
LOG(CameraSensor, Warning)
<< "Unsupported camera location "
<< v4l2Orientation << ", setting to External";
- /* Fall-through */
+ [[fallthrough]];
case V4L2_CAMERA_ORIENTATION_EXTERNAL:
propertyValue = properties::CameraLocationExternal;
break;
@@ -413,8 +481,27 @@ int CameraSensor::initProperties()
const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
if (rotationControl != controls.end()) {
- propertyValue = rotationControl->second.def().get<int32_t>();
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
}
properties_.set(properties::PixelArraySize, pixelArraySize_);
@@ -467,8 +554,8 @@ int CameraSensor::discoverAncillaryDevices()
ret = focusLens_->init();
if (ret) {
LOG(CameraSensor, Error)
- << "CameraLens initialisation failed";
- return ret;
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
}
break;
@@ -510,6 +597,21 @@ int CameraSensor::discoverAncillaryDevices()
*/
/**
+ * \fn CameraSensor::device()
+ * \brief Retrieve the camera sensor device
+ * \todo Remove this function by integrating DelayedControl with CameraSensor
+ * \return The camera sensor device
+ */
+
+/**
+ * \fn CameraSensor::focusLens()
+ * \brief Retrieve the focus lens controller
+ *
+ * \return The focus lens controller. nullptr if no focus lens controller is
+ * connected to the sensor
+ */
+
+/**
* \fn CameraSensor::mbusCodes()
* \brief Retrieve the media bus codes supported by the camera sensor
*
@@ -562,64 +664,6 @@ Size CameraSensor::resolution() const
}
/**
- * \fn CameraSensor::testPatternModes()
- * \brief Retrieve all the supported test pattern modes of the camera sensor
- * The test pattern mode values correspond to the controls::TestPattern control.
- *
- * \return The list of test pattern modes
- */
-
-/**
- * \brief Set the test pattern mode for the camera sensor
- * \param[in] mode The test pattern mode
- *
- * The new \a mode is applied to the sensor if it differs from the active test
- * pattern mode. Otherwise, this function is a no-op. Setting the same test
- * pattern mode for every frame thus incurs no performance penalty.
- */
-int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternMode_ == mode)
- return 0;
-
- if (testPatternModes_.empty()) {
- LOG(CameraSensor, Error)
- << "Camera sensor does not support test pattern modes.";
- return -EINVAL;
- }
-
- return applyTestPatternMode(mode);
-}
-
-int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternModes_.empty())
- return 0;
-
- auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
- mode);
- if (it == testPatternModes_.end()) {
- LOG(CameraSensor, Error) << "Unsupported test pattern mode "
- << mode;
- return -EINVAL;
- }
-
- LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
-
- int32_t index = staticProps_->testPatternModes.at(mode);
- ControlList ctrls{ controls() };
- ctrls.set(V4L2_CID_TEST_PATTERN, index);
-
- int ret = setControls(&ctrls);
- if (ret)
- return ret;
-
- testPatternMode_ = mode;
-
- return 0;
-}
-
-/**
* \brief Retrieve the best sensor format for a desired output
* \param[in] mbusCodes The list of acceptable media bus codes
* \param[in] size The desired size
@@ -699,7 +743,7 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
}
V4L2SubdeviceFormat format{
- .mbus_code = bestCode,
+ .code = bestCode,
.size = *bestSize,
.colorSpace = ColorSpace::Raw,
};
@@ -710,96 +754,143 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
/**
* \brief Set the sensor output format
* \param[in] format The desired sensor output format
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity.
+ *
+ * If flips are writable they are configured according to the desired Transform.
+ * Transform::Identity always corresponds to H/V flip being disabled if the
+ * controls are writable. Flips are set before the new format is applied as
+ * they can effectively change the Bayer pattern ordering.
*
* The ranges of any controls associated with the sensor are also updated.
*
* \return 0 on success or a negative error code otherwise
*/
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format)
+int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform)
{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
int ret = subdev_->setFormat(pad_, format);
if (ret)
return ret;
- updateControlInfo();
+ subdev_->updateControlInfo();
return 0;
}
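A pipeline handler that must mirror the image would therefore pass the transform together with the format; a sketch, with code standing in for a media bus code supported by the sensor:

	V4L2SubdeviceFormat format{};
	format.code = code;              /* assumed supported mbus code */
	format.size = Size(1920, 1080);
	int ret = sensor->setFormat(&format, Transform::HFlip);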
/**
- * \brief Retrieve the supported V4L2 controls and their information
- *
- * Control information is updated automatically to reflect the current sensor
- * configuration when the setFormat() function is called, without invalidating
- * any iterator on the ControlInfoMap. A manual update can also be forced by
- * calling the updateControlInfo() function for pipeline handlers that change
- * the sensor configuration wihtout using setFormat().
- *
- * \return A map of the V4L2 controls supported by the sensor
- */
-const ControlInfoMap &CameraSensor::controls() const
-{
- return subdev_->controls();
-}
-
-/**
- * \brief Read V4L2 controls from the sensor
- * \param[in] ids The list of controls to read, specified by their ID
- *
- * This function reads the value of all controls contained in \a ids, and
- * returns their values as a ControlList. The control identifiers are defined by
- * the V4L2 specification (V4L2_CID_*).
+ * \brief Try the sensor output format
+ * \param[in] format The desired sensor output format
*
- * If any control in \a ids is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
- * during validation of the requested controls, no control is read and this
- * function returns an empty control list.
+ * The ranges of any controls associated with the sensor are not updated.
*
- * \sa V4L2Device::getControls()
+ * \todo Add support for Transform by changing the format's Bayer ordering
+ * before calling subdev_->setFormat().
*
- * \return The control values in a ControlList on success, or an empty list on
- * error
+ * \return 0 on success or a negative error code otherwise
*/
-ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
+int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const
{
- return subdev_->getControls(ids);
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
}
/**
- * \brief Write V4L2 controls to the sensor
- * \param[in] ctrls The list of controls to write
+ * \brief Apply a sensor configuration to the camera sensor
+ * \param[in] config The sensor configuration
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity
+ * \param[out] sensorFormat Format applied to the sensor (optional)
*
- * This function writes the value of all controls contained in \a ctrls, and
- * stores the values actually applied to the device in the corresponding \a
- * ctrls entry. The control identifiers are defined by the V4L2 specification
- * (V4L2_CID_*).
+ * Apply to the camera sensor the configuration \a config.
*
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
- * error occurs during validation of the requested controls, no control is
- * written and this function returns -EINVAL.
+ * \todo The configuration shall be fully populated and if any of the fields
+ * specified cannot be applied exactly, an error code is returned.
*
- * If an error occurs while writing the controls, the index of the first
- * control that couldn't be written is returned. All controls below that index
- * are written and their values are updated in \a ctrls, while all other
- * controls are not written and their values are not changed.
- *
- * \sa V4L2Device::setControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
+ * \return 0 if \a config is applied correctly to the camera sensor, a negative
+ * error code otherwise
*/
-int CameraSensor::setControls(ControlList *ctrls)
+int CameraSensor::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
{
- return subdev_->setControls(ctrls);
-}
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
-/**
- * \fn CameraSensor::device()
- * \brief Retrieve the camera sensor device
- * \todo Remove this function by integrating DelayedControl with CameraSensor
- * \return The camera sensor device
- */
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+ if (bayer.bitDepth == config.bitDepth)
+ return true;
+ return false;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling will look identical and
+ * only the first found one will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
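A sketch of the intended call site, assuming a SensorConfiguration received from the application and a Transform computed beforehand:

	V4L2SubdeviceFormat sensorFormat;
	int ret = sensor->applyConfiguration(config, transform, &sensorFormat);
	if (ret)
		return ret;  /* invalid or unsupported sensor configuration */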
/**
* \fn CameraSensor::properties()
@@ -819,10 +910,6 @@ int CameraSensor::setControls(ControlList *ctrls)
* Sensor information is only available for raw sensors. When called for a YUV
* sensor, this function returns -EINVAL.
*
- * Pipeline handlers that do not change the sensor format using the setFormat()
- * function may need to call updateControlInfo() beforehand, to ensure all the
- * control ranges are up to date.
- *
* \return 0 on success, a negative error code otherwise
*/
int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
@@ -866,9 +953,13 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
ret = subdev_->getFormat(pad_, &format);
if (ret)
return ret;
- info->bitsPerPixel = format.bitsPerPixel();
+
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
info->outputSize = format.size;
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
/*
* Retrieve the pixel rate, line length and minimum/maximum frame
* duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
@@ -883,10 +974,12 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
return -EINVAL;
}
- int32_t hblank = ctrls.get(V4L2_CID_HBLANK).get<int32_t>();
- info->lineLength = info->outputSize.width + hblank;
info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
@@ -895,50 +988,220 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
}
/**
- * \fn void CameraSensor::updateControlInfo()
- * \brief Update the sensor's ControlInfoMap in case they have changed
- * \sa V4L2Device::updateControlInfo()
+ * \brief Compute the Transform that gives the requested \a orientation
+ * \param[inout] orientation The desired image orientation
+ *
+ * This function computes the Transform that the pipeline handler should apply
+ * to the CameraSensor to obtain the requested \a orientation.
+ *
+ * The intended caller of this function is the validate() implementation of
+ * pipeline handlers, that pass in the application requested
+ * CameraConfiguration::orientation and obtain a Transform to apply to the
+ * camera sensor, likely at configure() time.
+ *
+ * If the requested \a orientation cannot be obtained, the \a orientation
+ * parameter is adjusted to report the current image orientation and
+ * Transform::Identity is returned.
+ *
+ * If the requested \a orientation can be obtained, the function computes a
+ * Transform and does not adjust \a orientation.
+ *
+ * Pipeline handlers are expected to verify if \a orientation has been
+ * adjusted by this function and set the CameraConfiguration::status to
+ * Adjusted accordingly.
+ *
+ * \return A Transform instance that applied to the CameraSensor produces images
+ * with \a orientation
*/
-void CameraSensor::updateControlInfo()
+Transform CameraSensor::computeTransform(Orientation *orientation) const
{
- subdev_->updateControlInfo();
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
}
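The expected call pattern inside a CameraConfiguration::validate() implementation, where orientation is the configuration member, is then (sketch):

	Orientation requested = orientation;
	Transform transform = sensor->computeTransform(&orientation);
	if (orientation != requested)
		status = Adjusted;  /* the requested orientation was not achievable */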
/**
- * \fn CameraSensor::focusLens()
- * \brief Retrieve the focus lens controller
+ * \brief Compute the Bayer order that results from the given Transform
+ * \param[in] t The Transform to apply to the sensor
*
- * \return The focus lens controller. nullptr if no focus lens controller is
- * connected to the sensor
+ * Some sensors change their Bayer order when they are h-flipped or v-flipped.
+ * This function computes and returns the Bayer order that would result from the
+ * given transform applied to the sensor.
+ *
+ * This function is valid only when the sensor produces raw Bayer formats.
+ *
+ * \return The Bayer order produced by the sensor when the Transform is applied
*/
+BayerFormat::Order CameraSensor::bayerOrder(Transform t) const
+{
+ /* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
-std::string CameraSensor::logPrefix() const
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ return bayerFormat_->transform(t).order;
+}
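For instance, a handler that flips the sensor can query the resulting order before programming the downstream debayering stage (sketch):

	BayerFormat::Order order = sensor->bayerOrder(Transform::HFlip);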
+
+/**
+ * \brief Retrieve the supported V4L2 controls and their information
+ *
+ * Control information is updated automatically to reflect the current sensor
+ * configuration when the setFormat() function is called, without invalidating
+ * any iterator on the ControlInfoMap.
+ *
+ * \return A map of the V4L2 controls supported by the sensor
+ */
+const ControlInfoMap &CameraSensor::controls() const
{
- return "'" + entity_->name() + "'";
+ return subdev_->controls();
}
-int CameraSensor::generateId()
+/**
+ * \brief Read V4L2 controls from the sensor
+ * \param[in] ids The list of controls to read, specified by their ID
+ *
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList. The control identifiers are defined by
+ * the V4L2 specification (V4L2_CID_*).
+ *
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
+ *
+ * \sa V4L2Device::getControls()
+ *
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
+ */
+ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
{
- const std::string devPath = subdev_->devicePath();
+ return subdev_->getControls(ids);
+}
- /* Try to get ID from firmware description. */
- id_ = sysfs::firmwareNodePath(devPath);
- if (!id_.empty())
+/**
+ * \brief Write V4L2 controls to the sensor
+ * \param[in] ctrls The list of controls to write
+ *
+ * This function writes the value of all controls contained in \a ctrls, and
+ * stores the values actually applied to the device in the corresponding \a
+ * ctrls entry. The control identifiers are defined by the V4L2 specification
+ * (V4L2_CID_*).
+ *
+ * If any control in \a ctrls is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
+ *
+ * If an error occurs while writing the controls, the index of the first
+ * control that couldn't be written is returned. All controls below that index
+ * are written and their values are updated in \a ctrls, while all other
+ * controls are not written and their values are not changed.
+ *
+ * \sa V4L2Device::setControls()
+ *
+ * \return 0 on success or an error code otherwise
+ * \retval -EINVAL One of the control is not supported or not accessible
+ * \retval i The index of the control that failed
+ */
+int CameraSensor::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+/**
+ * \fn CameraSensor::testPatternModes()
+ * \brief Retrieve all the supported test pattern modes of the camera sensor
+ * The test pattern mode values correspond to the controls::TestPattern control.
+ *
+ * \return The list of test pattern modes
+ */
+
+/**
+ * \brief Set the test pattern mode for the camera sensor
+ * \param[in] mode The test pattern mode
+ *
+ * The new \a mode is applied to the sensor if it differs from the active test
+ * pattern mode. Otherwise, this function is a no-op. Setting the same test
+ * pattern mode for every frame thus incurs no performance penalty.
+ */
+int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
return 0;
- /*
- * Virtual sensors not described in firmware
- *
- * Verify it's a platform device and construct ID from the device path
- * and model of sensor.
- */
- if (devPath.find("/sys/devices/platform/", 0) == 0) {
- id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
}
- LOG(CameraSensor, Error) << "Can't generate sensor ID";
- return -EINVAL;
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensor::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
}
} /* namespace libcamera */
diff --git a/src/libcamera/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
index e5f27f06..b18524d8 100644
--- a/src/libcamera/camera_sensor_properties.cpp
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_properties.cpp - Database of camera sensor properties
+ * Database of camera sensor properties
*/
#include "libcamera/internal/camera_sensor_properties.h"
@@ -52,6 +52,15 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties)
const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
{
static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0521", {
+ .unitCellSize = { 2200, 2200 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ } },
{ "hi846", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
@@ -90,6 +99,10 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModePn9, 4 },
},
} },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ } },
{ "imx290", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
@@ -98,10 +111,58 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
.unitCellSize = { 3450, 3450 },
.testPatternModes = {},
} },
+ { "imx327", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ } },
{ "imx477", {
.unitCellSize = { 1550, 1550 },
.testPatternModes = {},
} },
+ { "imx519", {
+ .unitCellSize = { 1220, 1220 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * The driver reports ColorBars and ColorBarsFadeToGray as well but
+ * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
+ */
+ },
+ } },
+ { "imx708", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ } },
+ { "ov2685", {
+ .unitCellSize = { 1750, 1750 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1},
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 3: "Random Data"
+ * 4: "Black White Square"
+ * 5: "Color Square"
+ */
+ },
+ } },
{ "ov2740", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
@@ -109,6 +170,19 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBars, 1},
},
} },
+ { "ov4689", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1},
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2},
+ /*
+ * No corresponding test patterns in
+ * MIPI CCS specification for sensor's
+ * colorBarType2 and colorBarType3.
+ */
+ },
+ } },
{ "ov5640", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
@@ -146,6 +220,32 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
*/
},
} },
+ { "ov64a40", {
+ .unitCellSize = { 1008, 1008 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ } },
+ { "ov8858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ } },
{ "ov8865", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
new file mode 100644
index 00000000..bf4b131a
--- /dev/null
+++ b/src/libcamera/sensor/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'camera_sensor.cpp',
+ 'camera_sensor_properties.cpp',
+])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..809fbdaf
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can be also checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together and use the shared memory allocated by a single SharedMem object if
+ * possible. This will help to minimize the number of created memfd's.
+ */
+
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purpose only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+#if HAVE_MEMFD_CREATE
+ int fd = memfd_create(name.c_str(), MFD_CLOEXEC);
+#else
+ int fd = syscall(SYS_memfd_create, name.c_str(), MFD_CLOEXEC);
+#endif
+ if (fd < 0)
+ return;
+
+ fd_ = SharedFD(std::move(fd));
+ if (!fd_.isValid())
+ return;
+
+ if (ftruncate(fd_.get(), size) < 0) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam T The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the size of the
+ * object \a T. The bool() operator should be used to check that the allocation
+ * was successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
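+ *
+ * For illustration, a minimal sketch with a hypothetical type:
+ *
+ * \code
+ * struct IspParams {
+ *         uint32_t gain;
+ *         uint32_t exposure;
+ * };
+ *
+ * SharedMemObject<IspParams> params("isp-params");
+ * if (!params)
+ *         return;
+ *
+ * params->gain = 256;
+ * \endcode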
+ */
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMemObject
+ * instances can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..4fcee39b
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,279 @@
+1. Setting F_SEAL_SHRINK and F_SEAL_GROW after ftruncate()
+
+>> SharedMem::SharedMem(const std::string &name, std::size_t size)
+>> : name_(name), size_(size), mem_(nullptr)
+>>
+>> ...
+>>
+>> if (ftruncate(fd_.get(), size_) < 0)
+>> return;
+>
+> Should we set the GROW and SHRINK seals (in a separate patch)?
+
+Yes, this can be done.
+Setting F_SEAL_SHRINK and F_SEAL_GROW after the ftruncate() call above could catch
+some potential errors related to improper access to the shared memory allocated by
+the SharedMemObject.
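+
+A possible shape for this, as a sketch only (note that fcntl(F_ADD_SEALS)
+requires the memfd to be created with MFD_ALLOW_SEALING):
+
+  int fd = memfd_create(name.c_str(), MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+  if (ftruncate(fd_.get(), size) < 0)
+          return;
+
+  if (fcntl(fd_.get(), F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0)
+          return;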
+
+---
+
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretic problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer when emitting the signal?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
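+
+A rough sketch of the ring buffer idea (hypothetical names; assumes the
+statsReady signal gains an index parameter and, following the SharedMem
+guidance, groups all the buffers into a single shared memory object):
+
+  struct StatsRing {
+          SwIspStats stats[4];
+  };
+
+  Signal<unsigned int> statsReady;
+  SharedMemObject<StatsRing> sharedStats_{ "stats-ring" };
+  unsigned int statsIndex_ = 0;
+
+  void SwStatsCpu::finishFrame()
+  {
+          sharedStats_->stats[statsIndex_] = stats_;
+          statsReady.emit(statsIndex_);
+          statsIndex_ = (statsIndex_ + 1) % 4;
+  }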
+
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+Better still, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't mark it as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now; I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
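+
+As a sketch of the caller-emits idea (hypothetical; assumes the signal moves
+from SwStatsCpu to the class driving it):
+
+  void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+  {
+          /* ... debayer the frame and gather statistics line by line ... */
+          stats_->finishFrame();
+          statsReady.emit(); /* Emitted by the caller of finishFrame() */
+  }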
+
+---
+
+4. Hide internal representation of gains from callers
+
+> struct DebayerParams {
+> static constexpr unsigned int kGain10 = 256;
+
+Forcing the caller to deal with the internal representation of gains
+isn't nice, especially given that it precludes implementing gains of
+different precisions in different backends. Wouldn't it be better to pass
+the values as floating point numbers, and convert them to the internal
+representation in the implementation of process() before using them?
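+
+For instance (a sketch; the fixed-point conversion would live in each
+backend's process() implementation):
+
+  struct DebayerParams {
+          float gainR; /* Linear gain, 1.0 = no gain */
+          float gainG;
+          float gainB;
+  };
+
+  /* In DebayerCpu::process(), convert to the internal 8.8 fixed point */
+  unsigned int gainR = std::clamp(static_cast<int>(params.gainR * 256.0f), 0, 1023);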
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the L2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me think we have either a naming issue or an architecture issue.
+>
+> I agree that this does a bit more than debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the let's wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+9. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+10. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner rather than later?
+
+---
+
+11. Switch to the libipa/algorithm.h API in processStats
+
+>> void IPASoftSimple::processStats(const ControlList &sensorControls)
+>>
+> Do you envision switching to the libipa/algorithm.h API at some point?
+
+At some point, yes.
+
+---
+
+12. Improve handling of the sensor controls which take effect with a delay
+
+> void IPASoftSimple::processStats(const ControlList &sensorControls)
+> {
+> ...
+> /*
+> * AE / AGC, use 2 frames delay to make sure that the exposure and
+> * the gain set have applied to the camera sensor.
+> */
+> if (ignore_updates_ > 0) {
+> --ignore_updates_;
+> return;
+> }
+
+This could be handled better with DelayedControls.
+
+---
+
+13. Use the DelayedControls class in ispStatsReady()
+
+> void SimpleCameraData::ispStatsReady()
+> {
+> swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+> V4L2_CID_EXPOSURE }));
+
+You should use the DelayedControls class.
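+
+A sketch of what this could look like in the simple pipeline handler
+(assuming the DelayedControls API used by other pipeline handlers; the
+delay values here are made up and would need per-sensor tuning):
+
+  std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+          { V4L2_CID_ANALOGUE_GAIN, { 2, false } },
+          { V4L2_CID_EXPOSURE, { 2, false } },
+  };
+  delayedCtrls_ = std::make_unique<DelayedControls>(sensor_->device(), params);
+
+  /* On frame start, write out the controls queued for this frame */
+  delayedCtrls_->applyControls(sequence);
+
+  /* In ispStatsReady(), hand the IPA the controls in effect for the frame */
+  swIsp_->processStats(delayedCtrls_->get(sequence));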
+
+---
+
+14. Improve black level and colour gain application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..efe75ea8
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kGain10
+ * \brief Constant value corresponding to a gain of 1.0
+ */
+
+/**
+ * \var DebayerParams::gainR
+ * \brief Red gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainG
+ * \brief Green gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainB
+ * \brief Blue gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gamma
+ * \brief Gamma correction, 1.0 is no correction
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed-in parameters.
+ * \param[in] inputCfg The input configuration.
+ * \param[in] outputCfgs The output configurations.
+ *
+ * \return 0 on success, a negative errno on failure.
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats.
+ * \param[in] inputFormat The input format.
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats.
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats.
+ * \param[in] inputFormat The input format.
+ *
+ * \return All supported output formats or an empty vector if there are none.
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size.
+ * \param[in] outputFormat The output format.
+ * \param[in] size The output size.
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config.
+ */
+
+/**
+ * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format.
+ * \param[in] input The input buffer.
+ * \param[in] output The output buffer.
+ * \param[in] params The parameters to be used in debayering.
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size.
+ * \param[in] inputFormat The input format.
+ * \param[in] inputSize The input size.
+ *
+ * \return The valid size ranges or an empty range if there are none.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..c151fe5d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..8254bbe9
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Construct a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats)), gammaCorrection_(1.0), blackLevel_(0)
+{
+ /*
+ * Reading from uncached buffers may be very slow.
+ * In such a case, it's better to copy input buffer data to normal memory.
+ * But in case of cached buffers, copying the data is unnecessary overhead.
+ * enableInputMemcpy_ makes this behavior configurable. At the moment, we
+ * always set it to true as the safer choice, but this should be changed
+ * in the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize gamma to 1.0 curve */
+ for (unsigned int i = 0; i < kGammaLookupSize; i++)
+ gamma_[i] = i / (kGammaLookupSize / kRGBLookupSize);
+
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ lineBuffers_[i] = nullptr;
+}
+
+DebayerCpu::~DebayerCpu()
+{
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ free(lineBuffers_[i]);
+}
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ x++;
+
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+ * For the first pixel, getting a pixel from the previous column uses
+ * x - 2 to skip the 5th byte holding the least-significant bits for 4
+ * pixels. The same applies to the last pixel (using x + 2) when looking
+ * at the next column.
+ */
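+	/*
+	 * CSI-2 10-bit packing repeats every 5 bytes: bytes 0-3 hold the
+	 * high 8 bits of pixels 0-3, and the 5th byte holds the 2
+	 * least-significant bits of each of the 4 pixels.
+	 */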
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+ /* Odd pixel GBRG -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Fill the DebayerInputConfig for the given input format.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported inputFormat).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input/output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::RGB888:
+ break;
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = &DebayerCpu::debayer8_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer8_GRGR_BGR888;
+ break;
+ case 10:
+ debayer0_ = &DebayerCpu::debayer10_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10_GRGR_BGR888;
+ break;
+ case 12:
+ debayer0_ = &DebayerCpu::debayer12_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer12_GRGR_BGR888;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+ /* Pad with patternSize.width on both the left and right side */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+ for (unsigned int i = 0;
+ i < (inputConfig_.patternSize.height + 1) && enableInputMemcpy_;
+ i++) {
+ free(lineBuffers_[i]);
+ lineBuffers_[i] = (uint8_t *)malloc(lineBufferLength_);
+ if (!lineBuffers_[i])
+ return -ENOMEM;
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+ /* Round up to a multiple of 8 for 64-bit alignment */
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i], linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i] + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_], linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_] + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as prev line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* next line may point outside of src, use prev. */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+static inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ /* Apply DebayerParams */
+ if (params.gamma != gammaCorrection_ || params.blackLevel != blackLevel_) {
+ const unsigned int blackIndex =
+ params.blackLevel * kGammaLookupSize / 256;
+ std::fill(gamma_.begin(), gamma_.begin() + blackIndex, 0);
+ const float divisor = kGammaLookupSize - blackIndex - 1.0;
+ for (unsigned int i = blackIndex; i < kGammaLookupSize; i++)
+ gamma_[i] = UINT8_MAX * powf((i - blackIndex) / divisor, params.gamma);
+
+ gammaCorrection_ = params.gamma;
+ blackLevel_ = params.blackLevel;
+ }
+
+ if (swapRedBlueGains_)
+ std::swap(params.gainR, params.gainB);
+
+ for (unsigned int i = 0; i < kRGBLookupSize; i++) {
+ constexpr unsigned int div =
+ kRGBLookupSize * DebayerParams::kGain10 / kGammaLookupSize;
+ unsigned int idx;
+
+ /* Apply gamma after gain! */
+ idx = std::min({ i * params.gainR / div, (kGammaLookupSize - 1) });
+ red_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainG / div, (kGammaLookupSize - 1) });
+ green_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainB / div, (kGammaLookupSize - 1) });
+ blue_[i] = gamma_[idx];
+ }
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ stats_->finishFrame();
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+ * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..de216fe3
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer 1 line of Bayer input data to output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (inputConfig_.patternSize.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+ * colors. To allow copying the src lines only once, 3 temporary buffers
+ * each holding a single line are used, re-using the oldest buffer for
+ * the next line, and the pointers are swizzled so that:
+ * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly for bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ static constexpr unsigned int kGammaLookupSize = 1024;
+ static constexpr unsigned int kRGBLookupSize = 256;
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ std::array<uint8_t, kGammaLookupSize> gamma_;
+ std::array<uint8_t, kRGBLookupSize> red_;
+ std::array<uint8_t, kRGBLookupSize> green_;
+ std::array<uint8_t, kRGBLookupSize> blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ uint8_t *lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ float gammaCorrection_;
+ unsigned int blackLevel_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames for things to stabilize, then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..f7c66e28
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..c9b6be56
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
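+ *
+ * The SoftwareIsp class ties together the CPU-based debayering, the
+ * statistics gathering and the software IPA: buffers queued through
+ * queueBuffers() are debayered by a DebayerCpu instance on a dedicated
+ * worker thread, the resulting statistics are forwarded to the IPA through
+ * processStats(), and the IPA feeds back updated DebayerParams and sensor
+ * control values.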
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Construct a SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor)
+ : debayerParams_{ DebayerParams::kGain10, DebayerParams::kGain10,
+ DebayerParams::kGain10, 0.5f, 0 },
+ dmaHeap_(DmaHeap::DmaHeapFlag::Cma | DmaHeap::DmaHeapFlag::System)
+{
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaHeap object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+ * The IPA tuning file is named after the sensor. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls());
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* Make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns "success".
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for the ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(sensorControls);
+}
+
+/**
+ * \brief Check the validity of the SoftwareIsp object
+ * \return True if the SoftwareIsp is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed-in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] sensorControls ControlInfoMap of the controls supported by the sensor
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ControlInfoMap &sensorControls)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(sensorControls);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] output Output stream index exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+	/* Only a single output is supported for now. */
+ if (output >= 1)
+ return -EINVAL;
+
+ for (unsigned int i = 0; i < count; i++) {
+ const std::string name = "frame-" + std::to_string(i);
+ const size_t frameSize = debayer_->frameSize();
+
+ FrameBuffer::Plane outPlane;
+ outPlane.fd = SharedFD(dmaHeap_.alloc(name.c_str(), frameSize));
+ if (!outPlane.fd.isValid()) {
+ LOG(SoftwareIsp, Error)
+ << "failed to allocate a dma_buf";
+ return -ENOMEM;
+ }
+ outPlane.offset = 0;
+ outPlane.length = frameSize;
+
+ std::vector<FrameBuffer::Plane> planes{ outPlane };
+ buffers->emplace_back(std::make_unique<FrameBuffer>(std::move(planes)));
+ }
+
+ return count;
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream indexes and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs)
+{
+ unsigned int mask = 0;
+
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream and no two
+ * outputs can reference the same stream.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [index, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+		if (index >= 1) /* only a single stream for now */
+ return -EINVAL;
+ if (mask & (1 << index))
+ return -EINVAL;
+
+ mask |= 1 << index;
+ }
+
+ process(input, outputs.at(0));
+
+ return 0;
+}
+
+/**
+ * \brief Start the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stop the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
+
+/**
+ * \brief Pass the input framebuffer to the ISP worker for processing
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(FrameBuffer *input, FrameBuffer *output)
+{
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady()
+{
+ ispStatsReady.emit();
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..815c4d4f
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function + functions to gather statistics on a
+ * line by line basis. This allows CPU based software debayering to interleave
+ * debayering and statistics gathering on a line by line basis while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
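+ *
+ * A minimal usage sketch (illustrative only; lineStart() is a hypothetical
+ * helper returning the array of per-line source pointers):
+ * \code
+ * SwStatsCpu stats;
+ * stats.configure(inputCfg);
+ * stats.setWindow(Rectangle(inputCfg.size));
+ *
+ * stats.startFrame();
+ * for (unsigned int y = 0; y < height; y += stats.patternSize().height)
+ *         stats.processLine0(y, lineStart(y));
+ * stats.finishFrame();
+ * \endcode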
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Get whether the statistics object is valid
+ *
+ * \return True if the object is valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done multiple lines
+ * and/or columns at a time. This function returns the width and height at
+ * which the (Bayer) pattern repeats. Window values are rounded down to a
+ * multiple of this size, and the height also indicates whether processLine2()
+ * needs to be called.
+ * This function may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with a patternSize height
+ * of 1. It processes lines 0 and 1 for input formats with a patternSize
+ * height of 2 or more.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process line 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with a patternSize
+ * height of 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
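+/*
+ * A note on the histogram bin computed in SWSTATS_ACCUMULATE_LINE_STATS():
+ * the three Y multipliers sum to 256 and each colour component is smaller
+ * than 256 * div, so yVal is always smaller than 256 * 256 * div and the
+ * computed bin index stays within [0, SwIspStats::kYHistogramSize).
+ */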
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
+
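+/*
+ * The other standard Bayer orders are handled with the BGGR functions below
+ * by shifting the window one pixel to the right (xShift_) and/or swapping
+ * the two source line pointers (swapLines_), as set up in
+ * setupStandardBayerOrder().
+ */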
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+	/* x += 4 to sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
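+/*
+ * The CSI-2 packed 10-bit format stores 4 pixels in 5 bytes: 4 bytes with
+ * the 8 high order bits of each pixel, followed by 1 byte with the 4 pairs
+ * of low order bits. The packed functions below only sample the high order
+ * bytes, so the values are effectively 8 bpp and need no further scaling.
+ */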
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+	/* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+	/* x += 5 to sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(void)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit();
+}
+
+/**
+ * \brief Set up the SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if \a order is a standard Bayer order and set up xShift_ and
+ * swapLines_ so that a single BGGR stats function can be used for all 4
+ * standard orders.
+ *
+ * \return 0 on success, a negative errno value if \a order is not a standard
+ * Bayer order
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+	ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
+
+/**
+ * \brief Configure the statistics object for the passed in input format
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+		/* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window over which to gather statistics
+ *
+ * The window is adjusted to fit the Bayer pattern: its coordinates and size
+ * are rounded down to a multiple of the pattern size.
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
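+	/*
+	 * Round down to a multiple of the pattern size. This relies on the
+	 * pattern dimensions being powers of two (2x2, 4x2 or 4x4).
+	 */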
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+	/* Reduce the width by xShift_ to make sure the shifted window still fits. */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..363e326f
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame();
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
index 19689585..1af5386a 100644
--- a/src/libcamera/source_paths.cpp
+++ b/src/libcamera/source_paths.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.cpp - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#include "libcamera/internal/source_paths.h"
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index 686e693b..053cc4b8 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
@@ -311,7 +311,8 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* The stride value reports the number of bytes between the beginning of
* successive lines in an image buffer for this stream. The value is
* valid after successfully validating the configuration with a call to
- * CameraConfiguration::validate().
+ * CameraConfiguration::validate(). For compressed formats (such as MJPEG),
+ * this value will be zero.
*/
/**
@@ -418,9 +419,23 @@ std::string StreamConfiguration::toString() const
*/
/**
- * \typedef StreamRoles
- * \brief A vector of StreamRole
+ * \brief Insert a text representation of a StreamRole into an output stream
+ * \param[in] out The output stream
+ * \param[in] role The StreamRole
+ * \return The output stream \a out
*/
+std::ostream &operator<<(std::ostream &out, StreamRole role)
+{
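+	/*
+	 * The array is indexed by the StreamRole enumerators, which are
+	 * expected to be contiguous and start at zero.
+	 */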
+ static constexpr std::array<const char *, 4> names{
+ "Raw",
+ "StillCapture",
+ "VideoRecording",
+ "Viewfinder",
+ };
+
+ out << names[utils::to_underlying(role)];
+ return out;
+}
/**
* \class Stream
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
index 44c3331b..3d9885b0 100644
--- a/src/libcamera/sysfs.cpp
+++ b/src/libcamera/sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.cpp - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#include "libcamera/internal/sysfs.h"
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
index 0173b75a..90662d12 100644
--- a/src/libcamera/tracepoints.cpp
+++ b/src/libcamera/tracepoints.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * tracepoints.cpp - Tracepoints with lttng
+ * Tracepoints with lttng
*/
#define TRACEPOINT_CREATE_PROBES
#define TRACEPOINT_DEFINE
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
index 99a043ba..9fe8b562 100644
--- a/src/libcamera/transform.cpp
+++ b/src/libcamera/transform.cpp
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.cpp - 2D plane transforms.
+ * 2D plane transforms.
*/
#include <libcamera/transform.h>
+#include <libcamera/orientation.h>
+
/**
* \file transform.h
* \brief Enum to represent and manipulate 2D plane transforms
@@ -187,24 +189,24 @@ Input image | | goes to output image | |
*/
/**
- * \brief Compose two transforms together
- * \param[in] t1 The second transform
- * \param[in] t0 The first transform
+ * \brief Compose two transforms by applying \a t0 first then \a t1
+ * \param[in] t0 The first transform to apply
+ * \param[in] t1 The second transform to apply
*
- * Composing transforms follows the usual mathematical convention for
- * composing functions. That is, when performing `t1 * t0`, \a t0 is applied
- * first, and then \a t1.
- * For example, `Transpose * HFlip` performs `HFlip` first and then the
- * `Transpose` yielding `Rot270`, as shown below.
+ * Compose two transforms into a transform that is equivalent to first applying
+ * \a t0 and then applying \a t1. For example, `HFlip * Transpose` performs
+ * `HFlip` first and then the `Transpose` yielding `Rot270`, as shown below.
~~~
A-B B-A B-D
Input image | | -> HFLip -> | | -> Transpose -> | | = Rot270
C-D D-C A-C
~~~
- * Note that composition is generally non-commutative for Transforms,
- * and not the same as XOR-ing the underlying bit representations.
+ * Note that composition is generally non-commutative for Transforms, and not
+ * the same as XOR-ing the underlying bit representations.
+ *
+ * \return A Transform equivalent to applying \a t0 and then \a t1
*/
-Transform operator*(Transform t1, Transform t0)
+Transform operator*(Transform t0, Transform t1)
{
/*
* Reorder the operations so that we imagine doing t0's transpose
@@ -299,6 +301,91 @@ Transform transformFromRotation(int angle, bool *success)
return Transform::Identity;
}
+namespace {
+
+/**
+ * \brief Return the transform representing \a orientation
+ * \param[in] orientation The orientation to convert
+ * \return The transform corresponding to \a orientation
+ */
+Transform transformFromOrientation(const Orientation &orientation)
+{
+ switch (orientation) {
+ case Orientation::Rotate0:
+ return Transform::Identity;
+ case Orientation::Rotate0Mirror:
+ return Transform::HFlip;
+ case Orientation::Rotate180:
+ return Transform::Rot180;
+ case Orientation::Rotate180Mirror:
+ return Transform::VFlip;
+ case Orientation::Rotate90Mirror:
+ return Transform::Transpose;
+ case Orientation::Rotate90:
+ return Transform::Rot90;
+ case Orientation::Rotate270Mirror:
+ return Transform::Rot180Transpose;
+ case Orientation::Rotate270:
+ return Transform::Rot270;
+ }
+
+ return Transform::Identity;
+}
+
+} /* namespace */
+
+/**
+ * \brief Return the Transform that applied to \a o2 gives \a o1
+ * \param[in] o1 The Orientation to obtain
+ * \param[in] o2 The base Orientation
+ *
+ * This operation can be used to easily compute the Transform to apply to a
+ * base orientation \a o2 to get the desired orientation \a o1.
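+ *
+ * For example, for a camera module mounted upside down, the transform that
+ * produces upright images is `Rotate0 / Rotate180`, which yields `Rot180`.
+ * In general `o2 * (o1 / o2) == o1` holds for any pair of orientations.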
+ *
+ * \return A Transform that applied to \a o2 gives \a o1
+ */
+Transform operator/(const Orientation &o1, const Orientation &o2)
+{
+ Transform t1 = transformFromOrientation(o1);
+ Transform t2 = transformFromOrientation(o2);
+
+ return -t2 * t1;
+}
+
+/**
+ * \brief Apply the Transform \a t on the orientation \a o
+ * \param[in] o The orientation
+ * \param[in] t The transform to apply on \a o
+ * \return The Orientation resulting from applying \a t on \a o
+ */
+Orientation operator*(const Orientation &o, const Transform &t)
+{
+ /*
+ * Apply a Transform corresponding to the orientation first and
+ * then apply \a t to it.
+ */
+ switch (transformFromOrientation(o) * t) {
+ case Transform::Identity:
+ return Orientation::Rotate0;
+ case Transform::HFlip:
+ return Orientation::Rotate0Mirror;
+ case Transform::VFlip:
+ return Orientation::Rotate180Mirror;
+ case Transform::Rot180:
+ return Orientation::Rotate180;
+ case Transform::Transpose:
+ return Orientation::Rotate90Mirror;
+ case Transform::Rot270:
+ return Orientation::Rotate270;
+ case Transform::Rot90:
+ return Orientation::Rotate90;
+ case Transform::Rot180Transpose:
+ return Orientation::Rotate270Mirror;
+ }
+
+ return Orientation::Rotate0;
+}
+
/**
* \brief Return a character string describing the transform
* \param[in] t The transform to be described.
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 3fc8438f..4a2048cf 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#include "libcamera/internal/v4l2_device.h"
@@ -24,6 +24,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/sysfs.h"
/**
@@ -85,18 +86,18 @@ int V4L2Device::open(unsigned int flags)
return -EBUSY;
}
- UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), flags));
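+	/* Open with O_CLOEXEC to avoid leaking the fd to child processes. */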
+ UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ flags | O_CLOEXEC));
if (!fd.isValid()) {
int ret = -errno;
- LOG(V4L2, Error) << "Failed to open V4L2 device: "
+ LOG(V4L2, Error) << "Failed to open V4L2 device '"
+ << deviceNode_ << "': "
<< strerror(-ret);
return ret;
}
setFd(std::move(fd));
- listControls();
-
return 0;
}
@@ -127,6 +128,8 @@ int V4L2Device::setFd(UniqueFD fd)
fdEventNotifier_->activated.connect(this, &V4L2Device::eventAvailable);
fdEventNotifier_->setEnabled(false);
+ listControls();
+
return 0;
}
@@ -242,7 +245,8 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to read control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to read control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -352,7 +356,8 @@ int V4L2Device::setControls(ControlList *ctrls)
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to set control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to set control " << utils::hex(id)
<< ": " << strerror(-ret);
v4l2Ctrls.resize(errorIdx);
@@ -525,7 +530,7 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &
* \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
- * \return A ControlInfo that represents \a ctrl
+ * \return A ControlInfo that represents \a ctrl, or std::nullopt if the
+ * control type isn't supported
*/
-ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
{
switch (ctrl.type) {
case V4L2_CTRL_TYPE_U8:
@@ -562,14 +567,14 @@ ControlInfo V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
*
- * \return A ControlInfo that represents \a ctrl
+ * \return A ControlInfo that represents \a ctrl, or std::nullopt if \a ctrl
+ * is not valid
*/
-ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
+std::optional<ControlInfo> V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
{
std::vector<ControlValue> indices;
struct v4l2_querymenu menu = {};
menu.id = ctrl.id;
if (ctrl.minimum < 0)
- return ControlInfo();
+ return std::nullopt;
for (int32_t index = ctrl.minimum; index <= ctrl.maximum; ++index) {
menu.index = index;
@@ -579,6 +584,14 @@ ControlInfo V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ct
indices.push_back(index);
}
+ /*
+ * Some faulty UVC devices are known to return an empty menu control.
+	 * Controls without any menu option can neither be set nor read, so
+	 * they are not exposed.
+ */
+	if (indices.empty())
+ return std::nullopt;
+
return ControlInfo(indices,
ControlValue(static_cast<int32_t>(ctrl.default_value)));
}
@@ -627,7 +640,17 @@ void V4L2Device::listControls()
controlIdMap_[ctrl.id] = controlIds_.back().get();
controlInfo_.emplace(ctrl.id, ctrl);
- ctrls.emplace(controlIds_.back().get(), v4l2ControlInfo(ctrl));
+ std::optional<ControlInfo> info = v4l2ControlInfo(ctrl);
+
+ if (!info) {
+ LOG(V4L2, Error)
+ << "Control " << ctrl.name
+ << " cannot be registered";
+
+ continue;
+ }
+
+ ctrls.emplace(controlIds_.back().get(), *info);
}
controls_ = ControlInfoMap(std::move(ctrls), controlIdMap_);
@@ -666,7 +689,7 @@ void V4L2Device::updateControlInfo()
continue;
}
- info = v4l2ControlInfo(ctrl);
+ info = *v4l2ControlInfo(ctrl);
}
}
@@ -745,8 +768,12 @@ void V4L2Device::eventAvailable()
static const std::map<uint32_t, ColorSpace> v4l2ToColorSpace = {
{ V4L2_COLORSPACE_RAW, ColorSpace::Raw },
- { V4L2_COLORSPACE_JPEG, ColorSpace::Jpeg },
- { V4L2_COLORSPACE_SRGB, ColorSpace::Srgb },
+ { V4L2_COLORSPACE_SRGB, {
+ ColorSpace::Primaries::Rec709,
+ ColorSpace::TransferFunction::Srgb,
+ ColorSpace::YcbcrEncoding::Rec601,
+ ColorSpace::Range::Limited } },
+ { V4L2_COLORSPACE_JPEG, ColorSpace::Sycc },
{ V4L2_COLORSPACE_SMPTE170M, ColorSpace::Smpte170m },
{ V4L2_COLORSPACE_REC709, ColorSpace::Rec709 },
{ V4L2_COLORSPACE_BT2020, ColorSpace::Rec2020 },
@@ -771,8 +798,7 @@ static const std::map<uint32_t, ColorSpace::Range> v4l2ToRange = {
static const std::vector<std::pair<ColorSpace, v4l2_colorspace>> colorSpaceToV4l2 = {
{ ColorSpace::Raw, V4L2_COLORSPACE_RAW },
- { ColorSpace::Jpeg, V4L2_COLORSPACE_JPEG },
- { ColorSpace::Srgb, V4L2_COLORSPACE_SRGB },
+ { ColorSpace::Sycc, V4L2_COLORSPACE_JPEG },
{ ColorSpace::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
{ ColorSpace::Rec709, V4L2_COLORSPACE_REC709 },
{ ColorSpace::Rec2020, V4L2_COLORSPACE_BT2020 },
@@ -792,6 +818,8 @@ static const std::map<ColorSpace::TransferFunction, v4l2_xfer_func> transferFunc
};
static const std::map<ColorSpace::YcbcrEncoding, v4l2_ycbcr_encoding> ycbcrEncodingToV4l2 = {
+ /* V4L2 has no "none" encoding. */
+ { ColorSpace::YcbcrEncoding::None, V4L2_YCBCR_ENC_DEFAULT },
{ ColorSpace::YcbcrEncoding::Rec601, V4L2_YCBCR_ENC_601 },
{ ColorSpace::YcbcrEncoding::Rec709, V4L2_YCBCR_ENC_709 },
{ ColorSpace::YcbcrEncoding::Rec2020, V4L2_YCBCR_ENC_BT2020 },
@@ -805,6 +833,7 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
/**
* \brief Convert the color space fields in a V4L2 format to a ColorSpace
* \param[in] v4l2Format A V4L2 format containing color space information
+ * \param[in] colourEncoding Type of colour encoding
*
* The colorspace, ycbcr_enc, xfer_func and quantization fields within a
* V4L2 format structure are converted to a corresponding ColorSpace.
@@ -816,7 +845,8 @@ static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
* \retval std::nullopt One or more V4L2 color space fields were not recognised
*/
template<typename T>
-std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
+std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding)
{
auto itColor = v4l2ToColorSpace.find(v4l2Format.colorspace);
if (itColor == v4l2ToColorSpace.end())
@@ -839,6 +869,14 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.ycbcrEncoding = itYcbcrEncoding->second;
+
+ /*
+ * V4L2 has no "none" encoding, override the value returned by
+ * the kernel for non-YUV formats as YCbCr encoding isn't
+ * applicable in that case.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
}
if (v4l2Format.quantization != V4L2_QUANTIZATION_DEFAULT) {
@@ -847,14 +885,24 @@ std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format)
return std::nullopt;
colorSpace.range = itRange->second;
+
+ /*
+ * "Limited" quantization range is only meant for YUV formats.
+ * Override the range to "Full" for all other formats.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.range = ColorSpace::Range::Full;
}
return colorSpace;
}
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &);
-template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &,
+ PixelFormatInfo::ColourEncoding);
/**
* \brief Fill in the color space fields of a V4L2 format from a ColorSpace
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
index 58fc4e9d..70568335 100644
--- a/src/libcamera/v4l2_pixelformat.cpp
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.cpp - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#include "libcamera/internal/v4l2_pixelformat.h"
@@ -71,6 +71,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
{ formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
/* YUV packed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
@@ -81,6 +85,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::UYVY, "UYVY 4:2:2" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
{ formats::VYUY, "VYUY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32),
+ { formats::AVUY8888, "32-bit YUVA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32),
+ { formats::XVUY8888, "32-bit YUVX 8-8-8-8" } },
/* YUV planar formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
@@ -119,7 +127,7 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::YVU422, "Planar YVU 4:2:2 (N-C)" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
{ formats::YUV444, "Planar YUV 4:4:4 (N-C)" } },
- { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
{ formats::YVU444, "Planar YVU 4:4:4 (N-C)" } },
/* Greyscale formats. */
@@ -127,8 +135,12 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::R8, "8-bit Greyscale" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y10),
{ formats::R10, "10-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
+ { formats::R10_CSI2P, "10-bit Greyscale Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y12),
{ formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y16),
+ { formats::R16, "16-bit Greyscale" } },
/* Bayer formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
@@ -171,6 +183,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG12_CSI2P, "12-bit Bayer GRGR/BGBG Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
{ formats::SRGGB12_CSI2P, "12-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14),
+ { formats::SBGGR14, "14-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14),
+ { formats::SGBRG14, "14-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14),
+ { formats::SGRBG14, "14-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14),
+ { formats::SRGGB14, "14-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P),
+ { formats::SBGGR14_CSI2P, "14-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P),
+ { formats::SGBRG14_CSI2P, "14-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P),
+ { formats::SGRBG14_CSI2P, "14-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P),
+ { formats::SRGGB14_CSI2P, "14-bit Bayer RGRG/GBGB Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
{ formats::SBGGR16, "16-bit Bayer BGBG/GRGR" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
@@ -179,10 +207,22 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
{ formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
/* Compressed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
{ formats::MJPEG, "Motion-JPEG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ { formats::MJPEG, "JPEG JFIF" } },
};
} /* namespace */
@@ -286,15 +326,23 @@ const char *V4L2PixelFormat::description() const
/**
* \brief Convert the V4L2 pixel format to the corresponding PixelFormat
+ * \param[in] warn When true, log a warning message if the V4L2 pixel format
+ * isn't known
+ *
+ * Users of this function might try to convert a V4L2PixelFormat to a
+ * PixelFormat just to check if the format is supported or not. In that case,
+ * they can suppress the warning message by setting the \a warn argument to
+ * false to not pollute the log with unnecessary messages.
+ *
* \return The PixelFormat corresponding to the V4L2 pixel format
*/
-PixelFormat V4L2PixelFormat::toPixelFormat() const
+PixelFormat V4L2PixelFormat::toPixelFormat(bool warn) const
{
const auto iter = vpf2pf.find(*this);
if (iter == vpf2pf.end()) {
- LOG(V4L2, Warning)
- << "Unsupported V4L2 pixel format "
- << toString();
+ if (warn)
+ LOG(V4L2, Warning) << "Unsupported V4L2 pixel format "
+ << toString();
return PixelFormat();
}
@@ -302,26 +350,24 @@ PixelFormat V4L2PixelFormat::toPixelFormat() const
}
/**
- * \brief Convert \a pixelFormat to its corresponding V4L2PixelFormat
+ * \brief Retrieve the list of V4L2PixelFormat associated with \a pixelFormat
* \param[in] pixelFormat The PixelFormat to convert
- * \param[in] multiplanar V4L2 Multiplanar API support flag
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. Set the \a multiplanar parameter to false to select a format
- * with contiguous planes, or to true to select a format with non-contiguous
- * planes.
+ * Multiple V4L2 formats may exist for one PixelFormat as V4L2 defines separate
+ * 4CCs for contiguous and non-contiguous versions of the same image format.
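+ *
+ * Callers would typically iterate over the returned formats and pick the
+ * first one also supported by the target device, along the lines of this
+ * sketch (deviceSupports() is a hypothetical helper):
+ * \code
+ * for (const V4L2PixelFormat &v4l2Format :
+ *      V4L2PixelFormat::fromPixelFormat(pixelFormat)) {
+ *         if (deviceSupports(v4l2Format))
+ *                 return v4l2Format;
+ * }
+ * \endcode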
*
- * \return The V4L2PixelFormat corresponding to \a pixelFormat
+ * \return The list of V4L2PixelFormat corresponding to \a pixelFormat
*/
-V4L2PixelFormat V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar)
+const std::vector<V4L2PixelFormat> &
+V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
{
+ static const std::vector<V4L2PixelFormat> empty;
+
const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
if (!info.isValid())
- return V4L2PixelFormat();
+ return empty;
- return multiplanar ? info.v4l2Formats.multi : info.v4l2Formats.single;
+ return info.v4l2Formats;
}
/**
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 98a3911a..6da77775 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#include "libcamera/internal/v4l2_subdevice.h"
@@ -23,6 +23,7 @@
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include "libcamera/internal/formats.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"
@@ -35,105 +36,727 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
-namespace {
-
-/*
- * \struct V4L2SubdeviceFormatInfo
+/**
+ * \class MediaBusFormatInfo
* \brief Information about media bus formats
- * \param bitsPerPixel Bits per pixel
- * \param name Name of MBUS format
+ *
+ * The MediaBusFormatInfo class groups together information describing a media
+ * bus format. It facilitates handling of media bus formats by providing data
+ * commonly used in pipeline handlers.
+ *
+ * \var MediaBusFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the format
+ *
+ * \var MediaBusFormatInfo::code
+ * \brief The media bus format code described by this instance (MEDIA_BUS_FMT_*)
+ *
+ * \var MediaBusFormatInfo::type
+ * \brief The media bus format type
+ *
+ * \var MediaBusFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that transmit multiple or fractional pixels per sample, the
+ * value will differ from the bus width.
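+ * For example, UYVY8_2X8 transfers each pixel as two 8-bit samples over an
+ * 8-bit bus, and thus reports 16 bits per pixel.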
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, or device-specific embedded data formats, report 0 in this field.
+ *
+ * \var MediaBusFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * This field is valid for Type::Image formats only.
*/
-struct V4L2SubdeviceFormatInfo {
- unsigned int bitsPerPixel;
- const char *name;
-};
-/*
- * \var formatInfoMap
- * \brief A map that associates V4L2SubdeviceFormatInfo struct to V4L2 media
- * bus codes
- */
-const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
- { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, { 16, "RGB444_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, { 16, "RGB444_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, { 16, "RGB555_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, { 16, "RGB555_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_RGB565_1X16, { 16, "RGB565_1X16" } },
- { MEDIA_BUS_FMT_BGR565_2X8_BE, { 16, "BGR565_2X8_BE" } },
- { MEDIA_BUS_FMT_BGR565_2X8_LE, { 16, "BGR565_2X8_LE" } },
- { MEDIA_BUS_FMT_RGB565_2X8_BE, { 16, "RGB565_2X8_BE" } },
- { MEDIA_BUS_FMT_RGB565_2X8_LE, { 16, "RGB565_2X8_LE" } },
- { MEDIA_BUS_FMT_RGB666_1X18, { 18, "RGB666_1X18" } },
- { MEDIA_BUS_FMT_RGB888_1X24, { 24, "RGB888_1X24" } },
- { MEDIA_BUS_FMT_RGB888_2X12_BE, { 24, "RGB888_2X12_BE" } },
- { MEDIA_BUS_FMT_RGB888_2X12_LE, { 24, "RGB888_2X12_LE" } },
- { MEDIA_BUS_FMT_ARGB8888_1X32, { 32, "ARGB8888_1X32" } },
- { MEDIA_BUS_FMT_Y8_1X8, { 8, "Y8_1X8" } },
- { MEDIA_BUS_FMT_UV8_1X8, { 8, "UV8_1X8" } },
- { MEDIA_BUS_FMT_UYVY8_1_5X8, { 12, "UYVY8_1_5X8" } },
- { MEDIA_BUS_FMT_VYUY8_1_5X8, { 12, "VYUY8_1_5X8" } },
- { MEDIA_BUS_FMT_YUYV8_1_5X8, { 12, "YUYV8_1_5X8" } },
- { MEDIA_BUS_FMT_YVYU8_1_5X8, { 12, "YVYU8_1_5X8" } },
- { MEDIA_BUS_FMT_UYVY8_2X8, { 16, "UYVY8_2X8" } },
- { MEDIA_BUS_FMT_VYUY8_2X8, { 16, "VYUY8_2X8" } },
- { MEDIA_BUS_FMT_YUYV8_2X8, { 16, "YUYV8_2X8" } },
- { MEDIA_BUS_FMT_YVYU8_2X8, { 16, "YVYU8_2X8" } },
- { MEDIA_BUS_FMT_Y10_1X10, { 10, "Y10_1X10" } },
- { MEDIA_BUS_FMT_UYVY10_2X10, { 20, "UYVY10_2X10" } },
- { MEDIA_BUS_FMT_VYUY10_2X10, { 20, "VYUY10_2X10" } },
- { MEDIA_BUS_FMT_YUYV10_2X10, { 20, "YUYV10_2X10" } },
- { MEDIA_BUS_FMT_YVYU10_2X10, { 20, "YVYU10_2X10" } },
- { MEDIA_BUS_FMT_Y12_1X12, { 12, "Y12_1X12" } },
- { MEDIA_BUS_FMT_UYVY8_1X16, { 16, "UYVY8_1X16" } },
- { MEDIA_BUS_FMT_VYUY8_1X16, { 16, "VYUY8_1X16" } },
- { MEDIA_BUS_FMT_YUYV8_1X16, { 16, "YUYV8_1X16" } },
- { MEDIA_BUS_FMT_YVYU8_1X16, { 16, "YVYU8_1X16" } },
- { MEDIA_BUS_FMT_YDYUYDYV8_1X16, { 16, "YDYUYDYV8_1X16" } },
- { MEDIA_BUS_FMT_UYVY10_1X20, { 20, "UYVY10_1X20" } },
- { MEDIA_BUS_FMT_VYUY10_1X20, { 20, "VYUY10_1X20" } },
- { MEDIA_BUS_FMT_YUYV10_1X20, { 20, "YUYV10_1X20" } },
- { MEDIA_BUS_FMT_YVYU10_1X20, { 20, "YVYU10_1X20" } },
- { MEDIA_BUS_FMT_YUV8_1X24, { 24, "YUV8_1X24" } },
- { MEDIA_BUS_FMT_YUV10_1X30, { 30, "YUV10_1X30" } },
- { MEDIA_BUS_FMT_AYUV8_1X32, { 32, "AYUV8_1X32" } },
- { MEDIA_BUS_FMT_UYVY12_2X12, { 24, "UYVY12_2X12" } },
- { MEDIA_BUS_FMT_VYUY12_2X12, { 24, "VYUY12_2X12" } },
- { MEDIA_BUS_FMT_YUYV12_2X12, { 24, "YUYV12_2X12" } },
- { MEDIA_BUS_FMT_YVYU12_2X12, { 24, "YVYU12_2X12" } },
- { MEDIA_BUS_FMT_UYVY12_1X24, { 24, "UYVY12_1X24" } },
- { MEDIA_BUS_FMT_VYUY12_1X24, { 24, "VYUY12_1X24" } },
- { MEDIA_BUS_FMT_YUYV12_1X24, { 24, "YUYV12_1X24" } },
- { MEDIA_BUS_FMT_YVYU12_1X24, { 24, "YVYU12_1X24" } },
- { MEDIA_BUS_FMT_SBGGR8_1X8, { 8, "SBGGR8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG8_1X8, { 8, "SGBRG8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG8_1X8, { 8, "SGRBG8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB8_1X8, { 8, "SRGGB8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, { 8, "SBGGR10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, { 8, "SGBRG10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, { 8, "SGRBG10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, { 8, "SRGGB10_ALAW8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, { 8, "SBGGR10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, { 8, "SGBRG10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, { 8, "SGRBG10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, { 8, "SRGGB10_DPCM8_1X8" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, { 16, "SBGGR10_2X8_PADHI_BE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, { 16, "SBGGR10_2X8_PADHI_LE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, { 16, "SBGGR10_2X8_PADLO_BE" } },
- { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, { 16, "SBGGR10_2X8_PADLO_LE" } },
- { MEDIA_BUS_FMT_SBGGR10_1X10, { 10, "SBGGR10_1X10" } },
- { MEDIA_BUS_FMT_SGBRG10_1X10, { 10, "SGBRG10_1X10" } },
- { MEDIA_BUS_FMT_SGRBG10_1X10, { 10, "SGRBG10_1X10" } },
- { MEDIA_BUS_FMT_SRGGB10_1X10, { 10, "SRGGB10_1X10" } },
- { MEDIA_BUS_FMT_SBGGR12_1X12, { 12, "SBGGR12_1X12" } },
- { MEDIA_BUS_FMT_SGBRG12_1X12, { 12, "SGBRG12_1X12" } },
- { MEDIA_BUS_FMT_SGRBG12_1X12, { 12, "SGRBG12_1X12" } },
- { MEDIA_BUS_FMT_SRGGB12_1X12, { 12, "SRGGB12_1X12" } },
- { MEDIA_BUS_FMT_AHSV8888_1X32, { 32, "AHSV8888_1X32" } },
+/**
+ * \enum MediaBusFormatInfo::Type
+ * \brief The format type
+ *
+ * \var MediaBusFormatInfo::Type::Image
+ * \brief The format describes image data
+ *
+ * \var MediaBusFormatInfo::Type::Metadata
+ * \brief The format describes generic metadata
+ *
+ * \var MediaBusFormatInfo::Type::EmbeddedData
+ * \brief The format describes sensor embedded data
+ */
+
+namespace {
+
+const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
+ /* This table is sorted to match the order in linux/media-bus-format.h */
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, {
+ .name = "RGB444_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, {
+ .name = "RGB444_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, {
+ .name = "RGB555_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, {
+ .name = "RGB555_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_1X16, {
+ .name = "RGB565_1X16",
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_BE, {
+ .name = "BGR565_2X8_BE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_LE, {
+ .name = "BGR565_2X8_LE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, {
+ .name = "RGB565_2X8_BE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_LE, {
+ .name = "RGB565_2X8_LE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB666_1X18, {
+ .name = "RGB666_1X18",
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 18,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR888_1X24, {
+ .name = "BGR888_1X24",
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_1X24, {
+ .name = "RGB888_1X24",
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, {
+ .name = "RGB888_2X12_BE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_LE, {
+ .name = "RGB888_2X12_LE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_ARGB8888_1X32, {
+ .name = "ARGB8888_1X32",
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_Y8_1X8, {
+ .name = "Y8_1X8",
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UV8_1X8, {
+ .name = "UV8_1X8",
+ .code = MEDIA_BUS_FMT_UV8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, {
+ .name = "UYVY8_1_5X8",
+ .code = MEDIA_BUS_FMT_UYVY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, {
+ .name = "VYUY8_1_5X8",
+ .code = MEDIA_BUS_FMT_VYUY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, {
+ .name = "YUYV8_1_5X8",
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, {
+ .name = "YVYU8_1_5X8",
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, {
+ .name = "UYVY8_2X8",
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, {
+ .name = "VYUY8_2X8",
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, {
+ .name = "YUYV8_2X8",
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, {
+ .name = "YVYU8_2X8",
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y10_1X10, {
+ .name = "Y10_1X10",
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_2X10, {
+ .name = "UYVY10_2X10",
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_2X10, {
+ .name = "VYUY10_2X10",
+ .code = MEDIA_BUS_FMT_VYUY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_2X10, {
+ .name = "YUYV10_2X10",
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_2X10, {
+ .name = "YVYU10_2X10",
+ .code = MEDIA_BUS_FMT_YVYU10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y12_1X12, {
+ .name = "Y12_1X12",
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y16_1X16, {
+ .name = "Y16_1X16",
+ .code = MEDIA_BUS_FMT_Y16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1X16, {
+ .name = "UYVY8_1X16",
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1X16, {
+ .name = "VYUY8_1X16",
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1X16, {
+ .name = "YUYV8_1X16",
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1X16, {
+ .name = "YVYU8_1X16",
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YDYUYDYV8_1X16, {
+ .name = "YDYUYDYV8_1X16",
+ .code = MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_1X20, {
+ .name = "UYVY10_1X20",
+ .code = MEDIA_BUS_FMT_UYVY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_1X20, {
+ .name = "VYUY10_1X20",
+ .code = MEDIA_BUS_FMT_VYUY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_1X20, {
+ .name = "YUYV10_1X20",
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_1X20, {
+ .name = "YVYU10_1X20",
+ .code = MEDIA_BUS_FMT_YVYU10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV8_1X24, {
+ .name = "YUV8_1X24",
+ .code = MEDIA_BUS_FMT_YUV8_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV10_1X30, {
+ .name = "YUV10_1X30",
+ .code = MEDIA_BUS_FMT_YUV10_1X30,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 30,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_AYUV8_1X32, {
+ .name = "AYUV8_1X32",
+ .code = MEDIA_BUS_FMT_AYUV8_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_2X12, {
+ .name = "UYVY12_2X12",
+ .code = MEDIA_BUS_FMT_UYVY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_2X12, {
+ .name = "VYUY12_2X12",
+ .code = MEDIA_BUS_FMT_VYUY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_2X12, {
+ .name = "YUYV12_2X12",
+ .code = MEDIA_BUS_FMT_YUYV12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_2X12, {
+ .name = "YVYU12_2X12",
+ .code = MEDIA_BUS_FMT_YVYU12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_1X24, {
+ .name = "UYVY12_1X24",
+ .code = MEDIA_BUS_FMT_UYVY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_1X24, {
+ .name = "VYUY12_1X24",
+ .code = MEDIA_BUS_FMT_VYUY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_1X24, {
+ .name = "YUYV12_1X24",
+ .code = MEDIA_BUS_FMT_YUYV12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_1X24, {
+ .name = "YVYU12_1X24",
+ .code = MEDIA_BUS_FMT_YVYU12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, {
+ .name = "SBGGR8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, {
+ .name = "SGBRG8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, {
+ .name = "SGRBG8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, {
+ .name = "SRGGB8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, {
+ .name = "SBGGR10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, {
+ .name = "SGBRG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, {
+ .name = "SGRBG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, {
+ .name = "SRGGB10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, {
+ .name = "SBGGR10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, {
+ .name = "SGBRG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, {
+ .name = "SGRBG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, {
+ .name = "SRGGB10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, {
+ .name = "SBGGR10_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, {
+ .name = "SBGGR10_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, {
+ .name = "SBGGR10_2X8_PADLO_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, {
+ .name = "SBGGR10_2X8_PADLO_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, {
+ .name = "SBGGR10_1X10",
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, {
+ .name = "SGBRG10_1X10",
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, {
+ .name = "SGRBG10_1X10",
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, {
+ .name = "SRGGB10_1X10",
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, {
+ .name = "SBGGR12_1X12",
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, {
+ .name = "SGBRG12_1X12",
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, {
+ .name = "SGRBG12_1X12",
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, {
+ .name = "SRGGB12_1X12",
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, {
+ .name = "SBGGR14_1X14",
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, {
+ .name = "SGBRG14_1X14",
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, {
+ .name = "SGRBG14_1X14",
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, {
+ .name = "SRGGB14_1X14",
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ /* \todo Clarify colour encoding for HSV formats */
+ { MEDIA_BUS_FMT_AHSV8888_1X32, {
+ .name = "AHSV8888_1X32",
+ .code = MEDIA_BUS_FMT_AHSV8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_JPEG_1X8, {
+ .name = "JPEG_1X8",
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_METADATA_FIXED, {
+ .name = "METADATA_FIXED",
+ .code = MEDIA_BUS_FMT_METADATA_FIXED,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
};
} /* namespace */
/**
+ * \fn bool MediaBusFormatInfo::isValid() const
+ * \brief Check if the media bus format info is valid
+ * \return True if the media bus format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a media bus format
+ * \param[in] code The media bus format code
+ * \return The MediaBusFormatInfo describing the \a code if known, or an invalid
+ * MediaBusFormatInfo otherwise
+ */
+const MediaBusFormatInfo &MediaBusFormatInfo::info(uint32_t code)
+{
+ static const MediaBusFormatInfo invalid{};
+
+ const auto it = mediaBusFormatInfo.find(code);
+ if (it == mediaBusFormatInfo.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported media bus format "
+ << utils::hex(code, 4);
+ return invalid;
+ }
+
+ return it->second;
+}
+
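The lookup above deliberately returns a reference to a zero-filled static entry instead of failing hard, so callers are expected to test the result with isValid(). A minimal caller sketch, assuming the libcamera internal headers; the helper name lineLengthBits is made up for illustration:

    /* Hypothetical helper, not part of this patch. */
    unsigned int lineLengthBits(uint32_t code, unsigned int width)
    {
            const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
            if (!info.isValid())
                    return 0; /* info() has already logged a warning */

            /* bitsPerPixel is the per-pixel bus width, e.g. 20 for UYVY10_2X10. */
            return info.bitsPerPixel * width;
    }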
+/**
+ * \struct V4L2SubdeviceCapability
+ * \brief struct v4l2_subdev_capability object wrapper and helpers
+ *
+ * The V4L2SubdeviceCapability structure manages the information returned by the
+ * VIDIOC_SUBDEV_QUERYCAP ioctl.
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::isReadOnly()
+ * \brief Check whether a subdevice is registered as read-only
+ *
+ * A V4L2 subdevice is registered as read-only if V4L2_SUBDEV_CAP_RO_SUBDEV
+ * is listed as part of its capabilities.
+ *
+ * \return True if the subdevice is registered as read-only, false otherwise
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::hasStreams()
+ * \brief Check whether a subdevice supports the V4L2 streams API
+ * \return True if the subdevice supports the streams API, false otherwise
+ */
+
+/**
* \struct V4L2SubdeviceFormat
* \brief The V4L2 sub-device image format and sizes
*
@@ -162,7 +785,7 @@ const std::map<uint32_t, V4L2SubdeviceFormatInfo> formatInfoMap = {
*/
/**
- * \var V4L2SubdeviceFormat::mbus_code
+ * \var V4L2SubdeviceFormat::code
* \brief The image format bus code
*/
@@ -199,23 +822,6 @@ const std::string V4L2SubdeviceFormat::toString() const
}
/**
- * \brief Retrieve the number of bits per pixel for the V4L2 subdevice format
- * \return The number of bits per pixel for the format, or 0 if the format is
- * not supported
- */
-uint8_t V4L2SubdeviceFormat::bitsPerPixel() const
-{
- const auto it = formatInfoMap.find(mbus_code);
- if (it == formatInfoMap.end()) {
- LOG(V4L2, Error) << "No information available for format '"
- << *this << "'";
- return 0;
- }
-
- return it->second.bitsPerPixel;
-}
-
-/**
* \brief Insert a text representation of a V4L2SubdeviceFormat into an output
* stream
* \param[in] out The output stream
@@ -226,10 +832,10 @@ std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
{
out << f.size << "-";
- const auto it = formatInfoMap.find(f.mbus_code);
+ const auto it = mediaBusFormatInfo.find(f.code);
- if (it == formatInfoMap.end())
- out << utils::hex(f.mbus_code, 4);
+ if (it == mediaBusFormatInfo.end())
+ out << utils::hex(f.code, 4);
else
out << it->second.name;
@@ -265,6 +871,134 @@ std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
*/
/**
+ * \class V4L2Subdevice::Stream
+ * \brief V4L2 subdevice stream
+ *
+ * This class identifies a subdev stream by bundling the pad number with the
+ * stream number. It is used in all stream-aware functions of the V4L2Subdevice
+ * class to identify the stream the functions operate on.
+ *
+ * \var V4L2Subdevice::Stream::pad
+ * \brief The 0-indexed pad number
+ *
+ * \var V4L2Subdevice::Stream::stream
+ * \brief The stream number
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream()
+ * \brief Construct a Stream with pad and stream set to 0
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream(unsigned int pad, unsigned int stream)
+ * \brief Construct a Stream with a given \a pad and \a stream number
+ * \param[in] pad The indexed pad number
+ * \param[in] stream The stream number
+ */
+
+/**
+ * \brief Compare streams for equality
+ * \return True if the two streams are equal, false otherwise
+ */
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+{
+ return lhs.pad == rhs.pad && lhs.stream == rhs.stream;
+}
+
+/**
+ * \fn bool operator!=(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+ * \brief Compare streams for inequality
+ * \return True if the two streams are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Stream into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] stream The V4L2Subdevice::Stream
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream)
+{
+ out << stream.pad << "/" << stream.stream;
+
+ return out;
+}
+
+/**
+ * \class V4L2Subdevice::Route
+ * \brief V4L2 subdevice routing table entry
+ *
+ * This class models a route in the subdevice routing table. It is similar to
+ * the v4l2_subdev_route structure, but uses the V4L2Subdevice::Stream class
+ * for easier usage with the V4L2Subdevice stream-aware functions.
+ *
+ * \var V4L2Subdevice::Route::sink
+ * \brief The sink stream of the route
+ *
+ * \var V4L2Subdevice::Route::source
+ * \brief The source stream of the route
+ *
+ * \var V4L2Subdevice::Route::flags
+ * \brief The route flags (V4L2_SUBDEV_ROUTE_FL_*)
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route()
+ * \brief Construct a Route with default streams
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route(const Stream &sink, const Stream &source,
+ * uint32_t flags)
+ * \brief Construct a Route from \a sink to \a source
+ * \param[in] sink The sink stream
+ * \param[in] source The source stream
+ * \param[in] flags The route flags
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Route into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] route The V4L2Subdevice::Route
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route)
+{
+ out << route.sink << " -> " << route.source
+ << " (" << utils::hex(route.flags) << ")";
+
+ return out;
+}
+
+/**
+ * \typedef V4L2Subdevice::Routing
+ * \brief V4L2 subdevice routing table
+ *
+ * This class stores a subdevice routing table as a vector of routes.
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Routing into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] routing The V4L2Subdevice::Routing
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing)
+{
+ for (const auto &[i, route] : utils::enumerate(routing)) {
+ out << "[" << i << "] " << route;
+ if (i != routing.size() - 1)
+ out << ", ";
+ }
+
+ return out;
+}
+
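As a worked illustration of the operators above, a single-route table connecting pad 0/stream 0 to pad 1/stream 0 prints as "[0] 0/0 -> 1/0" followed by the route flags in hex. A hedged sketch, using only the classes documented in this file:

    /* Illustrative only: build a one-entry routing table and log it. */
    V4L2Subdevice::Routing routing;
    routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
                         V4L2Subdevice::Stream{ 1, 0 },
                         V4L2_SUBDEV_ROUTE_FL_ACTIVE);

    LOG(V4L2, Debug) << "Routing: " << routing;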
+/**
* \brief Create a V4L2 subdevice from a MediaEntity using its device node
* path
*/
@@ -284,7 +1018,40 @@ V4L2Subdevice::~V4L2Subdevice()
*/
int V4L2Subdevice::open()
{
- return V4L2Device::open(O_RDWR);
+ int ret = V4L2Device::open(O_RDWR);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to query the subdev capabilities. The VIDIOC_SUBDEV_QUERYCAP API
+ * was introduced in kernel v5.8, so ENOTTY errors must be ignored to
+ * support older kernels.
+ */
+ caps_ = {};
+ ret = ioctl(VIDIOC_SUBDEV_QUERYCAP, &caps_);
+ if (ret < 0 && errno != ENOTTY) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to query capabilities: " << strerror(-ret);
+ return ret;
+ }
+
+ /* If the subdev supports streams, enable the streams API. */
+ if (caps_.hasStreams()) {
+ struct v4l2_subdev_client_capability clientCaps{};
+ clientCaps.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ ret = ioctl(VIDIOC_SUBDEV_S_CLIENT_CAP, &clientCaps);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to set client capabilities: "
+ << strerror(-ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -295,7 +1062,7 @@ int V4L2Subdevice::open()
/**
* \brief Get selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] stream The stream the rectangle is retrieved from
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[out] rect The retrieved selection rectangle
*
@@ -303,13 +1070,14 @@ int V4L2Subdevice::open()
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::getSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -317,7 +1085,7 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to get rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -330,8 +1098,19 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
}
/**
+ * \fn V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set selection rectangle \a rect for \a target
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] stream The stream the rectangle is to be applied to
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[inout] rect The selection rectangle to be applied
*
@@ -339,13 +1118,14 @@ int V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+int V4L2Subdevice::setSelection(const Stream &stream, unsigned int target,
Rectangle *rect)
{
struct v4l2_subdev_selection sel = {};
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
sel.target = target;
sel.flags = 0;
@@ -358,7 +1138,7 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
if (ret < 0) {
LOG(V4L2, Error)
<< "Unable to set rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
+ << stream << ": " << strerror(-ret);
return ret;
}
@@ -369,26 +1149,40 @@ int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
return 0;
}
+
/**
- * \brief Enumerate all media bus codes and frame sizes on a \a pad
- * \param[in] pad The 0-indexed pad number to enumerate formats on
+ * \fn V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
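A hedged usage sketch for the selection helpers above, assuming an open V4L2Subdevice *subdev; the rectangle values are arbitrary:

    /* Crop stream 0 on pad 0 to 1080p; the driver may adjust the rectangle. */
    V4L2Subdevice::Stream stream{ 0, 0 };
    Rectangle crop{ 0, 0, 1920, 1080 };

    int ret = subdev->setSelection(stream, V4L2_SEL_TGT_CROP, &crop);
    if (ret < 0)
            return ret;

    /* On success, crop holds the rectangle actually applied. */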
+/**
+ * \brief Enumerate all media bus codes and frame sizes on a \a stream
+ * \param[in] stream The stream to enumerate formats for
*
* Enumerate all media bus codes and frame sizes supported by the subdevice on
- * a \a pad.
+ * a \a stream.
*
* \return A list of the supported device formats
*/
-V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
+V4L2Subdevice::Formats V4L2Subdevice::formats(const Stream &stream)
{
Formats formats;
- if (pad >= entity_->pads().size()) {
- LOG(V4L2, Error) << "Invalid pad: " << pad;
+ if (stream.pad >= entity_->pads().size()) {
+ LOG(V4L2, Error) << "Invalid pad: " << stream.pad;
return {};
}
- for (unsigned int code : enumPadCodes(pad)) {
- std::vector<SizeRange> sizes = enumPadSizes(pad, code);
+ for (unsigned int code : enumPadCodes(stream)) {
+ std::vector<SizeRange> sizes = enumPadSizes(stream, code);
if (sizes.empty())
return {};
@@ -396,7 +1190,7 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
if (!inserted.second) {
LOG(V4L2, Error)
<< "Could not add sizes for media bus code "
- << code << " on pad " << pad;
+ << code << " on pad " << stream.pad;
return {};
}
}
@@ -405,79 +1199,273 @@ V4L2Subdevice::Formats V4L2Subdevice::formats(unsigned int pad)
}
/**
- * \brief Retrieve the image format set on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \fn V4L2Subdevice::formats(unsigned int pad)
+ * \brief Enumerate all media bus codes and frame sizes on a \a pad
+ * \param[in] pad The 0-indexed pad number to enumerate formats on
+ *
+ * Enumerate all media bus codes and frame sizes supported by the subdevice on
+ * a \a pad.
+ *
+ * \return A list of the supported device formats
+ */
+
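A hedged sketch that dumps everything formats() reports for pad 0/stream 0, assuming an open V4L2Subdevice *subdev:

    /* Formats maps each media bus code to its supported size ranges. */
    V4L2Subdevice::Formats formats = subdev->formats({ 0, 0 });
    for (const auto &[code, sizes] : formats) {
            for (const SizeRange &range : sizes)
                    LOG(V4L2, Debug)
                            << utils::hex(code, 4) << ": " << range;
    }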
+std::optional<ColorSpace> V4L2Subdevice::toColorSpace(const v4l2_mbus_framefmt &format) const
+{
+ /*
+ * Only image formats have a color space; for other formats (such as
+ * metadata formats) the color space concept isn't applicable. V4L2
+ * subdev drivers return a colorspace set to V4L2_COLORSPACE_DEFAULT in
+ * that case (as well as for image formats when the driver hasn't
+ * bothered implementing color space support). Check the colorspace
+ * field here and return std::nullopt directly to avoid logging a
+ * warning.
+ */
+ if (format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return std::nullopt;
+
+ PixelFormatInfo::ColourEncoding colourEncoding;
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(format.code);
+ if (info.isValid()) {
+ colourEncoding = info.colourEncoding;
+ } else {
+ LOG(V4L2, Warning)
+ << "Unknown subdev format "
+ << utils::hex(format.code, 4)
+ << ", defaulting to RGB encoding";
+
+ colourEncoding = PixelFormatInfo::ColourEncodingRGB;
+ }
+
+ return V4L2Device::toColorSpace(format, colourEncoding);
+}
+
+/**
+ * \brief Retrieve the image format set on one of the V4L2 subdevice streams
+ * \param[in] stream The stream the format is to be retrieved from
* \param[out] format The image bus format
* \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
int ret = ioctl(VIDIOC_SUBDEV_G_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to get format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to get format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Retrieve the image format set on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set an image format on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be applied to
- * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] stream The stream the format is to be applied to
+ * \param[inout] format The image bus format to apply to the stream
* \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
* "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
*
- * Apply the requested image format to the desired media pad and return the
+ * Apply the requested image format to the desired stream and return the
* actually applied format parameters, as getFormat() would do.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+int V4L2Subdevice::setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = whence == ActiveFormat ? V4L2_SUBDEV_FORMAT_ACTIVE
- : V4L2_SUBDEV_FORMAT_TRY;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
subdevFmt.format.width = format->size.width;
subdevFmt.format.height = format->size.height;
- subdevFmt.format.code = format->mbus_code;
+ subdevFmt.format.code = format->code;
subdevFmt.format.field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, subdevFmt.format);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, subdevFmt.format);
+
+ /* The CSC flag is only applicable to source pads. */
+ if (entity_->pads()[stream.pad]->flags() & MEDIA_PAD_FL_SOURCE)
+ subdevFmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+ }
int ret = ioctl(VIDIOC_SUBDEV_S_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to set format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to set format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Set an image format on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be applied to
+ * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the requested image format to the desired media pad and return the
+ * actually applied format parameters, as getFormat() would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
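A hedged sketch of format negotiation with the stream-aware overload above, assuming an open V4L2Subdevice *subdev:

    V4L2SubdeviceFormat format{};
    format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
    format.size = { 1920, 1080 };

    int ret = subdev->setFormat({ 0, 0 }, &format, V4L2Subdevice::ActiveFormat);
    if (ret < 0)
            return ret;

    /* The driver may have adjusted the request; format holds the result. */
    LOG(V4L2, Debug) << "Negotiated " << format;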
+namespace {
+
+void routeFromKernel(V4L2Subdevice::Route &route,
+ const struct v4l2_subdev_route &kroute)
+{
+ route.sink.pad = kroute.sink_pad;
+ route.sink.stream = kroute.sink_stream;
+ route.source.pad = kroute.source_pad;
+ route.source.stream = kroute.source_stream;
+ route.flags = kroute.flags;
+}
+
+void routeToKernel(const V4L2Subdevice::Route &route,
+ struct v4l2_subdev_route &kroute)
+{
+ kroute.sink_pad = route.sink.pad;
+ kroute.sink_stream = route.sink.stream;
+ kroute.source_pad = route.source.pad;
+ kroute.source_stream = route.source.stream;
+ kroute.flags = route.flags;
+}
+
+} /* namespace */
+
+/**
+ * \brief Retrieve the subdevice's internal routing table
+ * \param[out] routing The routing table
+ * \param[in] whence The routing table to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
+{
+ routing->clear();
+
+ if (!caps_.hasStreams())
+ return 0;
+
+ struct v4l2_subdev_routing rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Set a routing table on the V4L2 subdevice
+ * \param[inout] routing The routing table
+ * \param[in] whence The routing table to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the routing table \a routing to the V4L2 subdevice and update its
+ * content to reflect the routing table actually applied, as getRouting()
+ * would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
+{
+ if (!caps_.hasStreams()) {
+ routing->clear();
+ return 0;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
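A hedged sketch of the round trip that getRouting() and setRouting() enable, assuming an open V4L2Subdevice *subdev; error handling is trimmed:

    V4L2Subdevice::Routing routing;

    /* Read the active table, deactivate every route, and write it back. */
    if (subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat) == 0) {
            for (V4L2Subdevice::Route &route : routing)
                    route.flags &= ~V4L2_SUBDEV_ROUTE_FL_ACTIVE;

            subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
    }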
+/**
* \brief Retrieve the model name of the device
*
* The model name allows identification of the specific device model. This can
@@ -530,6 +1518,12 @@ const std::string &V4L2Subdevice::model()
}
/**
+ * \fn V4L2Subdevice::caps()
+ * \brief Retrieve the subdevice V4L2 capabilities
+ * \return The subdevice V4L2 capabilities
+ */
+
+/**
* \brief Create a new video subdevice instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
@@ -553,14 +1547,15 @@ std::string V4L2Subdevice::logPrefix() const
return "'" + entity_->name() + "'";
}
-std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
+std::vector<unsigned int> V4L2Subdevice::enumPadCodes(const Stream &stream)
{
std::vector<unsigned int> codes;
int ret;
for (unsigned int index = 0; ; index++) {
struct v4l2_subdev_mbus_code_enum mbusEnum = {};
- mbusEnum.pad = pad;
+ mbusEnum.pad = stream.pad;
+ mbusEnum.stream = stream.stream;
mbusEnum.index = index;
mbusEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -573,7 +1568,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
if (ret < 0 && ret != -EINVAL) {
LOG(V4L2, Error)
- << "Unable to enumerate formats on pad " << pad
+ << "Unable to enumerate formats on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -581,7 +1576,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
return codes;
}
-std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
+std::vector<SizeRange> V4L2Subdevice::enumPadSizes(const Stream &stream,
unsigned int code)
{
std::vector<SizeRange> sizes;
@@ -590,7 +1585,8 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
for (unsigned int index = 0;; index++) {
struct v4l2_subdev_frame_size_enum sizeEnum = {};
sizeEnum.index = index;
- sizeEnum.pad = pad;
+ sizeEnum.pad = stream.pad;
+ sizeEnum.stream = stream.stream;
sizeEnum.code = code;
sizeEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -604,7 +1600,7 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
if (ret < 0 && ret != -EINVAL && ret != -ENOTTY) {
LOG(V4L2, Error)
- << "Unable to enumerate sizes on pad " << pad
+ << "Unable to enumerate sizes on pad " << stream
<< ": " << strerror(-ret);
return {};
}
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index 63911339..4947aa3d 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
#include "libcamera/internal/v4l2_videodevice.h"
@@ -633,13 +633,9 @@ int V4L2VideoDevice::open()
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
- if (ret) {
- LOG(V4L2, Error) << "Failed to get format";
+ ret = initFormats();
+ if (ret)
return ret;
- }
-
- formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
return 0;
}
@@ -726,7 +722,24 @@ int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
- ret = getFormat(&format_);
+ ret = initFormats();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int V4L2VideoDevice::initFormats()
+{
+ const std::vector<V4L2PixelFormat> &deviceFormats = enumPixelformats(0);
+ if (deviceFormats.empty()) {
+ LOG(V4L2, Error) << "Failed to initialize device formats";
+ return -EINVAL;
+ }
+
+ pixelFormats_ = { deviceFormats.begin(), deviceFormats.end() };
+
+ int ret = getFormat(&format_);
if (ret) {
LOG(V4L2, Error) << "Failed to get format";
return ret;
@@ -901,6 +914,13 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
return 0;
}
+template<typename T>
+std::optional<ColorSpace> V4L2VideoDevice::toColorSpace(const T &v4l2Format)
+{
+ V4L2PixelFormat fourcc{ v4l2Format.pixelformat };
+ return V4L2Device::toColorSpace(v4l2Format, PixelFormatInfo::info(fourcc).colourEncoding);
+}
+
int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
@@ -940,7 +960,12 @@ int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->num_planes = format->planesCount;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ASSERT(pix->num_planes <= std::size(pix->plane_fmt));
@@ -1010,7 +1035,12 @@ int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set)
pix->pixelformat = format->fourcc;
pix->bytesperline = format->planes[0].bpl;
pix->field = V4L2_FIELD_NONE;
- fromColorSpace(format->colorSpace, *pix);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
@@ -1441,7 +1471,7 @@ UniqueFD V4L2VideoDevice::exportDmabufFd(unsigned int index,
expbuf.type = bufferType_;
expbuf.index = index;
expbuf.plane = plane;
- expbuf.flags = O_RDWR;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
ret = ioctl(VIDIOC_EXPBUF, &expbuf);
if (ret < 0) {
@@ -1503,6 +1533,9 @@ int V4L2VideoDevice::importBuffers(unsigned int count)
*/
int V4L2VideoDevice::releaseBuffers()
{
+ if (!cache_)
+ return 0;
+
LOG(V4L2, Debug) << "Releasing buffers";
delete cache_;
@@ -1593,6 +1626,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
if (V4L2_TYPE_IS_OUTPUT(buf.type)) {
const FrameMetadata &metadata = buffer->metadata();
+ for (const auto &plane : metadata.planes()) {
+ if (!plane.bytesused)
+ LOG(V4L2, Warning) << "bytesused == 0 is deprecated";
+ }
+
if (numV4l2Planes != planes.size()) {
/*
* If we have a multi-planar buffer with a V4L2
@@ -1761,12 +1799,14 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
}
- buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR
- ? FrameMetadata::FrameError
- : FrameMetadata::FrameSuccess;
- buffer->metadata_.sequence = buf.sequence;
- buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL
- + buf.timestamp.tv_usec * 1000ULL;
+ FrameMetadata &metadata = buffer->_d()->metadata();
+
+ metadata.status = buf.flags & V4L2_BUF_FLAG_ERROR
+ ? FrameMetadata::FrameError
+ : FrameMetadata::FrameSuccess;
+ metadata.sequence = buf.sequence;
+ metadata.timestamp = buf.timestamp.tv_sec * 1000000000ULL
+ + buf.timestamp.tv_usec * 1000ULL;
if (V4L2_TYPE_IS_OUTPUT(buf.type))
return buffer;
@@ -1777,15 +1817,14 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
*/
if (!firstFrame_) {
if (buf.sequence)
- LOG(V4L2, Warning)
+ LOG(V4L2, Info)
<< "Zero sequence expected for first frame (got "
<< buf.sequence << ")";
firstFrame_ = buf.sequence;
}
- buffer->metadata_.sequence -= firstFrame_.value();
+ metadata.sequence -= firstFrame_.value();
unsigned int numV4l2Planes = multiPlanar ? buf.length : 1;
- FrameMetadata &metadata = buffer->metadata_;
if (numV4l2Planes != buffer->planes().size()) {
/*
@@ -1911,9 +1950,10 @@ int V4L2VideoDevice::streamOff()
/* Send back all queued buffers. */
for (auto it : queuedBuffers_) {
FrameBuffer *buffer = it.second;
+ FrameMetadata &metadata = buffer->_d()->metadata();
cache_->put(it.first);
- buffer->metadata_.status = FrameMetadata::FrameCancelled;
+ metadata.status = FrameMetadata::FrameCancelled;
bufferReady.emit(buffer);
}
@@ -1990,6 +2030,40 @@ V4L2VideoDevice::fromEntityName(const MediaDevice *media,
}
/**
+ * \brief Convert \a PixelFormat to a V4L2PixelFormat supported by the device
+ * \param[in] pixelFormat The PixelFormat to convert
+ *
+ * Convert \a pixelFormat to a V4L2 FourCC that is known to be supported by
+ * the video device.
+ *
+ * A V4L2VideoDevice may support several V4L2 pixel formats that map to the
+ * same PixelFormat. This is the case for the contiguous and non-contiguous
+ * variants of multiplanar formats, and for the V4L2 MJPEG and JPEG pixel
+ * formats.
+ * Converting a PixelFormat to a V4L2PixelFormat may thus have multiple answers.
+ *
+ * This function converts the \a pixelFormat using the list of V4L2 pixel
+ * formats that the V4L2VideoDevice supports. This guarantees that the returned
+ * V4L2PixelFormat will be valid for the device. If multiple matches are still
+ * possible, contiguous variants are preferred. If the \a pixelFormat is not
+ * supported by the device, the function returns an invalid V4L2PixelFormat.
+ *
+ * \return The V4L2PixelFormat corresponding to \a pixelFormat if supported by
+ * the device, or an invalid V4L2PixelFormat otherwise
+ */
+V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) const
+{
+ const std::vector<V4L2PixelFormat> &v4l2PixelFormats =
+ V4L2PixelFormat::fromPixelFormat(pixelFormat);
+
+ for (const V4L2PixelFormat &v4l2Format : v4l2PixelFormats) {
+ if (pixelFormats_.count(v4l2Format))
+ return v4l2Format;
+ }
+
+ return {};
+}
+
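A hedged configuration sketch using the new helper, assuming an open V4L2VideoDevice *video:

    /* Pick the device's preferred FourCC for NV12 before setting a format. */
    V4L2PixelFormat fourcc = video->toV4L2PixelFormat(formats::NV12);
    if (!fourcc.isValid())
            return -EINVAL; /* NV12 is not supported by this device */

    V4L2DeviceFormat format;
    format.fourcc = fourcc;
    format.size = { 1920, 1080 };

    int ret = video->setFormat(&format);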
+/**
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
index 5c45e44e..55f81916 100644
--- a/src/libcamera/yaml_parser.cpp
+++ b/src/libcamera/yaml_parser.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml_parser.cpp - libcamera YAML parsing helper
+ * libcamera YAML parsing helper
*/
#include "libcamera/internal/yaml_parser.h"
@@ -31,12 +31,6 @@ namespace {
/* Empty static YamlObject as a safe result for invalid operations */
static const YamlObject empty;
-void setOk(bool *ok, bool result)
-{
- if (ok)
- *ok = result;
-}
-
} /* namespace */
/**
@@ -91,7 +85,6 @@ std::size_t YamlObject::size() const
{
switch (type_) {
case Type::Dictionary:
- return dictionary_.size();
case Type::List:
return list_.size();
default:
@@ -100,232 +93,293 @@ std::size_t YamlObject::size() const
}
/**
- * \fn template<typename T> YamlObject::get<T>(
- * const T &defaultValue, bool *ok) const
+ * \fn template<typename T> YamlObject::get<T>() const
* \brief Parse the YamlObject as a \a T value
- * \param[in] defaultValue The default value when failing to parse
- * \param[out] ok The result of whether the parse succeeded
*
* This function parses the value of the YamlObject as a \a T object, and
* returns the value. If parsing fails (usually because the YamlObject doesn't
- * store a \a T value), the \a defaultValue is returned, and \a ok is set to
- * false. Otherwise, the YamlObject value is returned, and \a ok is set to true.
+ * store a \a T value), std::nullopt is returned.
+ *
+ * \return The YamlObject value, or std::nullopt if parsing failed
+ */
+
+/**
+ * \fn template<typename T> YamlObject::get<T>(const T &defaultValue) const
+ * \brief Parse the YamlObject as a \a T value
+ * \param[in] defaultValue The default value when failing to parse
*
- * The \a ok pointer is optional and can be a nullptr if the caller doesn't
- * need to know if parsing succeeded.
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), the \a defaultValue is returned.
*
- * \return Value as a bool type
+ * \return The YamlObject value, or \a defaultValue if parsing failed
*/
#ifndef __DOXYGEN__
template<>
-bool YamlObject::get(const bool &defaultValue, bool *ok) const
+std::optional<bool> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "true") {
- setOk(ok, true);
+ if (value_ == "true")
return true;
- } else if (value_ == "false") {
- setOk(ok, true);
+ else if (value_ == "false")
return false;
- }
- return defaultValue;
+ return std::nullopt;
}
-template<>
-int16_t YamlObject::get(const int16_t &defaultValue, bool *ok) const
-{
- setOk(ok, false);
-
- if (type_ != Type::Value)
- return defaultValue;
+namespace {
- if (value_ == "")
- return defaultValue;
+bool parseSignedInteger(const std::string &str, long min, long max,
+ long *result)
+{
+ if (str == "")
+ return false;
char *end;
errno = 0;
- int16_t value = std::strtol(value_.c_str(), &end, 10);
+ long value = std::strtol(str.c_str(), &end, 10);
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<int16_t>::min() ||
- value > std::numeric_limits<int16_t>::max())
- return defaultValue;
+ if ('\0' != *end || errno == ERANGE || value < min || value > max)
+ return false;
- setOk(ok, true);
- return value;
+ *result = value;
+ return true;
}
-template<>
-uint16_t YamlObject::get(const uint16_t &defaultValue, bool *ok) const
+bool parseUnsignedInteger(const std::string &str, unsigned long max,
+ unsigned long *result)
{
- setOk(ok, false);
-
- if (type_ != Type::Value)
- return defaultValue;
-
- if (value_ == "")
- return defaultValue;
+ if (str == "")
+ return false;
/*
- * libyaml parses all scalar values as strings. When a string has
- * leading spaces before a minus sign, for example " -10", strtoul
- * skips leading spaces, accepts the leading minus sign, and the
- * calculated digits are negated as if by unary minus. Rule it out in
- * case the user gets a large number when the value is negative.
+ * strtoul() accepts strings representing a negative number, in which
+ * case it negates the converted value. We don't want to silently accept
+ * negative values and return a large positive number, so check for a
+ * minus sign (after optional whitespace) and return an error.
*/
- std::size_t found = value_.find_first_not_of(" \t");
- if (found != std::string::npos && value_[found] == '-')
- return defaultValue;
+ std::size_t found = str.find_first_not_of(" \t");
+ if (found != std::string::npos && str[found] == '-')
+ return false;
char *end;
errno = 0;
- uint16_t value = std::strtoul(value_.c_str(), &end, 10);
+ unsigned long value = std::strtoul(str.c_str(), &end, 10);
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<uint16_t>::min() ||
- value > std::numeric_limits<uint16_t>::max())
- return defaultValue;
+ if ('\0' != *end || errno == ERANGE || value > max)
+ return false;
- setOk(ok, true);
- return value;
+ *result = value;
+ return true;
}
+} /* namespace */
+
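The minus-sign guard above exists because strtoul() itself accepts negative input: it skips leading whitespace, honours the sign, and returns the converted value negated as if by unary minus. A standalone demonstration, independent of libcamera:

    #include <cstdlib>
    #include <iostream>

    int main()
    {
            char *end;
            unsigned long value = std::strtoul(" -10", &end, 10);

            /* Prints 18446744073709551606 on LP64; no error is reported. */
            std::cout << value << std::endl;

            return 0;
    }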
template<>
-int32_t YamlObject::get(const int32_t &defaultValue, bool *ok) const
+std::optional<int8_t> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "")
- return defaultValue;
+ long value;
- char *end;
+ if (!parseSignedInteger(value_, std::numeric_limits<int8_t>::min(),
+ std::numeric_limits<int8_t>::max(), &value))
+ return std::nullopt;
- errno = 0;
- long value = std::strtol(value_.c_str(), &end, 10);
+ return value;
+}
+
+template<>
+std::optional<uint8_t> YamlObject::get() const
+{
+ if (type_ != Type::Value)
+ return std::nullopt;
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<int32_t>::min() ||
- value > std::numeric_limits<int32_t>::max())
- return defaultValue;
+ unsigned long value;
+
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint8_t>::max(),
+ &value))
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-uint32_t YamlObject::get(const uint32_t &defaultValue, bool *ok) const
+std::optional<int16_t> YamlObject::get() const
{
- setOk(ok, false);
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ long value;
+
+ if (!parseSignedInteger(value_, std::numeric_limits<int16_t>::min(),
+ std::numeric_limits<int16_t>::max(), &value))
+ return std::nullopt;
+
+ return value;
+}
+template<>
+std::optional<uint16_t> YamlObject::get() const
+{
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- if (value_ == "")
- return defaultValue;
+ unsigned long value;
- /*
- * libyaml parses all scalar values as strings. When a string has
- * leading spaces before a minus sign, for example " -10", strtoul
- * skips leading spaces, accepts the leading minus sign, and the
- * calculated digits are negated as if by unary minus. Rule it out in
- * case the user gets a large number when the value is negative.
- */
- std::size_t found = value_.find_first_not_of(" \t");
- if (found != std::string::npos && value_[found] == '-')
- return defaultValue;
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint16_t>::max(),
+ &value))
+ return std::nullopt;
- char *end;
+ return value;
+}
- errno = 0;
- unsigned long value = std::strtoul(value_.c_str(), &end, 10);
+template<>
+std::optional<int32_t> YamlObject::get() const
+{
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ long value;
- if ('\0' != *end || errno == ERANGE ||
- value < std::numeric_limits<uint32_t>::min() ||
- value > std::numeric_limits<uint32_t>::max())
- return defaultValue;
+ if (!parseSignedInteger(value_, std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max(), &value))
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-double YamlObject::get(const double &defaultValue, bool *ok) const
+std::optional<uint32_t> YamlObject::get() const
{
- setOk(ok, false);
+ if (type_ != Type::Value)
+ return std::nullopt;
+
+ unsigned long value;
+ if (!parseUnsignedInteger(value_, std::numeric_limits<uint32_t>::max(),
+ &value))
+ return std::nullopt;
+
+ return value;
+}
+
+template<>
+std::optional<double> YamlObject::get() const
+{
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
if (value_ == "")
- return defaultValue;
+ return std::nullopt;
char *end;
errno = 0;
- double value = std::strtod(value_.c_str(), &end);
+ double value = utils::strtod(value_.c_str(), &end);
if ('\0' != *end || errno == ERANGE)
- return defaultValue;
+ return std::nullopt;
- setOk(ok, true);
return value;
}
template<>
-std::string YamlObject::get(const std::string &defaultValue, bool *ok) const
+std::optional<std::string> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::Value)
- return defaultValue;
+ return std::nullopt;
- setOk(ok, true);
return value_;
}
template<>
-Size YamlObject::get(const Size &defaultValue, bool *ok) const
+std::optional<Size> YamlObject::get() const
{
- setOk(ok, false);
-
if (type_ != Type::List)
- return defaultValue;
+ return std::nullopt;
if (list_.size() != 2)
- return defaultValue;
+ return std::nullopt;
- /*
- * Add a local variable to validate each dimension in case
- * that ok == nullptr.
- */
- bool valid;
- uint32_t width = list_[0]->get<uint32_t>(0, &valid);
- if (!valid)
- return defaultValue;
+ auto width = list_[0].value->get<uint32_t>();
+ if (!width)
+ return std::nullopt;
- uint32_t height = list_[1]->get<uint32_t>(0, &valid);
- if (!valid)
- return defaultValue;
+ auto height = list_[1].value->get<uint32_t>();
+ if (!height)
+ return std::nullopt;
- setOk(ok, true);
- return Size(width, height);
+ return Size(*width, *height);
}
#endif /* __DOXYGEN__ */
/**
+ * \fn template<typename T> YamlObject::getList<T>() const
+ * \brief Parse the YamlObject as a list of \a T
+ *
+ * This function parses the value of the YamlObject as a list of \a T objects,
+ * and returns the value as a \a std::vector<T>. If parsing fails, std::nullopt
+ * is returned.
+ *
+ * \return The YamlObject value as a std::vector<T>, or std::nullopt if parsing
+ * failed
+ */
+
+#ifndef __DOXYGEN__
+
+template<typename T,
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> *>
+std::optional<std::vector<T>> YamlObject::getList() const
+{
+ if (type_ != Type::List)
+ return std::nullopt;
+
+ std::vector<T> values;
+ values.reserve(list_.size());
+
+ for (const YamlObject &entry : asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ values.emplace_back(*value);
+ }
+
+ return values;
+}
+
+template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<double>> YamlObject::getList<double>() const;
+template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
+template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
+template std::optional<std::vector<int16_t>> YamlObject::getList<int16_t>() const;
+template std::optional<std::vector<uint16_t>> YamlObject::getList<uint16_t>() const;
+template std::optional<std::vector<int32_t>> YamlObject::getList<int32_t>() const;
+template std::optional<std::vector<uint32_t>> YamlObject::getList<uint32_t>() const;
+template std::optional<std::vector<std::string>> YamlObject::getList<std::string>() const;
+template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
+
+#endif /* __DOXYGEN__ */
+
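A hedged sketch of the new optional-based accessors, assuming a parsed root object; the keys "width" and "gains" are made up:

    /* get<T>(default) keeps the old call shape, get<T>() exposes failure. */
    uint32_t width = (*root)["width"].get<uint32_t>(640);

    std::optional<std::vector<double>> gains =
            (*root)["gains"].getList<double>();
    if (!gains)
            return -EINVAL;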
+/**
* \fn YamlObject::asDict() const
* \brief Wrap a dictionary YamlObject in an adapter that exposes iterators
*
@@ -379,7 +433,7 @@ const YamlObject &YamlObject::operator[](std::size_t index) const
if (type_ != Type::List || index >= size())
return empty;
- return *list_[index];
+ return *list_[index].value;
}
/**
@@ -395,7 +449,7 @@ const YamlObject &YamlObject::operator[](std::size_t index) const
*/
bool YamlObject::contains(const std::string &key) const
{
- if (dictionary_.find(key) == dictionary_.end())
+ if (dictionary_.find(std::ref(key)) == dictionary_.end())
return false;
return true;
@@ -622,7 +676,7 @@ int YamlParserContext::parseDictionaryOrList(YamlObject::Type type,
* Add a safety counter to make sure we don't loop indefinitely in case
* the YAML file is malformed.
*/
- for (unsigned int sentinel = 1000; sentinel; sentinel--) {
+ for (unsigned int sentinel = 2000; sentinel; sentinel--) {
auto evt = nextEvent();
if (!evt)
return -EINVAL;
@@ -667,16 +721,16 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
yamlObject.type_ = YamlObject::Type::List;
auto &list = yamlObject.list_;
auto handler = [this, &list](EventPtr evt) {
- list.emplace_back(new YamlObject());
- return parseNextYamlObject(*list.back(), std::move(evt));
+ list.emplace_back(std::string{}, std::make_unique<YamlObject>());
+ return parseNextYamlObject(*list.back().value, std::move(evt));
};
return parseDictionaryOrList(YamlObject::Type::List, handler);
}
case YAML_MAPPING_START_EVENT: {
yamlObject.type_ = YamlObject::Type::Dictionary;
- auto &dictionary = yamlObject.dictionary_;
- auto handler = [this, &dictionary](EventPtr evtKey) {
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evtKey) {
/* Parse key */
if (evtKey->type != YAML_SCALAR_EVENT) {
LOG(YamlParser, Error) << "Expect key at line: "
@@ -694,10 +748,19 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
if (!evtValue)
return -EINVAL;
- auto elem = dictionary.emplace(key, std::make_unique<YamlObject>());
- return parseNextYamlObject(*elem.first->second.get(), std::move(evtValue));
+ auto &elem = list.emplace_back(std::move(key),
+ std::make_unique<YamlObject>());
+ return parseNextYamlObject(*elem.value, std::move(evtValue));
};
- return parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ int ret = parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ if (ret)
+ return ret;
+
+ auto &dictionary = yamlObject.dictionary_;
+ for (const auto &elem : list)
+ dictionary.emplace(elem.key, elem.value.get());
+
+ return 0;
}
default:
@@ -753,6 +816,9 @@ int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr even
* The YamlParser::parse() function takes an open FILE, parses its contents, and
* returns a pointer to a YamlObject corresponding to the root node of the YAML
* document.
+ *
+ * The parser preserves the order of items in the YAML file, for both lists and
+ * dictionaries.
*/
/**
@@ -775,7 +841,9 @@ std::unique_ptr<YamlObject> YamlParser::parse(File &file)
std::unique_ptr<YamlObject> root(new YamlObject());
if (context.parseContent(*root)) {
- LOG(YamlParser, Error) << "Failed to parse YAML content";
+ LOG(YamlParser, Error)
+ << "Failed to parse YAML content from "
+ << file.fileName();
return nullptr;
}