Diffstat:
-rw-r--r--  Documentation/Doxyfile.in | 1
-rw-r--r--  Documentation/coding-style.rst | 2
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/meson.build | 1
-rw-r--r--  Documentation/software-isp-benchmarking.rst | 77
-rw-r--r--  include/libcamera/internal/converter/converter_v4l2_m2m.h | 2
-rw-r--r--  include/libcamera/internal/dma_heaps.h (renamed from src/libcamera/pipeline/rpi/vc4/dma_heaps.h) | 14
-rw-r--r--  include/libcamera/internal/meson.build | 3
-rw-r--r--  include/libcamera/internal/shared_mem_object.h | 127
-rw-r--r--  include/libcamera/internal/software_isp/debayer_params.h | 29
-rw-r--r--  include/libcamera/internal/software_isp/meson.build | 7
-rw-r--r--  include/libcamera/internal/software_isp/software_isp.h | 99
-rw-r--r--  include/libcamera/internal/software_isp/swisp_stats.h | 49
-rw-r--r--  include/libcamera/ipa/meson.build | 1
-rw-r--r--  include/libcamera/ipa/soft.mojom | 28
-rw-r--r--  meson_options.txt | 2
-rw-r--r--  src/android/camera_device.cpp | 2
-rw-r--r--  src/ipa/meson.build | 3
-rw-r--r--  src/ipa/simple/black_level.cpp | 88
-rw-r--r--  src/ipa/simple/black_level.h | 28
-rw-r--r--  src/ipa/simple/data/meson.build | 10
-rw-r--r--  src/ipa/simple/data/uncalibrated.yaml | 5
-rw-r--r--  src/ipa/simple/meson.build | 30
-rw-r--r--  src/ipa/simple/soft_simple.cpp | 403
-rw-r--r--  src/libcamera/device_enumerator.cpp | 2
-rw-r--r--  src/libcamera/dma_heaps.cpp | 165
-rw-r--r--  src/libcamera/meson.build | 3
-rw-r--r--  src/libcamera/pipeline/rpi/common/shared_mem_object.h | 128
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp | 90
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/meson.build | 1
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/vc4.cpp | 5
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp | 241
-rw-r--r--  src/libcamera/shared_mem_object.cpp | 236
-rw-r--r--  src/libcamera/software_isp/TODO | 279
-rw-r--r--  src/libcamera/software_isp/debayer.cpp | 132
-rw-r--r--  src/libcamera/software_isp/debayer.h | 54
-rw-r--r--  src/libcamera/software_isp/debayer_cpu.cpp | 807
-rw-r--r--  src/libcamera/software_isp/debayer_cpu.h | 158
-rw-r--r--  src/libcamera/software_isp/meson.build | 15
-rw-r--r--  src/libcamera/software_isp/software_isp.cpp | 357
-rw-r--r--  src/libcamera/software_isp/swstats_cpu.cpp | 432
-rw-r--r--  src/libcamera/software_isp/swstats_cpu.h | 97
-rwxr-xr-x  utils/checkstyle.py | 12
-rw-r--r--  utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl | 4
-rw-r--r--  utils/ipu3/ipu3-pack.c | 4
-rw-r--r--  utils/ipu3/ipu3-unpack.c | 3
-rwxr-xr-x  utils/raspberrypi/ctt/ctt.py | 2
47 files changed, 3939 insertions, 300 deletions
diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
index a86ea6c1..2be8d47b 100644
--- a/Documentation/Doxyfile.in
+++ b/Documentation/Doxyfile.in
@@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+ @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
diff --git a/Documentation/coding-style.rst b/Documentation/coding-style.rst
index 053fdd99..72cb28d2 100644
--- a/Documentation/coding-style.rst
+++ b/Documentation/coding-style.rst
@@ -59,7 +59,7 @@ document:
underscores in between
* All formatting rules specified in the selected sections of the Linux kernel
Code Style for indentation, braces, spacing, etc
-* Header guards are formatted as '__LIBCAMERA_FILE_NAME_H__'
+* Headers are guarded by the use of '#pragma once'
Order of Includes
~~~~~~~~~~~~~~~~~
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 63fac72d..5442ae75 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -24,3 +24,4 @@
Lens driver requirements <lens_driver_requirements>
Python Bindings <python-bindings>
Camera Sensor Model <camera-sensor-model>
+ SoftwareISP Benchmarking <software-isp-benchmarking>
diff --git a/Documentation/meson.build b/Documentation/meson.build
index 7a58fec8..3872e0a8 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -80,6 +80,7 @@ if sphinx.found()
'lens_driver_requirements.rst',
'python-bindings.rst',
'sensor_driver_requirements.rst',
+ 'software-isp-benchmarking.rst',
'../README.rst',
]
diff --git a/Documentation/software-isp-benchmarking.rst b/Documentation/software-isp-benchmarking.rst
new file mode 100644
index 00000000..b3033132
--- /dev/null
+++ b/Documentation/software-isp-benchmarking.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _software-isp-benchmarking:
+
+Software ISP benchmarking
+=========================
+
+The Software ISP is particularly sensitive to performance regressions, so it is
+a good idea to always benchmark the Software ISP before and after making
+changes to it, to ensure that there are no performance regressions.
+
+DebayerCpu class builtin benchmark
+----------------------------------
+
+The DebayerCpu class has a builtin benchmark. This benchmark measures only the
+time spent on processing (collecting statistics and debayering); it does not
+measure the time spent on capturing or outputting the frames.
+
+The builtin benchmark always runs, so it can be used by simply running "cam"
+or "qcam" with a pipeline using the Software ISP.
+
+When it runs it skips measuring the first 30 frames to allow the caches and
+the CPU temperature (turbo-ing) to warm up, then it measures 30 frames and
+shows the total and per frame processing time using an info level log message:
+
+.. code-block:: text
+
+ INFO Debayer debayer_cpu.cpp:907 Processed 30 frames in 244317us, 8143 us/frame
+
+To get stable measurements it is advised to disable any other processes which
+may cause significant CPU usage (e.g. disable Wi-Fi, Bluetooth and browsers).
+When possible it is also advisable to disable CPU turbo-ing and
+frequency-scaling.
+
+For example, when benchmarking on a Lenovo ThinkPad X1 Yoga Gen 8 with the
+charger plugged in, the CPU can be fixed to run at 2 GHz using:
+
+.. code-block:: shell
+
+ sudo x86_energy_perf_policy --turbo-enable 0
+ sudo cpupower frequency-set -d 2GHz -u 2GHz
+
+With these settings the builtin benchmark reports a processing time of
+~7.8 ms/frame on this laptop for FHD SGRBG10 (unpacked) Bayer data.
+
+Measuring power consumption
+---------------------------
+
+Since the Software ISP is often used on mobile devices it is also important to
+measure power consumption and ensure that it does not regress.
+
+For example, to measure power consumption on a Lenovo ThinkPad X1 Yoga Gen 8 it
+needs to be running on battery and it should be configured with its
+platform-profile (/sys/firmware/acpi/platform_profile) set to balanced and with
+its default turbo and frequency-scaling behavior, to match real world usage.
+
+Then start qcam to capture an FHD picture at 30 fps and position the qcam
+window so that it is fully visible. After this, run the following command to
+monitor the power consumption:
+
+.. code-block:: shell
+
+ watch -n 10 cat /sys/class/power_supply/BAT0/power_now /sys/class/hwmon/hwmon6/fan?_input
+
+Note that this not only measures the power consumption in µW, it also monitors
+the speed of this laptop's 2 fans. This is important because, depending on the
+ambient temperature, the 2 fans may spin up while testing and this will cause
+an additional power consumption of approx. 0.5 W, skewing the measurement.
+
+After starting qcam and the watch command, let the laptop sit without using it
+for 2 minutes to let the readings stabilize. Then check that the fans have not
+turned on, and manually take a couple of consecutive power readings and average
+these.
+
+On the example Lenovo ThinkPad X1 Yoga Gen 8 laptop this results in a measured
+power consumption of approx. 13 W while running qcam versus approx. 4-5 W while
+sitting idle with its OLED panel on.
diff --git a/include/libcamera/internal/converter/converter_v4l2_m2m.h b/include/libcamera/internal/converter/converter_v4l2_m2m.h
index 815916d0..84fb485f 100644
--- a/include/libcamera/internal/converter/converter_v4l2_m2m.h
+++ b/include/libcamera/internal/converter/converter_v4l2_m2m.h
@@ -47,7 +47,7 @@ public:
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int ouput, unsigned int count,
+ int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
int start();
diff --git a/src/libcamera/pipeline/rpi/vc4/dma_heaps.h b/include/libcamera/internal/dma_heaps.h
index 0a4a8d86..80bf29e7 100644
--- a/src/libcamera/pipeline/rpi/vc4/dma_heaps.h
+++ b/include/libcamera/internal/dma_heaps.h
@@ -9,16 +9,22 @@
#include <stddef.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
-namespace RPi {
-
class DmaHeap
{
public:
- DmaHeap();
+ enum class DmaHeapFlag {
+ Cma = 1 << 0,
+ System = 1 << 1,
+ };
+
+ using DmaHeapFlags = Flags<DmaHeapFlag>;
+
+ DmaHeap(DmaHeapFlags flags = DmaHeapFlag::Cma);
~DmaHeap();
bool isValid() const { return dmaHeapHandle_.isValid(); }
UniqueFD alloc(const char *name, std::size_t size);
@@ -27,6 +33,6 @@ private:
UniqueFD dmaHeapHandle_;
};
-} /* namespace RPi */
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaHeap::DmaHeapFlag)
} /* namespace libcamera */
diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build
index 7f1f3440..160fdc37 100644
--- a/include/libcamera/internal/meson.build
+++ b/include/libcamera/internal/meson.build
@@ -25,6 +25,7 @@ libcamera_internal_headers = files([
'device_enumerator.h',
'device_enumerator_sysfs.h',
'device_enumerator_udev.h',
+ 'dma_heaps.h',
'formats.h',
'framebuffer.h',
'ipa_manager.h',
@@ -38,6 +39,7 @@ libcamera_internal_headers = files([
'process.h',
'pub_key.h',
'request.h',
+ 'shared_mem_object.h',
'source_paths.h',
'sysfs.h',
'v4l2_device.h',
@@ -48,3 +50,4 @@ libcamera_internal_headers = files([
])
subdir('converter')
+subdir('software_isp')
diff --git a/include/libcamera/internal/shared_mem_object.h b/include/libcamera/internal/shared_mem_object.h
new file mode 100644
index 00000000..9b1d9393
--- /dev/null
+++ b/include/libcamera/internal/shared_mem_object.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ *
+ * shared_mem_object.h - Helpers for shared memory allocations
+ */
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include <sys/mman.h>
+#include <type_traits>
+#include <utility>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+class SharedMem
+{
+public:
+ SharedMem();
+
+ SharedMem(const std::string &name, std::size_t size);
+ SharedMem(SharedMem &&rhs);
+
+ virtual ~SharedMem();
+
+ SharedMem &operator=(SharedMem &&rhs);
+
+ const SharedFD &fd() const
+ {
+ return fd_;
+ }
+
+ Span<uint8_t> mem() const
+ {
+ return mem_;
+ }
+
+ explicit operator bool() const
+ {
+ return !mem_.empty();
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMem)
+
+ SharedFD fd_;
+
+ Span<uint8_t> mem_;
+};
+
+template<class T, typename = std::enable_if_t<std::is_standard_layout<T>::value>>
+class SharedMemObject : public SharedMem
+{
+public:
+ static constexpr std::size_t kSize = sizeof(T);
+
+ SharedMemObject()
+ : SharedMem(), obj_(nullptr)
+ {
+ }
+
+ template<class... Args>
+ SharedMemObject(const std::string &name, Args &&...args)
+ : SharedMem(name, kSize), obj_(nullptr)
+ {
+ if (mem().empty())
+ return;
+
+ obj_ = new (mem().data()) T(std::forward<Args>(args)...);
+ }
+
+ SharedMemObject(SharedMemObject<T> &&rhs)
+ : SharedMem(std::move(rhs))
+ {
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ }
+
+ ~SharedMemObject()
+ {
+ if (obj_)
+ obj_->~T();
+ }
+
+ SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
+ {
+ SharedMem::operator=(std::move(rhs));
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ return *this;
+ }
+
+ T *operator->()
+ {
+ return obj_;
+ }
+
+ const T *operator->() const
+ {
+ return obj_;
+ }
+
+ T &operator*()
+ {
+ return *obj_;
+ }
+
+ const T &operator*() const
+ {
+ return *obj_;
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMemObject)
+
+ T *obj_;
+};
+
+} /* namespace libcamera */
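A minimal usage sketch of the new helper, for illustration only and not part of the patch; the object name and initial values are made up, and the real Software ISP code passes the fd() to its IPA instead:

.. code-block:: cpp

   #include "libcamera/internal/shared_mem_object.h"
   #include "libcamera/internal/software_isp/debayer_params.h"

   using namespace libcamera;

   void sharedParamsExample()
   {
           /* Construct a DebayerParams instance in anonymous shared memory. */
           SharedMemObject<DebayerParams> params("example_params",
                                                 DebayerParams{ 256, 256, 256, 0.5f, 0 });
           if (!params)
                   return; /* memfd creation or mmap() failed */

           /* Locally the object is accessed like a pointer... */
           params->gainR = 512;

           /* ...while params.fd() can be passed to another process, e.g. the IPA. */
           const SharedFD &fd = params.fd();
           (void)fd;
   }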
diff --git a/include/libcamera/internal/software_isp/debayer_params.h b/include/libcamera/internal/software_isp/debayer_params.h
new file mode 100644
index 00000000..32cd448a
--- /dev/null
+++ b/include/libcamera/internal/software_isp/debayer_params.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer_params.h - DebayerParams header
+ */
+
+#pragma once
+
+namespace libcamera {
+
+struct DebayerParams {
+ static constexpr unsigned int kGain10 = 256;
+
+ unsigned int gainR;
+ unsigned int gainG;
+ unsigned int gainB;
+
+ float gamma;
+ /**
+ * \brief Level of the black point, 0..255, 0 is no correction.
+ */
+ unsigned int blackLevel;
+};
+
+} /* namespace libcamera */
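As a hedged illustration (not part of the patch) of how these fixed-point gains are meant to be read, with kGain10 (256) taken as a gain of 1.0; the actual per-pixel handling lives in the DebayerCpu implementation:

.. code-block:: cpp

   #include <algorithm>
   #include <cstdint>

   #include "libcamera/internal/software_isp/debayer_params.h"

   /* Hypothetical helper: apply the red white-balance gain to one 8-bit sample. */
   uint8_t applyGainR(uint8_t sample, const libcamera::DebayerParams &params)
   {
           /* kGain10 (256) is the fixed-point representation of gain 1.0. */
           unsigned int value = sample * params.gainR / libcamera::DebayerParams::kGain10;
           return static_cast<uint8_t>(std::min(value, 255u));
   }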
diff --git a/include/libcamera/internal/software_isp/meson.build b/include/libcamera/internal/software_isp/meson.build
new file mode 100644
index 00000000..508ddddc
--- /dev/null
+++ b/include/libcamera/internal/software_isp/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_headers += files([
+ 'debayer_params.h',
+ 'software_isp.h',
+ 'swisp_stats.h',
+])
diff --git a/include/libcamera/internal/software_isp/software_isp.h b/include/libcamera/internal/software_isp/software_isp.h
new file mode 100644
index 00000000..42e96dcf
--- /dev/null
+++ b/include/libcamera/internal/software_isp/software_isp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * software_isp.h - Simple software ISP implementation
+ */
+
+#pragma once
+
+#include <functional>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+#include <libcamera/ipa/soft_ipa_proxy.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/dma_heaps.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class DebayerCpu;
+class FrameBuffer;
+class PixelFormat;
+struct StreamConfiguration;
+
+LOG_DECLARE_CATEGORY(SoftwareIsp)
+
+class SoftwareIsp
+{
+public:
+ SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor);
+ ~SoftwareIsp();
+
+ int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
+
+ bool isValid() const;
+
+ std::vector<PixelFormat> formats(PixelFormat input);
+
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ControlInfoMap &sensorControls);
+
+ int exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ void processStats(const ControlList &sensorControls);
+
+ int start();
+ void stop();
+
+ int queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs);
+
+ void process(FrameBuffer *input, FrameBuffer *output);
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+ Signal<> ispStatsReady;
+ Signal<const ControlList &> setSensorControls;
+
+private:
+ void saveIspParams();
+ void setSensorCtrls(const ControlList &sensorControls);
+ void statsReady();
+ void inputReady(FrameBuffer *input);
+ void outputReady(FrameBuffer *output);
+
+ std::unique_ptr<DebayerCpu> debayer_;
+ Thread ispWorkerThread_;
+ SharedMemObject<DebayerParams> sharedParams_;
+ DebayerParams debayerParams_;
+ DmaHeap dmaHeap_;
+
+ std::unique_ptr<ipa::soft::IPAProxySoft> ipa_;
+};
+
+} /* namespace libcamera */
diff --git a/include/libcamera/internal/software_isp/swisp_stats.h b/include/libcamera/internal/software_isp/swisp_stats.h
new file mode 100644
index 00000000..4ca8d647
--- /dev/null
+++ b/include/libcamera/internal/software_isp/swisp_stats.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * swisp_stats.h - Statistics data format used by the software ISP and software IPA
+ */
+
+#pragma once
+
+#include <array>
+#include <stdint.h>
+
+namespace libcamera {
+
+/**
+ * \brief Struct that holds the statistics for the Software ISP
+ *
+ * The struct value types are large enough to not overflow.
+ * Should they still overflow for some reason, no check is performed and they
+ * wrap around.
+ */
+struct SwIspStats {
+ /**
+ * \brief Holds the sum of all sampled red pixels
+ */
+ uint64_t sumR_;
+ /**
+ * \brief Holds the sum of all sampled green pixels
+ */
+ uint64_t sumG_;
+ /**
+ * \brief Holds the sum of all sampled blue pixels
+ */
+ uint64_t sumB_;
+ /**
+ * \brief Number of bins in the yHistogram
+ */
+ static constexpr unsigned int kYHistogramSize = 64;
+ /**
+ * \brief Type of the histogram.
+ */
+ using Histogram = std::array<uint32_t, kYHistogramSize>;
+ /**
+ * \brief A histogram of luminance values
+ */
+ Histogram yHistogram;
+};
+
+} /* namespace libcamera */
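A hedged sketch (not part of the patch) of how a consumer can interpret these statistics; the grey-world ratio mirrors what the Soft IPA below computes, while the mean-bin calculation is only an illustration:

.. code-block:: cpp

   #include <cstdint>

   #include "libcamera/internal/software_isp/swisp_stats.h"

   using libcamera::SwIspStats;

   void consumeStats(const SwIspStats &stats)
   {
           /* Grey-world AWB ratios in the same 256 = 1.0 fixed point as DebayerParams. */
           unsigned int gainR = stats.sumR_ ? 256 * stats.sumG_ / stats.sumR_ : 256;
           unsigned int gainB = stats.sumB_ ? 256 * stats.sumG_ / stats.sumB_ : 256;

           /* Mean bin of the 64-entry luminance histogram. */
           uint64_t pixels = 0;
           uint64_t weighted = 0;
           for (unsigned int i = 0; i < SwIspStats::kYHistogramSize; i++) {
                   pixels += stats.yHistogram[i];
                   weighted += static_cast<uint64_t>(stats.yHistogram[i]) * i;
           }
           double meanBin = pixels ? static_cast<double>(weighted) / pixels : 0.0;

           (void)gainR;
           (void)gainB;
           (void)meanBin;
   }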
diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
index f3b4881c..3352d08f 100644
--- a/include/libcamera/ipa/meson.build
+++ b/include/libcamera/ipa/meson.build
@@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
'ipu3': 'ipu3.mojom',
'rkisp1': 'rkisp1.mojom',
'rpi/vc4': 'raspberrypi.mojom',
+ 'simple': 'soft.mojom',
'vimc': 'vimc.mojom',
}
diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
new file mode 100644
index 00000000..3aa2066e
--- /dev/null
+++ b/include/libcamera/ipa/soft.mojom
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+/*
+ * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+ */
+
+module ipa.soft;
+
+import "include/libcamera/ipa/core.mojom";
+
+interface IPASoftInterface {
+ init(libcamera.IPASettings settings,
+ libcamera.SharedFD fdStats,
+ libcamera.SharedFD fdParams,
+ libcamera.ControlInfoMap sensorCtrlInfoMap)
+ => (int32 ret);
+ start() => (int32 ret);
+ stop();
+ configure(libcamera.ControlInfoMap sensorCtrlInfoMap)
+ => (int32 ret);
+
+ [async] processStats(libcamera.ControlList sensorControls);
+};
+
+interface IPASoftEventInterface {
+ setSensorControls(libcamera.ControlList sensorControls);
+ setIspParams();
+};
diff --git a/meson_options.txt b/meson_options.txt
index 7c4f6d3a..c61eb555 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -27,7 +27,7 @@ option('gstreamer',
option('ipas',
type : 'array',
- choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'vimc'],
+ choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'vimc'],
description : 'Select which IPA modules to build')
option('lc-compliance',
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index d2679a97..1b6f3f3a 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -433,8 +433,6 @@ void CameraDevice::flush()
void CameraDevice::stop()
{
MutexLocker stateLock(stateMutex_);
- if (state_ == State::Stopped)
- return;
camera_->stop();
diff --git a/src/ipa/meson.build b/src/ipa/meson.build
index 48793e07..0ad4631d 100644
--- a/src/ipa/meson.build
+++ b/src/ipa/meson.build
@@ -41,6 +41,9 @@ ipa_names = []
subdirs = []
foreach pipeline : pipelines
+ # The current implementation expects the IPA module name to match the
+ # pipeline name.
+ # \todo Make the IPA naming scheme more flexible.
if not ipa_modules.contains(pipeline)
continue
endif
diff --git a/src/ipa/simple/black_level.cpp b/src/ipa/simple/black_level.cpp
new file mode 100644
index 00000000..c7e8d8b7
--- /dev/null
+++ b/src/ipa/simple/black_level.cpp
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * black_level.cpp - black level handling
+ */
+
+#include "black_level.h"
+
+#include <numeric>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftBL)
+
+/**
+ * \class BlackLevel
+ * \brief Object providing black point level for software ISP
+ *
+ * Black level can be provided in hardware tuning files or, if no tuning file is
+ * available for the given hardware, guessed automatically, with less accuracy.
+ * As tuning files are not yet implemented for software ISP, BlackLevel
+ * currently provides only guessed black levels.
+ *
+ * This class serves for tracking the black level as a property of the
+ * underlying hardware, not as a means of enhancing a particular scene or image.
+ *
+ * The class is supposed to be instantiated for the given camera stream.
+ * The black level can be retrieved using the BlackLevel::get() method. It is
+ * initially 0 and may change when updated using the BlackLevel::update() method.
+ */
+
+BlackLevel::BlackLevel()
+ : blackLevel_(255), blackLevelSet_(false)
+{
+}
+
+/**
+ * \brief Return the current black level
+ *
+ * \return The black level, in the range from 0 (minimum) to 255 (maximum).
+ * If the black level couldn't be determined yet, 0 is returned.
+ */
+unsigned int BlackLevel::get() const
+{
+ return blackLevelSet_ ? blackLevel_ : 0;
+}
+
+/**
+ * \brief Update black level from the provided histogram
+ * \param[in] yHistogram The histogram to be used for updating black level
+ *
+ * The black level is a property of the given hardware, not of the image. It is
+ * updated only if it has not yet been set or if a lower black level is observed
+ * in the provided histogram.
+ */
+void BlackLevel::update(SwIspStats::Histogram &yHistogram)
+{
+ /*
+ * The constant is selected to be "good enough", not overly conservative or
+ * aggressive. There is no magic about the given value.
+ */
+ constexpr float ignoredPercentage_ = 0.02;
+ const unsigned int total =
+ std::accumulate(begin(yHistogram), end(yHistogram), 0);
+ const unsigned int pixelThreshold = ignoredPercentage_ * total;
+ const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize;
+ const unsigned int currentBlackIdx = blackLevel_ / histogramRatio;
+
+ for (unsigned int i = 0, seen = 0;
+ i < currentBlackIdx && i < SwIspStats::kYHistogramSize;
+ i++) {
+ seen += yHistogram[i];
+ if (seen >= pixelThreshold) {
+ blackLevel_ = i * histogramRatio;
+ blackLevelSet_ = true;
+ LOG(IPASoftBL, Debug)
+ << "Auto-set black level: "
+ << i << "/" << SwIspStats::kYHistogramSize
+ << " (" << 100 * (seen - yHistogram[i]) / total << "% below, "
+ << 100 * seen / total << "% at or below)";
+ break;
+ }
+ };
+}
+} /* namespace libcamera */
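A worked example (not part of the patch), using a made-up histogram, of how the 2% threshold above selects the black level:

.. code-block:: cpp

   #include "black_level.h"

   #include "libcamera/internal/software_isp/swisp_stats.h"

   using namespace libcamera;

   void blackLevelExample()
   {
           BlackLevel blackLevel;

           /* 1000 samples: nothing below bin 2, 30 samples in bin 2, the rest higher. */
           SwIspStats::Histogram hist{};
           hist[2] = 30;
           hist[32] = 970;

           /*
            * With a 2% threshold (20 of 1000 samples) the cumulative count first
            * crosses the threshold at bin 2, so the level becomes 2 * (256 / 64) = 8.
            */
           blackLevel.update(hist);
           unsigned int level = blackLevel.get(); /* 8 */
           (void)level;
   }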
diff --git a/src/ipa/simple/black_level.h b/src/ipa/simple/black_level.h
new file mode 100644
index 00000000..7e37757e
--- /dev/null
+++ b/src/ipa/simple/black_level.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * black_level.h - black level handling
+ */
+
+#pragma once
+
+#include <array>
+
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class BlackLevel
+{
+public:
+ BlackLevel();
+ unsigned int get() const;
+ void update(SwIspStats::Histogram &yHistogram);
+
+private:
+ unsigned int blackLevel_;
+ bool blackLevelSet_;
+};
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/data/meson.build b/src/ipa/simple/data/meson.build
new file mode 100644
index 00000000..92795ee4
--- /dev/null
+++ b/src/ipa/simple/data/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+# The install_dir must match the name from the IPAModuleInfo
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'simple',
+ install_tag : 'runtime')
diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml
new file mode 100644
index 00000000..ff981a1a
--- /dev/null
+++ b/src/ipa/simple/data/uncalibrated.yaml
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+...
diff --git a/src/ipa/simple/meson.build b/src/ipa/simple/meson.build
new file mode 100644
index 00000000..44b5f1d7
--- /dev/null
+++ b/src/ipa/simple/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: CC0-1.0
+
+ipa_name = 'ipa_soft_simple'
+
+soft_simple_sources = files([
+ 'soft_simple.cpp',
+ 'black_level.cpp',
+])
+
+mod = shared_module(ipa_name,
+ [soft_simple_sources, libcamera_generated_ipa_headers],
+ name_prefix : '',
+ include_directories : [ipa_includes, libipa_includes],
+ dependencies : libcamera_private,
+ link_with : libipa,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/simple/soft_simple.cpp b/src/ipa/simple/soft_simple.cpp
new file mode 100644
index 00000000..b9fb58b5
--- /dev/null
+++ b/src/ipa/simple/soft_simple.cpp
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * soft_simple.cpp - Simple Software Image Processing Algorithm module
+ */
+
+#include <sys/mman.h>
+
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/camera_sensor_helper.h"
+
+#include "black_level.h"
+
+namespace libcamera {
+LOG_DEFINE_CATEGORY(IPASoft)
+
+namespace ipa::soft {
+
+/*
+ * The number of bins to use for the optimal exposure calculations.
+ */
+static constexpr unsigned int kExposureBinsCount = 5;
+
+/*
+ * The exposure is optimal when the mean sample value of the histogram is
+ * in the middle of the range.
+ */
+static constexpr float kExposureOptimal = kExposureBinsCount / 2.0;
+
+/*
+ * The below value implements the hysteresis for the exposure adjustment.
+ * It is small enough to have the exposure close to the optimal, and is big
+ * enough to prevent the exposure from wobbling around the optimal value.
+ */
+static constexpr float kExposureSatisfactory = 0.2;
+
+class IPASoftSimple : public ipa::soft::IPASoftInterface
+{
+public:
+ IPASoftSimple()
+ : params_(nullptr), stats_(nullptr), blackLevel_(BlackLevel()),
+ ignoreUpdates_(0)
+ {
+ }
+
+ ~IPASoftSimple();
+
+ int init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap) override;
+ int configure(const ControlInfoMap &sensorInfoMap) override;
+
+ int start() override;
+ void stop() override;
+
+ void processStats(const ControlList &sensorControls) override;
+
+private:
+ void updateExposure(double exposureMSV);
+
+ DebayerParams *params_;
+ SwIspStats *stats_;
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+ ControlInfoMap sensorInfoMap_;
+ BlackLevel blackLevel_;
+
+ int32_t exposureMin_, exposureMax_;
+ int32_t exposure_;
+ double againMin_, againMax_, againMinStep_;
+ double again_;
+ unsigned int ignoreUpdates_;
+};
+
+IPASoftSimple::~IPASoftSimple()
+{
+ if (stats_)
+ munmap(stats_, sizeof(SwIspStats));
+ if (params_)
+ munmap(params_, sizeof(DebayerParams));
+}
+
+int IPASoftSimple::init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPASoft, Warning)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ }
+
+ /* Load the tuning data file */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPASoft, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ /* \todo Use the IPA configuration file for real. */
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ LOG(IPASoft, Debug) << "Tuning file version " << version;
+
+ params_ = nullptr;
+ stats_ = nullptr;
+
+ if (!fdStats.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Statistics handle";
+ return -ENODEV;
+ }
+
+ if (!fdParams.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Parameters handle";
+ return -ENODEV;
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(DebayerParams), PROT_WRITE,
+ MAP_SHARED, fdParams.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Parameters";
+ return -errno;
+ }
+
+ params_ = static_cast<DebayerParams *>(mem);
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(SwIspStats), PROT_READ,
+ MAP_SHARED, fdStats.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Statistics";
+ return -errno;
+ }
+
+ stats_ = static_cast<SwIspStats *>(mem);
+ }
+
+ /*
+ * Check if the sensor driver supports the controls required by the
+ * Soft IPA.
+ * Don't save the min and max control values yet, as e.g. the limits
+ * for V4L2_CID_EXPOSURE depend on the configured sensor resolution.
+ */
+ if (sensorInfoMap.find(V4L2_CID_EXPOSURE) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have exposure control";
+ return -EINVAL;
+ }
+
+ if (sensorInfoMap.find(V4L2_CID_ANALOGUE_GAIN) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have gain control";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int IPASoftSimple::configure(const ControlInfoMap &sensorInfoMap)
+{
+ sensorInfoMap_ = sensorInfoMap;
+
+ const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second;
+ const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second;
+
+ exposureMin_ = exposureInfo.min().get<int32_t>();
+ exposureMax_ = exposureInfo.max().get<int32_t>();
+ if (!exposureMin_) {
+ LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear";
+ exposureMin_ = 1;
+ }
+
+ int32_t againMin = gainInfo.min().get<int32_t>();
+ int32_t againMax = gainInfo.max().get<int32_t>();
+
+ if (camHelper_) {
+ againMin_ = camHelper_->gain(againMin);
+ againMax_ = camHelper_->gain(againMax);
+ againMinStep_ = (againMax_ - againMin_) / 100.0;
+ } else {
+ /*
+ * The camera sensor gain (g) is usually not equal to the value written
+ * into the gain register (x). But the way the AGC algorithm changes
+ * the gain value to make the total exposure closer to the optimum
+ * assumes that g(x) is not too far from a linear function. If the
+ * minimal gain is 0, g(x) is likely to be far from linear, e.g.
+ * g(x) = a / (b * x + c). To avoid unexpected changes to the gain by
+ * the AGC algorithm (abrupt near one edge, and very small near the
+ * other) we limit the range of the gain values used.
+ */
+ againMax_ = againMax;
+ if (!againMin) {
+ LOG(IPASoft, Warning)
+ << "Minimum gain is zero, that can't be linear";
+ againMin_ = std::min(100, againMin / 2 + againMax / 2);
+ }
+ againMinStep_ = 1.0;
+ }
+
+ LOG(IPASoft, Info) << "Exposure " << exposureMin_ << "-" << exposureMax_
+ << ", gain " << againMin_ << "-" << againMax_
+ << " (" << againMinStep_ << ")";
+
+ return 0;
+}
+
+int IPASoftSimple::start()
+{
+ return 0;
+}
+
+void IPASoftSimple::stop()
+{
+}
+
+void IPASoftSimple::processStats(const ControlList &sensorControls)
+{
+ /*
+ * Calculate red and blue gains for AWB.
+ * Clamp max gain at 4.0; this also avoids division by zero.
+ */
+ if (stats_->sumR_ <= stats_->sumG_ / 4)
+ params_->gainR = 1024;
+ else
+ params_->gainR = 256 * stats_->sumG_ / stats_->sumR_;
+
+ if (stats_->sumB_ <= stats_->sumG_ / 4)
+ params_->gainB = 1024;
+ else
+ params_->gainB = 256 * stats_->sumG_ / stats_->sumB_;
+
+ /* Green gain and gamma values are fixed */
+ params_->gainG = 256;
+ params_->gamma = 0.5;
+
+ if (ignoreUpdates_ > 0)
+ blackLevel_.update(stats_->yHistogram);
+ params_->blackLevel = blackLevel_.get();
+
+ setIspParams.emit();
+
+ /* \todo Switch to the libipa/algorithm.h API someday. */
+
+ /*
+ * AE / AGC: use a delay of 2 frames to make sure that the exposure and
+ * the gain set have been applied to the camera sensor.
+ * \todo This could be handled better with DelayedControls.
+ */
+ if (ignoreUpdates_ > 0) {
+ --ignoreUpdates_;
+ return;
+ }
+
+ /*
+ * Calculate Mean Sample Value (MSV) according to formula from:
+ * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf
+ */
+ const unsigned int blackLevelHistIdx =
+ params_->blackLevel / (256 / SwIspStats::kYHistogramSize);
+ const unsigned int histogramSize =
+ SwIspStats::kYHistogramSize - blackLevelHistIdx;
+ const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount;
+ const unsigned int yHistValsPerBinMod =
+ histogramSize / (histogramSize % kExposureBinsCount + 1);
+ int exposureBins[kExposureBinsCount] = {};
+ unsigned int denom = 0;
+ unsigned int num = 0;
+
+ for (unsigned int i = 0; i < histogramSize; i++) {
+ unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin;
+ exposureBins[idx] += stats_->yHistogram[blackLevelHistIdx + i];
+ }
+
+ for (unsigned int i = 0; i < kExposureBinsCount; i++) {
+ LOG(IPASoft, Debug) << i << ": " << exposureBins[i];
+ denom += exposureBins[i];
+ num += exposureBins[i] * (i + 1);
+ }
+
+ float exposureMSV = static_cast<float>(num) / denom;
+
+ /* Sanity check */
+ if (!sensorControls.contains(V4L2_CID_EXPOSURE) ||
+ !sensorControls.contains(V4L2_CID_ANALOGUE_GAIN)) {
+ LOG(IPASoft, Error) << "Control(s) missing";
+ return;
+ }
+
+ exposure_ = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+ again_ = camHelper_ ? camHelper_->gain(again) : again;
+
+ updateExposure(exposureMSV);
+
+ ControlList ctrls(sensorInfoMap_);
+
+ ctrls.set(V4L2_CID_EXPOSURE, exposure_);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN,
+ static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(again_) : again_));
+
+ ignoreUpdates_ = 2;
+
+ setSensorControls.emit(ctrls);
+
+ LOG(IPASoft, Debug) << "exposureMSV " << exposureMSV
+ << " exp " << exposure_ << " again " << again_
+ << " gain R/B " << params_->gainR << "/" << params_->gainB
+ << " black level " << params_->blackLevel;
+}
+
+void IPASoftSimple::updateExposure(double exposureMSV)
+{
+ /*
+ * A kExpDenominator of 10 gives a ~10% increment/decrement;
+ * a kExpDenominator of 5 gives ~20%.
+ */
+ static constexpr uint8_t kExpDenominator = 10;
+ static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1;
+ static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1;
+
+ double next;
+
+ if (exposureMSV < kExposureOptimal - kExposureSatisfactory) {
+ next = exposure_ * kExpNumeratorUp / kExpDenominator;
+ if (next - exposure_ < 1)
+ exposure_ += 1;
+ else
+ exposure_ = next;
+ if (exposure_ >= exposureMax_) {
+ next = again_ * kExpNumeratorUp / kExpDenominator;
+ if (next - again_ < againMinStep_)
+ again_ += againMinStep_;
+ else
+ again_ = next;
+ }
+ }
+
+ if (exposureMSV > kExposureOptimal + kExposureSatisfactory) {
+ if (exposure_ == exposureMax_ && again_ > againMin_) {
+ next = again_ * kExpNumeratorDown / kExpDenominator;
+ if (again_ - next < againMinStep_)
+ again_ -= againMinStep_;
+ else
+ again_ = next;
+ } else {
+ next = exposure_ * kExpNumeratorDown / kExpDenominator;
+ if (exposure_ - next < 1)
+ exposure_ -= 1;
+ else
+ exposure_ = next;
+ }
+ }
+
+ exposure_ = std::clamp(exposure_, exposureMin_, exposureMax_);
+ again_ = std::clamp(again_, againMin_, againMax_);
+}
+
+} /* namespace ipa::soft */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 0,
+ "SimplePipelineHandler",
+ "simple",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::soft::IPASoftSimple();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
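A standalone recomputation (not part of the patch) of the Mean Sample Value used by processStats() above, with a made-up 5-bin histogram, to show how the AGC decision falls out of it:

.. code-block:: cpp

   #include <array>
   #include <cstdio>

   int main()
   {
           constexpr unsigned int kExposureBinsCount = 5;

           /* Made-up histogram: most pixels fall into the two darkest bins. */
           std::array<unsigned int, kExposureBinsCount> bins = { 700, 200, 60, 30, 10 };

           unsigned int num = 0;
           unsigned int denom = 0;
           for (unsigned int i = 0; i < kExposureBinsCount; i++) {
                   num += bins[i] * (i + 1);
                   denom += bins[i];
           }

           double msv = static_cast<double>(num) / denom;

           /*
            * The target is kExposureBinsCount / 2.0 = 2.5 +/- 0.2. Here msv is
            * 1450 / 1000 = 1.45, well below 2.3, so the exposure (or, once the
            * exposure limit is reached, the analogue gain) is increased by
            * roughly 10% (11/10) for the next iteration.
            */
           printf("MSV = %.2f\n", msv);
           return 0;
   }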
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index 42b5ba6c..fbbf0559 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -56,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
diff --git a/src/libcamera/dma_heaps.cpp b/src/libcamera/dma_heaps.cpp
new file mode 100644
index 00000000..b4509e72
--- /dev/null
+++ b/src/libcamera/dma_heaps.cpp
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * dma_heaps.cpp - Helper class for dma-heap allocations.
+ */
+
+#include "libcamera/internal/dma_heaps.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file dma_heaps.cpp
+ * \brief dma-heap allocator
+ */
+
+namespace libcamera {
+
+/*
+ * /dev/dma_heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
+ * to only have to worry about importing.
+ *
+ * Annoyingly, should the cma heap size be specified on the kernel command line
+ * instead of DT, the heap gets named "reserved" instead.
+ */
+
+#ifndef __DOXYGEN__
+struct DmaHeapInfo {
+ DmaHeap::DmaHeapFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaHeapInfo, 3> heapInfos = { {
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/linux,cma" },
+ { DmaHeap::DmaHeapFlag::Cma, "/dev/dma_heap/reserved" },
+ { DmaHeap::DmaHeapFlag::System, "/dev/dma_heap/system" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaHeap)
+
+/**
+ * \class DmaHeap
+ * \brief Helper class for dma-heap allocations
+ *
+ * DMA heaps are kernel devices that provide an API to allocate memory from
+ * different pools called "heaps", wrap each allocated piece of memory in a
+ * dmabuf object, and return the dmabuf file descriptor to userspace. Multiple
+ * heaps can be provided by the system, with different properties for the
+ * underlying memory.
+ *
+ * This class wraps a DMA heap selected at construction time, and exposes
+ * functions to manage memory allocation.
+ */
+
+/**
+ * \enum DmaHeap::DmaHeapFlag
+ * \brief Type of the dma-heap
+ * \var DmaHeap::Cma
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaHeap::System
+ * \brief Allocate from the system dma-heap, using the page allocator
+ */
+
+/**
+ * \typedef DmaHeap::DmaHeapFlags
+ * \brief A bitwise combination of DmaHeap::DmaHeapFlag values
+ */
+
+/**
+ * \brief Construct a DmaHeap of a given type
+ * \param[in] type The type(s) of the dma-heap(s) to allocate from
+ *
+ * The DMA heap type is selected with the \a type parameter, which defaults to
+ * the CMA heap. If no heap of the given type can be accessed, the constructed
+ * DmaHeap instance is invalid as indicated by the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case the
+ * constructed DmaHeap will match one of the types. If the system provides
+ * multiple heaps that match the requested types, which heap is used is
+ * undefined.
+ */
+DmaHeap::DmaHeap(DmaHeapFlags type)
+{
+ for (const auto &info : heapInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaHeap, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaHeap, Debug) << "Using " << info.deviceNodeName;
+ dmaHeapHandle_ = UniqueFD(ret);
+ break;
+ }
+
+ if (!dmaHeapHandle_.isValid())
+ LOG(DmaHeap, Error) << "Could not open any dmaHeap device";
+}
+
+/**
+ * \brief Destroy the DmaHeap instance
+ */
+DmaHeap::~DmaHeap() = default;
+
+/**
+ * \fn DmaHeap::isValid()
+ * \brief Check if the DmaHeap instance is valid
+ * \return True if the DmaHeap is valid, false otherwise
+ */
+
+/**
+ * \brief Allocate a dma-buf from the DmaHeap
+ * \param [in] name The name to set for the allocated buffer
+ * \param [in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, return an invalid UniqueFD.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
+{
+ int ret;
+
+ if (!name)
+ return {};
+
+ struct dma_heap_allocation_data alloc = {};
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaHeap, Error) << "dmaHeap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+} /* namespace libcamera */
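A minimal usage sketch (not part of the patch) of the relocated DmaHeap helper; the buffer name and size are arbitrary, and real callers are expected to wrap the returned fd into frame buffer planes:

.. code-block:: cpp

   #include <cstddef>

   #include <libcamera/base/unique_fd.h>

   #include "libcamera/internal/dma_heaps.h"

   using namespace libcamera;

   UniqueFD allocateBuffer(std::size_t size)
   {
           /* Accept either a CMA or a system dma-heap, whichever opens first. */
           DmaHeap heap(DmaHeap::DmaHeapFlag::Cma | DmaHeap::DmaHeapFlag::System);
           if (!heap.isValid())
                   return {};

           /* The name is attached to the dma-buf to help identify the allocation. */
           return heap.alloc("example-buffer", size);
   }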
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index 2e7b0c77..a3b12bc1 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -15,6 +15,7 @@ libcamera_sources = files([
'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
+ 'dma_heaps.cpp',
'fence.cpp',
'formats.cpp',
'framebuffer.cpp',
@@ -38,6 +39,7 @@ libcamera_sources = files([
'process.cpp',
'pub_key.cpp',
'request.cpp',
+ 'shared_mem_object.cpp',
'source_paths.cpp',
'stream.cpp',
'sysfs.cpp',
@@ -68,6 +70,7 @@ subdir('ipa')
subdir('pipeline')
subdir('proxy')
subdir('sensor')
+subdir('software_isp')
null_dep = dependency('', required : false)
diff --git a/src/libcamera/pipeline/rpi/common/shared_mem_object.h b/src/libcamera/pipeline/rpi/common/shared_mem_object.h
deleted file mode 100644
index aa56c220..00000000
--- a/src/libcamera/pipeline/rpi/common/shared_mem_object.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2023, Raspberry Pi Ltd
- *
- * shared_mem_object.h - Helper class for shared memory allocations
- */
-#pragma once
-
-#include <cstddef>
-#include <fcntl.h>
-#include <string>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <utility>
-
-#include <libcamera/base/class.h>
-#include <libcamera/base/shared_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-template<class T>
-class SharedMemObject
-{
-public:
- static constexpr std::size_t SIZE = sizeof(T);
-
- SharedMemObject()
- : obj_(nullptr)
- {
- }
-
- template<class... Args>
- SharedMemObject(const std::string &name, Args &&...args)
- : name_(name), obj_(nullptr)
- {
- void *mem;
- int ret;
-
- ret = memfd_create(name_.c_str(), MFD_CLOEXEC);
- if (ret < 0)
- return;
-
- fd_ = SharedFD(std::move(ret));
- if (!fd_.isValid())
- return;
-
- ret = ftruncate(fd_.get(), SIZE);
- if (ret < 0)
- return;
-
- mem = mmap(nullptr, SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0);
- if (mem == MAP_FAILED)
- return;
-
- obj_ = new (mem) T(std::forward<Args>(args)...);
- }
-
- SharedMemObject(SharedMemObject<T> &&rhs)
- {
- this->name_ = std::move(rhs.name_);
- this->fd_ = std::move(rhs.fd_);
- this->obj_ = rhs.obj_;
- rhs.obj_ = nullptr;
- }
-
- ~SharedMemObject()
- {
- if (obj_) {
- obj_->~T();
- munmap(obj_, SIZE);
- }
- }
-
- /* Make SharedMemObject non-copyable for now. */
- LIBCAMERA_DISABLE_COPY(SharedMemObject)
-
- SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
- {
- this->name_ = std::move(rhs.name_);
- this->fd_ = std::move(rhs.fd_);
- this->obj_ = rhs.obj_;
- rhs.obj_ = nullptr;
- return *this;
- }
-
- T *operator->()
- {
- return obj_;
- }
-
- const T *operator->() const
- {
- return obj_;
- }
-
- T &operator*()
- {
- return *obj_;
- }
-
- const T &operator*() const
- {
- return *obj_;
- }
-
- const SharedFD &fd() const
- {
- return fd_;
- }
-
- explicit operator bool() const
- {
- return !!obj_;
- }
-
-private:
- std::string name_;
- SharedFD fd_;
- T *obj_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp b/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp
deleted file mode 100644
index 317b1fc1..00000000
--- a/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi Ltd
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR | O_CLOEXEC, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
index cdb049c5..386e2296 100644
--- a/src/libcamera/pipeline/rpi/vc4/meson.build
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'dma_heaps.cpp',
'vc4.cpp',
])
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
index ad7dddde..947b1e73 100644
--- a/src/libcamera/pipeline/rpi/vc4/vc4.cpp
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -12,12 +12,11 @@
#include <libcamera/formats.h>
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_heaps.h"
#include "../common/pipeline_base.h"
#include "../common/rpi_stream.h"
-#include "dma_heaps.h"
-
using namespace std::chrono_literals;
namespace libcamera {
@@ -87,7 +86,7 @@ public:
RPi::Device<Isp, 4> isp_;
/* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
+ DmaHeap dmaHeap_;
SharedFD lsTable_;
struct Config {
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index 01f2a977..61a59926 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -34,6 +34,7 @@
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -185,18 +186,24 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Use of the Software ISP is enabled per driver.
+ *
+ * The Software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "dcmipp", {} },
- { "imx7-csi", { { "pxp", 1 } } },
- { "j721e-csi2rx", {} },
- { "mtk-seninf", { { "mtk-mdp", 3 } } },
- { "mxc-isi", {} },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "j721e-csi2rx", {}, false },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -270,17 +277,22 @@ public:
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ bool useConversion_;
+
std::unique_ptr<Converter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady();
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -332,6 +344,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -360,6 +373,7 @@ private:
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -504,8 +518,39 @@ int SimpleCameraData::init()
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+ * Instantiate Soft ISP if this is enabled for the given driver and no converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get());
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
@@ -599,12 +644,20 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
config.captureFormat = pixelFormat;
config.captureSize = format.size;
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
+ if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+ /* Do not use swIsp for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
}
configs_.push_back(config);
@@ -741,7 +794,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -750,23 +803,23 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
+ for (auto &item : conversionQueue_.front()) {
FrameBuffer *outputBuffer = item.second;
request = outputBuffer->request();
pipe->completeBuffer(request, outputBuffer);
}
- converterQueue_.pop();
+ conversionQueue_.pop();
if (request)
pipe->completeRequest(request);
@@ -783,9 +836,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
+ if (useConversion_ && !conversionQueue_.empty()) {
const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -798,18 +851,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front());
+ else
+ swIsp_->queueBuffers(buffer, conversionQueue_.front());
+
+ conversionQueue_.pop();
return;
}
@@ -818,13 +875,13 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -834,6 +891,19 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady()
+{
+ /* \todo Use the DelayedControls class */
+ swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE }));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
/* Retrieve all source pads connected to a sink pad through active routes. */
std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
{
@@ -882,6 +952,33 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
+
+} /* namespace */
+
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
@@ -998,10 +1095,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+		 * the smaller valid output size closest to the requested one.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
- << " to " << pipeConfig_->captureSize;
- cfg.size = pipeConfig_->captureSize;
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -1013,8 +1119,11 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
@@ -1157,14 +1266,14 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
@@ -1177,7 +1286,10 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ return data->converter_
+ ? data->converter_->configure(inputCfg, outputCfgs)
+ : data->swIsp_->configure(inputCfg, outputCfgs,
+ data->sensor_->controls());
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -1190,9 +1302,12 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(data->streamIndex(stream),
+ count, buffers)
+ : data->swIsp_->exportBuffers(data->streamIndex(stream),
+ count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1211,13 +1326,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1236,15 +1351,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1256,15 +1377,19 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
- data->converterBuffers_.clear();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1282,7 +1407,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
+ if (data->useConversion_) {
buffers.emplace(data->streamIndex(stream), buffer);
} else {
ret = data->video_->queueBuffer(buffer);
@@ -1291,8 +1416,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_)
+ data->conversionQueue_.push(std::move(buffers));
return 0;
}
@@ -1419,6 +1544,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
std::vector<MediaEntity *> sensors = locateSensors();
if (sensors.empty()) {
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..b018fb3b
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * shared_mem_object.cpp - Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can also be checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together to use the shared memory allocated by a single SharedMem object if
+ * possible. This will help minimize the number of memfds created.
+ */
+
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+ int fd = memfd_create(name.c_str(), MFD_CLOEXEC);
+ if (fd < 0)
+ return;
+
+ fd_ = SharedFD(std::move(fd));
+ if (!fd_.isValid())
+ return;
+
+ if (ftruncate(fd_.get(), size) < 0) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the object \a T
+ * size. The bool() operator should be used to check the allocation was
+ * successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
+ */
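
For illustration, a minimal sketch of how a SharedMemObject could be used; the
SoftIspStats struct and shareStats() function below are hypothetical and only
show the API shape:

    #include <errno.h>
    #include <stdint.h>

    #include <libcamera/base/shared_fd.h>

    #include "libcamera/internal/shared_mem_object.h"

    using namespace libcamera;

    /* A plain-old-data type: no pointers or references inside. */
    struct SoftIspStats {
            uint32_t histogram[256];
            uint32_t frameCount;
    };

    int shareStats()
    {
            SharedMemObject<SoftIspStats> stats("softisp-stats");
            if (!stats)
                    return -ENOMEM; /* memfd_create(), ftruncate() or mmap() failed */

            stats->frameCount = 0; /* access the object with operator->() */

            /* fd() can be sent over IPC; the receiver mmap()s the same memory. */
            const SharedFD &fd = stats.fd();

            return fd.isValid() ? 0 : -EINVAL;
    }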
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..4fcee39b
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,279 @@
+1. Setting F_SEAL_SHRINK and F_SEAL_GROW after ftruncate()
+
+>> SharedMem::SharedMem(const std::string &name, std::size_t size)
+>> : name_(name), size_(size), mem_(nullptr)
+>>
+>> ...
+>>
+>> if (ftruncate(fd_.get(), size_) < 0)
+>> return;
+>
+> Should we set the GROW and SHRINK seals (in a separate patch) ?
+
+Yes, this can be done.
+Setting F_SEAL_SHRINK and F_SEAL_GROW after the ftruncate() call above could catch
+some potential errors related to improper access to the shared memory allocated by
+the SharedMemObject.
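
For illustration, the constructor could then look roughly like this. Note that
fcntl(F_ADD_SEALS) only succeeds if the memfd was created with
MFD_ALLOW_SEALING, which the current code does not pass, and <fcntl.h> is
needed for the seal constants:

    int fd = memfd_create(name.c_str(), MFD_CLOEXEC | MFD_ALLOW_SEALING);
    if (fd < 0)
            return;

    fd_ = SharedFD(std::move(fd));
    if (!fd_.isValid())
            return;

    if (ftruncate(fd_.get(), size) < 0) {
            fd_ = SharedFD();
            return;
    }

    /* Lock the file size; later shrink or grow attempts will fail with EPERM. */
    if (fcntl(fd_.get(), F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) < 0) {
            fd_ = SharedFD();
            return;
    }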
+
+---
+
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretic problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
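
As an illustrative sketch only (not what the current code does), with the
statsReady signal changed to carry the ring buffer index:

    /* In SwStatsCpu, hypothetically: */
    static constexpr unsigned int kStatsBufferCount = 4;

    SharedMemObject<std::array<SwIspStats, kStatsBufferCount>> sharedStats_;
    unsigned int statsWriteIndex_ = 0;
    Signal<unsigned int> statsReady;

    void SwStatsCpu::finishFrame()
    {
            const unsigned int index = statsWriteIndex_;

            (*sharedStats_)[index] = stats_;
            statsWriteIndex_ = (index + 1) % kStatsBufferCount;

            /* The IPA only reads slot 'index'; the next frame writes a different slot. */
            statsReady.emit(index);
    }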
+
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+But better, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't indicate as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now; I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
+
+---
+
+4. Hide internal representation of gains from callers
+
+> struct DebayerParams {
+> static constexpr unsigned int kGain10 = 256;
+
+Forcing the caller to deal with the internal representation of gains
+isn't nice, especially given that it precludes implementing gains of
+different precisions in different backends. Wouldn't it be better to pass
+the values as floating-point numbers, and convert them to the internal
+representation in the implementation of process() before using them?
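
For illustration, assuming the gains in DebayerParams became floats with 1.0
meaning unity, the conversion at the top of DebayerCpu::process() could be as
small as:

    /* Convert the caller-facing float gains to the internal fixed-point scale. */
    unsigned int gainR = static_cast<unsigned int>(params.gainR * DebayerParams::kGain10 + 0.5f);
    unsigned int gainG = static_cast<unsigned int>(params.gainG * DebayerParams::kGain10 + 0.5f);
    unsigned int gainB = static_cast<unsigned int>(params.gainB * DebayerParams::kGain10 + 0.5f);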
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
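
One illustrative option, using a made-up environment variable name to override
the default in the DebayerCpu constructor (needs <cstdlib> and <cstring>):

    /* LIBCAMERA_SOFTISP_INPUT_MEMCPY is a hypothetical variable name. */
    const char *copyEnv = std::getenv("LIBCAMERA_SOFTISP_INPUT_MEMCPY");
    enableInputMemcpy_ = !copyEnv || std::strcmp(copyEnv, "0") != 0;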
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me think we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more than debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the let's wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+9. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+10. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+11. Switch to libipa/algorithm.h API in processStats
+
+>> void IPASoftSimple::processStats(const ControlList &sensorControls)
+>>
+> Do you envision switching to the libipa/algorithm.h API at some point ?
+
+At some point, yes.
+
+---
+
+12. Improve handling of sensor controls which take effect with a delay
+
+> void IPASoftSimple::processStats(const ControlList &sensorControls)
+> {
+> ...
+> /*
+> * AE / AGC, use 2 frames delay to make sure that the exposure and
+> * the gain set have applied to the camera sensor.
+> */
+> if (ignore_updates_ > 0) {
+> --ignore_updates_;
+> return;
+> }
+
+This could be handled better with DelayedControls.
+
+---
+
+13. Use the DelayedControls class in ispStatsReady()
+
+> void SimpleCameraData::ispStatsReady()
+> {
+> swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+> V4L2_CID_EXPOSURE }));
+
+You should use the DelayedControls class.
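
A rough sketch of what that could look like; the control delay values, the
delayedCtrls_ member and the frame sequence plumbing below are all assumptions:

    /* At configure time: */
    std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
            { V4L2_CID_ANALOGUE_GAIN, { 2, false } },
            { V4L2_CID_EXPOSURE, { 2, false } },
    };
    delayedCtrls_ = std::make_unique<DelayedControls>(sensor_->device(), params);

    /*
     * In ispStatsReady(), report the controls that were in effect for the
     * frame the statistics were computed from, not the latest values:
     */
    swIsp_->processStats(delayedCtrls_->get(frameSequence));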
+
+---
+
+14. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building up on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..1c035e9b
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer.cpp - debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kGain10
+ * \brief const value for 1.0 gain
+ */
+
+/**
+ * \var DebayerParams::gainR
+ * \brief Red gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainG
+ * \brief Green gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gainB
+ * \brief Blue gain
+ *
+ * 128 = 0.5, 256 = 1.0, 512 = 2.0, etc.
+ */
+
+/**
+ * \var DebayerParams::gamma
+ * \brief Gamma correction, 1.0 is no correction
+ */
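
As a short example of this representation, a red gain of 1.5, unity green gain,
a blue gain of 2.0, a gamma of 0.5 and a black level of 16 (on the 8-bit scale
used by DebayerCpu::process()) would be set as:

    DebayerParams params;
    params.gainR = 3 * DebayerParams::kGain10 / 2; /* 384 = 1.5x */
    params.gainG = DebayerParams::kGain10;         /* 256 = 1.0x */
    params.gainB = 2 * DebayerParams::kGain10;     /* 512 = 2.0x */
    params.gamma = 0.5;
    params.blackLevel = 16;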
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed-in parameters.
+ * \param[in] inputCfg The input configuration.
+ * \param[in] outputCfgs The output configurations.
+ *
+ * \return 0 on success, a negative errno on failure.
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats.
+ * \param[in] inputFormat The input format.
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats.
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats.
+ * \param[in] inputFormat The input format.
+ *
+ * \return All supported output formats or an empty vector if there are none.
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size.
+ * \param[in] outputFormat The output format.
+ * \param[in] size The output size.
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config.
+ */
+
+/**
+ * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format.
+ * \param[in] input The input buffer.
+ * \param[in] output The output buffer.
+ * \param[in] params The parameters to be used in debayering.
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size.
+ * \param[in] inputFormat The input format.
+ * \param[in] inputSize The input size.
+ *
+ * \return The valid size ranges or an empty range if there are none.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready.
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..42ae58ab
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer.h - debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..88d6578b
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer_cpu.cpp - CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats)), gammaCorrection_(1.0), blackLevel_(0)
+{
+ /*
+ * Reading from uncached buffers may be very slow.
+ * In such a case, it's better to copy input buffer data to normal memory.
+	 * But in the case of cached buffers, copying the data is unnecessary overhead.
+	 * enableInputMemcpy_ makes this behavior configurable. At the moment, we
+	 * always set it to true as the safer choice, but this should be changed in
+	 * the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize gamma to 1.0 curve */
+ for (unsigned int i = 0; i < kGammaLookupSize; i++)
+ gamma_[i] = i / (kGammaLookupSize / kRGBLookupSize);
+
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ lineBuffers_[i] = nullptr;
+}
+
+DebayerCpu::~DebayerCpu()
+{
+ for (unsigned int i = 0; i < kMaxLineBuffers; i++)
+ free(lineBuffers_[i]);
+}
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ x++;
+
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+	 * For the first pixel, getting a pixel from the previous column uses
+	 * x - 2 to skip the 5th byte, which holds the least-significant bits for 4 pixels.
+	 * The same applies to the last pixel (x + 2) when looking at the next column.
+ */
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+ /* Odd pixel GBGR -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Set up the Debayer object according to the passed-in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888, formats::BGR888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::RGB888:
+ break;
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = &DebayerCpu::debayer8_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer8_GRGR_BGR888;
+ break;
+ case 10:
+ debayer0_ = &DebayerCpu::debayer10_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10_GRGR_BGR888;
+ break;
+ case 12:
+ debayer0_ = &DebayerCpu::debayer12_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer12_GRGR_BGR888;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = &DebayerCpu::debayer10P_GRGR_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_BGBG_BGR888;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = &DebayerCpu::debayer10P_RGRG_BGR888;
+ debayer1_ = &DebayerCpu::debayer10P_GBGB_BGR888;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+	/* Pad with patternSize.width on both the left and right sides */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+ for (unsigned int i = 0;
+ i < (inputConfig_.patternSize.height + 1) && enableInputMemcpy_;
+ i++) {
+ free(lineBuffers_[i]);
+ lineBuffers_[i] = (uint8_t *)malloc(lineBufferLength_);
+ if (!lineBuffers_[i])
+ return -ENOMEM;
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+	/* Round up to a multiple of 8 for 64-bit alignment */
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i], linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i] + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_], linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_] + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as prev line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* next line may point outside of src, use prev. */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+static inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ /* Apply DebayerParams */
+ if (params.gamma != gammaCorrection_ || params.blackLevel != blackLevel_) {
+ const unsigned int blackIndex =
+ params.blackLevel * kGammaLookupSize / 256;
+ std::fill(gamma_.begin(), gamma_.begin() + blackIndex, 0);
+ const float divisor = kGammaLookupSize - blackIndex - 1.0;
+ for (unsigned int i = blackIndex; i < kGammaLookupSize; i++)
+ gamma_[i] = UINT8_MAX * powf((i - blackIndex) / divisor, params.gamma);
+
+ gammaCorrection_ = params.gamma;
+ blackLevel_ = params.blackLevel;
+ }
+
+ if (swapRedBlueGains_)
+ std::swap(params.gainR, params.gainB);
+
+ for (unsigned int i = 0; i < kRGBLookupSize; i++) {
+ constexpr unsigned int div =
+ kRGBLookupSize * DebayerParams::kGain10 / kGammaLookupSize;
+ unsigned int idx;
+
+ /* Apply gamma after gain! */
+ idx = std::min({ i * params.gainR / div, (kGammaLookupSize - 1) });
+ red_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainG / div, (kGammaLookupSize - 1) });
+ green_[i] = gamma_[idx];
+
+ idx = std::min({ i * params.gainB / div, (kGammaLookupSize - 1) });
+ blue_[i] = gamma_[idx];
+ }
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ stats_->finishFrame();
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+	 * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..689c1075
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer_cpu.h - CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+	 * \return The file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer 1 line of Bayer input data to output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+	 * colors. To allow copying the src lines only once, 3 temporary buffers,
+	 * each holding a single line, are used, re-using the oldest buffer for
+	 * the next line, and the pointers are swizzled so that:
+	 * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly for bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ static constexpr unsigned int kGammaLookupSize = 1024;
+ static constexpr unsigned int kRGBLookupSize = 256;
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ std::array<uint8_t, kGammaLookupSize> gamma_;
+ std::array<uint8_t, kRGBLookupSize> red_;
+ std::array<uint8_t, kRGBLookupSize> green_;
+ std::array<uint8_t, kRGBLookupSize> blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ uint8_t *lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ float gammaCorrection_;
+ unsigned int blackLevel_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames for things to stabilize then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
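
The line-pointer swizzling described for debayerFn above can be sketched in isolation. This is an illustration only, not DebayerCpu's implementation: the helper names, the fixed 3-line window, the clamping of the edge lines and the 3 bytes-per-pixel output are all assumptions made for the sketch:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    /* Illustrative stand-in for a debayer line function: src[0] is the previous
     * line, src[1] the line being debayered, src[2] the next line. */
    static void debayerLine(uint8_t *dst, const uint8_t *src[])
    {
        (void)dst;
        (void)src;
    }

    /* Debayer a frame with a sliding 3-line window; assumes height >= 2 and a
     * 3 bytes-per-pixel output. */
    static void debayerFrame(uint8_t *dst, const uint8_t *src, unsigned int width,
                             unsigned int height, unsigned int stride)
    {
        /* Three temporary line buffers; the oldest is reused for the next line. */
        std::vector<uint8_t> buf[3] = { std::vector<uint8_t>(stride),
                                        std::vector<uint8_t>(stride),
                                        std::vector<uint8_t>(stride) };
        const uint8_t *window[3];
        unsigned int oldest = 0;

        /* Prime the window; the line above line 0 is clamped to line 0. */
        std::memcpy(buf[0].data(), src, stride);
        std::memcpy(buf[1].data(), src, stride);
        std::memcpy(buf[2].data(), src + stride, stride);
        window[0] = buf[0].data();
        window[1] = buf[1].data();
        window[2] = buf[2].data();

        for (unsigned int y = 0; y < height; y++) {
            debayerLine(dst + y * width * 3, window);

            /* Slide the window: copy the next source line (clamped at the
             * bottom) into the oldest buffer and swizzle the pointers. */
            unsigned int next = std::min(y + 2, height - 1);
            std::memcpy(buf[oldest].data(), src + next * stride, stride);
            window[0] = window[1];
            window[1] = window[2];
            window[2] = buf[oldest].data();
            oldest = (oldest + 1) % 3;
        }
    }

    int main()
    {
        std::vector<uint8_t> in(8 * 8), out(8 * 8 * 3);
        debayerFrame(out.data(), in.data(), 8, 8, 8);
        return 0;
    }
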
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..f7c66e28
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..e4e56086
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * software_isp.cpp - Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Constructs SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor)
+ : debayerParams_{ DebayerParams::kGain10, DebayerParams::kGain10,
+ DebayerParams::kGain10, 0.5f, 0 },
+ dmaHeap_(DmaHeap::DmaHeapFlag::Cma | DmaHeap::DmaHeapFlag::System)
+{
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaHeap object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+	 * The IPA tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls());
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns "success".
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for the ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(sensorControls);
+}
+
+/**
+ * \brief Check the validity of the Software ISP object
+ * \return True if Software Isp is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] sensorControls ControlInfoMap of the controls supported by the sensor
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ControlInfoMap &sensorControls)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(sensorControls);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] output Output stream index exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+ /* single output for now */
+ if (output >= 1)
+ return -EINVAL;
+
+ for (unsigned int i = 0; i < count; i++) {
+ const std::string name = "frame-" + std::to_string(i);
+ const size_t frameSize = debayer_->frameSize();
+
+ FrameBuffer::Plane outPlane;
+ outPlane.fd = SharedFD(dmaHeap_.alloc(name.c_str(), frameSize));
+ if (!outPlane.fd.isValid()) {
+ LOG(SoftwareIsp, Error)
+				<< "Failed to allocate a dma_buf";
+ return -ENOMEM;
+ }
+ outPlane.offset = 0;
+ outPlane.length = frameSize;
+
+ std::vector<FrameBuffer::Plane> planes{ outPlane };
+ buffers->emplace_back(std::make_unique<FrameBuffer>(std::move(planes)));
+ }
+
+ return count;
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream indexes and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs)
+{
+ unsigned int mask = 0;
+
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream and no two
+ * outputs can reference the same stream.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [index, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+		if (index >= 1) /* only a single stream is supported for now */
+ return -EINVAL;
+ if (mask & (1 << index))
+ return -EINVAL;
+
+ mask |= 1 << index;
+ }
+
+ process(input, outputs.at(0));
+
+ return 0;
+}
+
+/**
+ * \brief Starts the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stops the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
+
+/**
+ * \brief Passes the input framebuffer to the ISP worker for processing
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(FrameBuffer *input, FrameBuffer *output)
+{
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady()
+{
+ ispStatsReady.emit();
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
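
The sanity check in queueBuffers() records already-seen stream indexes in a bitmask. The same logic in isolation, with plain pointers standing in for FrameBuffer and a hypothetical maxStreams parameter (the code above currently accepts a single output only; the duplicate-index test is moot for std::map, which cannot hold duplicate keys, but mirrors the defensive check above):

    #include <cassert>
    #include <map>

    /* Mirror of the queueBuffers() sanity check: at least one output, every index
     * below maxStreams, non-null buffers and no index used twice. */
    static bool validateOutputs(const std::map<unsigned int, const void *> &outputs,
                                unsigned int maxStreams)
    {
        if (outputs.empty())
            return false;

        unsigned int mask = 0;
        for (const auto &[index, buffer] : outputs) {
            if (!buffer || index >= maxStreams || (mask & (1U << index)))
                return false;
            mask |= 1U << index;
        }

        return true;
    }

    int main()
    {
        int dummy;

        assert(validateOutputs({ { 0, &dummy } }, 1));  /* single valid output */
        assert(!validateOutputs({ { 1, &dummy } }, 1)); /* index out of range */
        assert(!validateOutputs({}, 1));                /* no outputs at all */
        return 0;
    }
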
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..a0c45b0c
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * swstats_cpu.cpp - CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function, plus functions to gather statistics
+ * on a line-by-line basis. This allows CPU based software debayering to
+ * interleave debayering and statistics gathering line by line while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Gets whether the statistics object is valid
+ *
+ * \return True if it's valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done on multiple lines
+ * and/or columns at a time. This returns the width and height at which the
+ * (Bayer) pattern repeats. Window values are rounded down to a multiple of this
+ * and the height also indicates whether processLine2() needs to be called.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with
+ * patternSize height == 1.
+ * It will process lines 0 and 1 for input formats with patternSize height >= 2.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process lines 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with
+ * patternSize height == 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(void)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit();
+}
+
+/**
+ * \brief Set up the SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if \a order is a standard Bayer order and set up xShift_ and swapLines_
+ * so that a single BGGR stats function can be used for all 4 standard orders.
+ *
+ * \return 0 for standard Bayer orders, -EINVAL otherwise
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+	ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
+
+/**
+ * \brief Configure the statistics object for the passed-in input configuration
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+		/* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window object
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+	/* Subtract xShift_ from the width to make sure the window fits */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
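
The luminance histogram filled by SWSTATS_ACCUMULATE_LINE_STATS() uses fixed-point BT.601 coefficients (0.299, 0.587 and 0.114 scaled by 256, so that 77 + 150 + 29 = 256) and a per-format divider that maps 10- and 12-bit samples back onto the 8-bit range. A worked standalone version; the histogram size is assumed to be 16 here, the real value is SwIspStats::kYHistogramSize from swisp_stats.h:

    #include <array>
    #include <cassert>
    #include <cstdint>

    static constexpr unsigned int kRedYMul = 77;    /* 0.299 * 256 */
    static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
    static constexpr unsigned int kBlueYMul = 29;   /* 0.114 * 256 */
    static constexpr unsigned int kYHistogramSize = 16; /* assumed size, see swisp_stats.h */

    /* Histogram bin for one sampled pixel; div is 1, 4 or 16 for 8, 10 or 12 bpp data. */
    static unsigned int yBin(unsigned int r, unsigned int g, unsigned int b,
                             unsigned int div)
    {
        uint64_t yVal = r * kRedYMul + g * kGreenYMul + b * kBlueYMul;
        return static_cast<unsigned int>(yVal * kYHistogramSize / (256 * 256 * div));
    }

    int main()
    {
        std::array<uint32_t, kYHistogramSize> hist{};

        /* Mid-grey 8-bit pixel: y = 128 * (77 + 150 + 29) = 128 * 256 -> bin 8. */
        assert(yBin(128, 128, 128, 1) == 8);
        /* The same grey as 10-bit data, with div = 4, lands in the same bin. */
        assert(yBin(512, 512, 512, 4) == 8);

        hist[yBin(128, 128, 128, 1)]++;
        assert(hist[8] == 1);
        return 0;
    }
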
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..baec3951
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * swstats_cpu.h - CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame();
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
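
processLine0() and processLine2() cheaply discard lines that fall outside the statistics window or that are masked off by ySkipMask_: with ySkipMask_ = 0x02, only lines whose bit 1 is clear (0, 1, 4, 5, 8, 9, ...) are sampled. A standalone sketch of that predicate, with illustrative window values:

    #include <cassert>

    struct Window {
        unsigned int y;
        unsigned int height;
    };

    /* Mirror of the early-return test in processLine0() / processLine2(). */
    static bool lineSampled(unsigned int y, unsigned int ySkipMask, const Window &window)
    {
        if (y & ySkipMask)
            return false;

        return y >= window.y && y < window.y + window.height;
    }

    int main()
    {
        const Window window{ 2, 8 };         /* illustrative window: lines 2..9 */
        const unsigned int ySkipMask = 0x02; /* sample lines 0-1 of every group of 4 */

        assert(lineSampled(4, ySkipMask, window));   /* bit 1 clear, inside the window */
        assert(!lineSampled(6, ySkipMask, window));  /* bit 1 set: skipped */
        assert(!lineSampled(0, ySkipMask, window));  /* above the window */
        assert(!lineSampled(12, ySkipMask, window)); /* below the window */
        return 0;
    }
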
diff --git a/utils/checkstyle.py b/utils/checkstyle.py
index 84f44a42..88078a61 100755
--- a/utils/checkstyle.py
+++ b/utils/checkstyle.py
@@ -569,7 +569,7 @@ class IncludeChecker(StyleChecker):
'limits', 'locale', 'setjmp', 'signal', 'stdarg', 'stddef',
'stdint', 'stdio', 'stdlib', 'string', 'time', 'uchar', 'wchar',
'wctype')
- include_regex = re.compile('^#include <c([a-z]*)>')
+ include_regex = re.compile(r'^#include <c([a-z]*)>')
def __init__(self, content):
super().__init__()
@@ -632,7 +632,7 @@ class MesonChecker(StyleChecker):
class Pep8Checker(StyleChecker):
patterns = ('*.py',)
- results_regex = re.compile('stdin:([0-9]+):([0-9]+)(.*)')
+ results_regex = re.compile(r'stdin:([0-9]+):([0-9]+)(.*)')
def __init__(self, content):
super().__init__()
@@ -665,7 +665,7 @@ class Pep8Checker(StyleChecker):
class ShellChecker(StyleChecker):
patterns = ('*.sh',)
- results_line_regex = re.compile('In - line ([0-9]+):')
+ results_line_regex = re.compile(r'In - line ([0-9]+):')
def __init__(self, content):
super().__init__()
@@ -753,7 +753,8 @@ class CLangFormatter(Formatter):
class DoxygenFormatter(Formatter):
patterns = ('*.c', '*.cpp')
- return_regex = re.compile(' +\\* +\\\\return +[a-z]')
+ oneliner_regex = re.compile(r'^ +\* +\\(brief|param|return)\b.*\.$')
+ return_regex = re.compile(r' +\* +\\return +[a-z]')
@classmethod
def format(cls, filename, data):
@@ -768,6 +769,7 @@ class DoxygenFormatter(Formatter):
lines.append(line)
continue
+ line = cls.oneliner_regex.sub(lambda m: m.group(0)[:-1], line)
line = cls.return_regex.sub(lambda m: m.group(0)[:-1] + m.group(0)[-1].upper(), line)
if line.find('*/') != -1:
@@ -813,7 +815,7 @@ class DPointerFormatter(Formatter):
class IncludeOrderFormatter(Formatter):
patterns = ('*.cpp', '*.h')
- include_regex = re.compile('^#include (["<])([^">]*)([">])')
+ include_regex = re.compile(r'^#include (["<])([^">]*)([">])')
@classmethod
def format(cls, filename, data):
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
index 238cf4a5..f64c3c93 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
@@ -175,9 +175,9 @@ void {{proxy_name}}::recvMessage(const IPCMessage &data)
);
{% elif method|is_async %}
ASSERT(state_ == ProxyRunning);
- proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued,
+ proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued
{%- for param in method|method_param_names -%}
- {{param}}{{- ", " if not loop.last}}
+ , {{param}}
{%- endfor -%}
);
{%- endif %}
diff --git a/utils/ipu3/ipu3-pack.c b/utils/ipu3/ipu3-pack.c
index decbfc6c..23d2db8b 100644
--- a/utils/ipu3/ipu3-pack.c
+++ b/utils/ipu3/ipu3-pack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,9 +16,8 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
-
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats\n");
printf("If the output-file '-', output data will be written to standard output\n");
diff --git a/utils/ipu3/ipu3-unpack.c b/utils/ipu3/ipu3-unpack.c
index c96fafed..6ee8c45a 100644
--- a/utils/ipu3/ipu3-unpack.c
+++ b/utils/ipu3/ipu3-unpack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,7 +16,7 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Unpack the IPU3 raw Bayer format to 16-bit Bayer\n");
diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py
index cd89f177..89159e63 100755
--- a/utils/raspberrypi/ctt/ctt.py
+++ b/utils/raspberrypi/ctt/ctt.py
@@ -350,7 +350,7 @@ class Camera:
alsc_out = alsc_all(self, do_alsc_colour, plot)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
- write ouput to json and finish if not do_alsc_colour
+ write output to json and finish if not do_alsc_colour
"""
if not do_alsc_colour:
self.json['rpi.alsc']['luminance_lut'] = luminance_lut