Diffstat (limited to 'src/libcamera/pipeline')
-rw-r--r--  src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | 1117
-rw-r--r--  src/libcamera/pipeline/imx8-isi/meson.build | 5
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.cpp | 19
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.h | 6
-rw-r--r--  src/libcamera/pipeline/ipu3/frames.cpp | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/frames.h | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/imgu.cpp | 8
-rw-r--r--  src/libcamera/pipeline/ipu3/imgu.h | 2
-rw-r--r--  src/libcamera/pipeline/ipu3/ipu3.cpp | 159
-rw-r--r--  src/libcamera/pipeline/mali-c55/mali-c55.cpp | 1066
-rw-r--r--  src/libcamera/pipeline/mali-c55/meson.build | 5
-rw-r--r--  src/libcamera/pipeline/meson.build | 15
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.cpp | 90
-rw-r--r--  src/libcamera/pipeline/raspberrypi/dma_heaps.h | 32
-rw-r--r--  src/libcamera/pipeline/raspberrypi/raspberrypi.cpp | 2200
-rw-r--r--  src/libcamera/pipeline/raspberrypi/rpi_stream.h | 178
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1.cpp | 520
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 299
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1_path.h | 18
-rw-r--r--  src/libcamera/pipeline/rpi/common/delayed_controls.cpp | 293
-rw-r--r--  src/libcamera/pipeline/rpi/common/delayed_controls.h | 87
-rw-r--r--  src/libcamera/pipeline/rpi/common/meson.build (renamed from src/libcamera/pipeline/raspberrypi/meson.build) | 4
-rw-r--r--  src/libcamera/pipeline/rpi/common/pipeline_base.cpp | 1491
-rw-r--r--  src/libcamera/pipeline/rpi/common/pipeline_base.h | 286
-rw-r--r--  src/libcamera/pipeline/rpi/common/rpi_stream.cpp (renamed from src/libcamera/pipeline/raspberrypi/rpi_stream.cpp) | 159
-rw-r--r--  src/libcamera/pipeline/rpi/common/rpi_stream.h | 199
-rw-r--r--  src/libcamera/pipeline/rpi/meson.build | 12
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/data/example.yaml | 46
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/data/meson.build | 9
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/meson.build | 7
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/vc4.cpp | 1023
-rw-r--r--  src/libcamera/pipeline/simple/converter.cpp | 399
-rw-r--r--  src/libcamera/pipeline/simple/converter.h | 98
-rw-r--r--  src/libcamera/pipeline/simple/meson.build | 1
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp | 465
-rw-r--r--  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 237
-rw-r--r--  src/libcamera/pipeline/vimc/vimc.cpp | 49
37 files changed, 7003 insertions, 3605 deletions
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..72aa6c75
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1117 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
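pipeIndex() relies on streams_ being a contiguous vector, so a Stream pointer maps back to its ISI pipe by plain pointer arithmetic. A minimal sketch of the invariant, assuming the two-stream resize above:

    // &streams_[n] - &streams_[0] == n, so pipeIndex(&streams_[n]) == n,
    // and pipeFromStream() later selects pipes_[n] for that stream.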
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ int ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor, adjust it
+ * to the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+ * map and the sensor can produce the compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
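A usage sketch of the adjustment path above, assuming a hypothetical sensor that only produces 10-bit Bayer:

    PixelFormat fmt = formats::SRGGB8;             /* requested, unsupported */
    unsigned int code = getRawMediaBusFormat(&fmt);
    /* fmt has been adjusted in place to the deepest Bayer format the sensor
     * offers, e.g. formats::SRGGB10, with code == MEDIA_BUS_FMT_SRGGB10_1X10;
     * code is 0 and fmt invalid if the sensor has no supported RAW format. */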
+
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixelformat.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format the ISI cannot
+ * generate any processed pixel format as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+ * media bus formats.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
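The colour-encoding preference can be traced with a hypothetical sensor exposing both MEDIA_BUS_FMT_UYVY8_1X16 and MEDIA_BUS_FMT_RGB888_1X24:

    getYuvMediaBusFormat(formats::YUYV);   /* -> MEDIA_BUS_FMT_UYVY8_1X16  */
    getYuvMediaBusFormat(formats::RGB565); /* -> MEDIA_BUS_FMT_RGB888_1X24 */
    /* Without an encoding match, the first code in the sorted intersection
     * is returned instead. */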
+
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
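A worked example of the derived fields, assuming the first stream settled on SBGGR8 at 1920x1080:

    /* cfg.stride    = info.stride(1920, 0)          -> 1920 bytes per line
     * cfg.frameSize = info.frameSize(1920x1080, 8)  -> 2073600 bytes
     * and every other stream is forced to that same format and size. */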
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+ * format. If the sensor can only produce RAW Bayer we can only fail
+ * here as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+ /* If the stream is RAW or not supported default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the stream size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane ? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
+ * channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+ * successive channels together. Define a policy for channels allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+ * \todo The sensor format selection policy could be changed to
+ * prefer operating the sensor at full resolution to prioritize
+ * image quality in exchange for a usually slower frame rate.
+ * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+ * We can't use CameraSensor::getFormat() as it might return a
+ * format larger than our strict width limit, since that function
+ * prioritizes formats with the same aspect ratio over formats with a
+ * smaller difference in size.
+ *
+ * Manually walk all the sensor-supported sizes, searching for
+ * the smallest larger format without considering the aspect ratio,
+ * as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
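The manual size walk can be illustrated with hypothetical sensor modes { 1280x720, 1920x1080, 3840x2160 } and two streams requesting at most 1600x900:

    /* maxResolution.width is capped to 2048 (two streams); 1280x720 is too
     * small, 3840x2160 exceeds the width limit, so bestSize = 1920x1080:
     * the smallest mode covering the request within the limits. */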
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for the YUV/RGB use case.
+ *
+ * Verify if the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
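A sketch of the resulting configuration, assuming a hypothetical sensor that settles on a 1920x1080 YUV-capable mode:

    StreamConfiguration cfg = generateYUVConfiguration(camera, { 1920, 1080 });
    /* cfg.pixelFormat == formats::YUYV, cfg.size == 1920x1080, and
     * cfg.formats() advertises every processed entry of formatsMap_
     * (YUYV, NV12, RGB565, ...) over the { 1x1 .. 1920x1080 } range. */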
+
+/*
+ * Generate a StreamConfiguration for the RAW Bayer use case. Verify if the sensor
+ * can produce the requested RAW Bayer format and, if needed, adjust it to
+ * the one with the largest bit-depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat is adjusted in place if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
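Unlike the YUV case, each RAW format is advertised over a degenerate size range; for a hypothetical 4056x3040 sensor:

    /* streamFormats[formats::SRGGB10] = { { 4056x3040, 4056x3040 } },
     * i.e. RAW streams are only advertised at the full sensor resolution. */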
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - ViewFinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fall back to a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on the ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
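The routing arithmetic, sketched for a hypothetical i.MX8MP crossbar with five pads (three sinks, two sources), giving xbarFirstSource = 5 / 2 + 1 = 3:

    /* A two-stream configuration on a camera with xbarSink_ == 0 yields
     *   { sink pad 0 / stream 0 -> source pad 3 / stream 0 }
     *   { sink pad 0 / stream 0 -> source pad 4 / stream 0 }
     * one crossbar route per requested stream configuration. */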
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
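The sink-pad walk above accepts a topology of the form sensor -> CSI-2 receiver -> crossbar; a hypothetical example:

    /* hypothetical: sensor "imx219 1-0010" (MEDIA_ENT_F_CAM_SENSOR)
     * -> 2-pad CSI-2 receiver -> crossbar sink pad 0
     * registers one camera with xbarSink_ = 0; sink pads whose upstream
     * entities fail either check are counted but skipped. */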
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
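Completion is per-request, not per-buffer; with two streams queued in one request:

    /* The first buffer to return latches controls::SensorTimestamp into the
     * request metadata; completeRequest() only runs once the second buffer
     * has also completed and hasPendingBuffers() reports false. */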
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..ffd0ce54
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index 08e254f7..81a7a8ab 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.cpp - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#include "cio2.h"
@@ -15,6 +15,7 @@
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
+#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
@@ -177,10 +178,12 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
-int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
@@ -191,7 +194,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
- ret = sensor_->setFormat(&sensorFormat);
+ ret = sensor_->setFormat(&sensorFormat, transform);
if (ret)
return ret;
@@ -199,11 +202,11 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
if (ret)
return ret;
- const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.mbus_code);
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
if (itInfo == mbusCodesToPixelFormat.end())
return -EINVAL;
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(itInfo->second);
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
outputFormat->size = sensorFormat.size;
outputFormat->planesCount = 1;
@@ -227,13 +230,13 @@ StreamConfiguration CIO2Device::generateConfiguration(Size size) const
/* Query the sensor static information for closest match. */
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
- if (!sensorFormat.mbus_code) {
+ if (!sensorFormat.code) {
LOG(IPU3, Error) << "Sensor does not support mbus code";
return {};
}
cfg.size = sensorFormat.size;
- cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
cfg.bufferCount = kBufferCount;
return cfg;
@@ -323,7 +326,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
}
V4L2SubdeviceFormat format{};
- format.mbus_code = bestCode;
+ format.code = bestCode;
format.size = bestSize;
return format;
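Two mechanical API migrations run through these hunks; a condensed sketch of both patterns:

    V4L2SubdeviceFormat fmt{};
    fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;       /* was fmt.mbus_code */

    V4L2DeviceFormat devFmt{};
    devFmt.fourcc = output_->toV4L2PixelFormat(formats::SBGGR10);
                           /* was V4L2PixelFormat::fromPixelFormat() */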
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index 68504a2d..963c2f6b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.h - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#pragma once
@@ -26,6 +26,7 @@ class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
+enum class Transform;
class CIO2Device
{
@@ -38,7 +39,8 @@ public:
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size, V4L2DeviceFormat *outputFormat);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
index a4c3477c..88eb9d05 100644
--- a/src/libcamera/pipeline/ipu3/frames.cpp
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.cpp - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#include "frames.h"
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
index 6e3cb915..a347b66f 100644
--- a/src/libcamera/pipeline/ipu3/frames.h
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.h - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index 59305f85..7be78091 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.cpp - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#include "imgu.h"
@@ -504,7 +504,7 @@ int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputF
LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
V4L2SubdeviceFormat gdcFormat = {};
- gdcFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
gdcFormat.size = pipeConfig.gdc;
ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
@@ -543,7 +543,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
imguFormat.size = cfg.size;
int ret = imgu_->setFormat(pad, &imguFormat);
@@ -558,7 +558,7 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
return 0;
*outputFormat = {};
- outputFormat->fourcc = V4L2PixelFormat::fromPixelFormat(formats::NV12);
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
outputFormat->size = cfg.size;
outputFormat->planesCount = 2;
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 0af4dd8a..fa508316 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.h - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index b7dda282..066fd4a2 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
@@ -11,6 +11,8 @@
#include <queue>
#include <vector>
+#include <linux/intel-ipu3.h>
+
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -49,7 +51,7 @@ class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), supportsFlips_(false)
+ : Camera::Private(pipe)
{
}
@@ -71,8 +73,6 @@ public:
Stream rawStream_;
Rectangle cropRegion_;
- bool supportsFlips_;
- Transform rotationTransform_;
std::unique_ptr<DelayedControls> delayedCtrls_;
IPU3Frames frameInfos_;
@@ -134,8 +134,8 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -182,48 +182,15 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (config_.empty())
return Invalid;
- Transform combined = transform * data_->rotationTransform_;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transposition, as we can't do
- * those. In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
/*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
*/
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
- * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -data_->rotationTransform_;
- combined = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
/* Cap the number of entries to the available streams. */
if (config_.size() > kMaxStreams) {
@@ -243,6 +210,7 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
*/
unsigned int rawCount = 0;
unsigned int yuvCount = 0;
+ Size rawRequirement;
Size maxYuvSize;
Size rawSize;
@@ -251,10 +219,11 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
rawCount++;
- rawSize.expandTo(cfg.size);
+ rawSize = std::max(rawSize, cfg.size);
} else {
yuvCount++;
- maxYuvSize.expandTo(cfg.size);
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
}
}
@@ -283,17 +252,17 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
* The output YUV streams will be limited in size to the maximum frame
* size requested for the RAW stream, if present.
*
- * If no raw stream is requested generate a size as large as the maximum
- * requested YUV size aligned to the ImgU constraints and bound by the
- * sensor's maximum resolution. See
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
* https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (rawSize.isNull())
- rawSize = maxYuvSize.expandedTo({ ImgUDevice::kIFMaxCropWidth,
- ImgUDevice::kIFMaxCropHeight })
- .grownBy({ ImgUDevice::kOutputMarginWidth,
- ImgUDevice::kOutputMarginHeight })
- .boundedTo(data_->cio2_.sensor()->resolution());
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
if (!cio2Configuration_.pixelFormat.isValid())
@@ -420,11 +389,12 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config = new IPU3CameraConfiguration(data);
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
if (roles.empty())
return config;
@@ -490,7 +460,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
@@ -552,7 +521,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
@@ -568,7 +537,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
- ret = cio2->configure(sensorSize, &cio2Format);
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
@@ -577,24 +546,6 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
data->cropRegion_ = sensorInfo.analogCrop;
/*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- ControlList sensorCtrls(cio2->sensor()->controls());
- sensorCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::HFlip)));
- sensorCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::VFlip)));
-
- ret = cio2->sensor()->setControls(&sensorCtrls);
- if (ret)
- return ret;
- }
-
- /*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
@@ -1143,25 +1094,12 @@ int PipelineHandlerIPU3::registerCameras()
&IPU3CameraData::frameStart);
/* Convert the sensor rotation to a transformation */
- int32_t rotation = 0;
- if (data->properties_.contains(properties::Rotation))
- rotation = data->properties_.get(properties::Rotation);
- else
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
LOG(IPU3, Warning) << "Rotation control not exposed by "
<< cio2->sensor()->id()
<< ". Assume rotation 0";
- bool success;
- data->rotationTransform_ = transformFromRotation(rotation, &success);
- if (!success)
- LOG(IPU3, Warning) << "Invalid rotation of " << rotation
- << " degrees: ignoring";
-
- ControlList ctrls = cio2->sensor()->getControls({ V4L2_CID_HFLIP });
- if (!ctrls.empty())
- /* We assume the sensor supports VFLIP too. */
- data->supportsFlips_ = true;
-
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
@@ -1244,8 +1182,16 @@ int IPU3CameraData::loadIPA()
if (ret)
return ret;
- ret = ipa_->init(IPASettings{ "", sensor->model() }, sensorInfo,
- sensor->controls(), &ipaControls_);
+ /*
+ * The API tuning file is made from the sensor name. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
+ if (ipaTuningFile.empty())
+ ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
@@ -1289,6 +1235,8 @@ void IPU3CameraData::paramsBufferReady(unsigned int id)
imgu_->viewfinder_->queueBuffer(outbuffer);
}
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
imgu_->param_->queueBuffer(info->paramBuffer);
imgu_->stat_->queueBuffer(info->statBuffer);
imgu_->input_->queueBuffer(info->rawBuffer);
@@ -1330,8 +1278,9 @@ void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
request->metadata().set(controls::draft::PipelineDepth, 3);
/* \todo Actually apply the scaler crop region to the ImgU. */
- if (request->controls().contains(controls::ScalerCrop))
- cropRegion_ = request->controls().get(controls::ScalerCrop);
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
request->metadata().set(controls::ScalerCrop, cropRegion_);
if (frameInfos_.tryComplete(info))
@@ -1424,7 +1373,7 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
return;
}
- ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp),
+ ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
info->statBuffer->cookie(), info->effectiveSensorControls);
}
@@ -1455,14 +1404,12 @@ void IPU3CameraData::frameStart(uint32_t sequence)
Request *request = processingRequests_.front();
processingRequests_.pop();
- if (!request->controls().contains(controls::draft::TestPatternMode))
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
return;
- const int32_t testPatternMode = request->controls().get(
- controls::draft::TestPatternMode);
-
int ret = cio2_.sensor()->setTestPatternMode(
- static_cast<controls::draft::TestPatternModeEnum>(testPatternMode));
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
if (ret) {
LOG(IPU3, Error) << "Failed to set test pattern mode: "
<< ret;
@@ -1470,9 +1417,9 @@ void IPU3CameraData::frameStart(uint32_t sequence)
}
request->metadata().set(controls::draft::TestPatternMode,
- testPatternMode);
+ *testPatternMode);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
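These hunks also track ControlList::get() now returning std::optional; a minimal sketch of the access pattern used throughout the file (names illustrative):

    const auto &mode = request->controls().get(controls::draft::TestPatternMode);
    if (mode)
        applyMode(*mode);                  /* hypothetical helper; value present */
    /* or provide a default when the control is absent: */
    int64_t ts = request->metadata().get(controls::SensorTimestamp).value_or(0);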
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..45c71c1d
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+
+ /* Delegate these functionalities to either the TPG or the CameraSensor. */
+ const std::vector<unsigned int> mbusCodes() const;
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ PixelFormat bestRawFormat() const;
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+private:
+ void initTPGData();
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+	/* If this camera is created from the TPG, initialize it and return. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+	 * Otherwise register a CameraSensor for the connected sensor and
+	 * open a V4L2Subdevice for the CSI-2 receiver in between.
+ */
+ sensor_ = std::make_unique<CameraSensor>(entity_);
+ ret = sensor_->init();
+ if (ret)
+ return ret;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+	ret = csi_->open();
+	if (ret) {
+		LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+		return ret;
+	}
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const
+{
+ if (sensor_)
+ return sensor_->mbusCodes();
+
+ return tpgCodes_;
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+PixelFormat MaliC55CameraData::bestRawFormat() const
+{
+ unsigned int bitDepth = 0;
+ PixelFormat rawFormat;
+
+ /*
+	 * Iterate over all the supported PixelFormats and find the RAW one
+	 * the camera supports with the largest bit depth.
+ */
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ unsigned int rawCode = maliFormat.second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.bitDepth > bitDepth) {
+ bitDepth = bayer.bitDepth;
+ rawFormat = pixFmt;
+ }
+ }
+
+ return rawFormat;
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the provided raw format is supported by the pipeline. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return bestRawFormat();
+
+ /* Now make sure the RAW mbus code is supported by the image source. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const
+{
+ /* Just make sure the format is supported. */
+ auto it = maliC55FmtToCode.find(rawFmt);
+ if (it == maliC55FmtToCode.end())
+ return {};
+
+ /* Check if the size is natively supported. */
+ unsigned int rawCode = it->second;
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &size : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+			distance = dist;
+ bestSize = size;
+ }
+ }
+
+ return bestSize;
+}
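+
+/*
+ * Worked example of the closest-size selection above (informative, sizes
+ * illustrative): with supported sizes { 1920x1080, 3840x2160 } and a
+ * requested size of 2000x1200, the distances are
+ * |2000 - 1920| + |1200 - 1080| = 200 and
+ * |2000 - 3840| + |1200 - 2160| = 2800, so 1920x1080 is selected.
+ */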
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ Size maxSize = kMaliC55MaxSize;
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+				<< "RAW size adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /* Adjust processed streams. */
+ Size maxYuvSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (maxYuvSize < size)
+ maxYuvSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat);
+ sensorFormat_.code = it->second;
+ sensorFormat_.size = rawConfig->size;
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ const auto it = maliC55FmtToCode.find(rawFormat);
+ sensorFormat_.code = it->second;
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(it->second);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ /* Skip sensor sizes that are smaller than the max YUV size. */
+ if (maxYuvSize.width > size.width ||
+ maxYuvSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(maxYuvSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+			distance = dist;
+ bestSize = size;
+ }
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void bufferReady(FrameBuffer *buffer);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ Stream *stream;
+ };
+
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ unsigned int rawCode = maliFormat.second;
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
+ /* Enable the debayer route to set fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
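+
+/*
+ * Resizer routing summary (informative, derived from the code above): pad 0
+ * is the sink fed by the ISP, pad 2 the bypass sink and pad 1 the source.
+ * Processed capture enables the debayer route 0/0 -> 1/0 with the fixed ISP
+ * internal format on the sink, while RAW capture enables the bypass route
+ * 2/0 -> 1/0 with the sensor's Bayer format on both pads.
+ */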
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+	/* \todo Make the resizer crop/compose rectangles configurable. */
+ Rectangle ispCrop = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+	/* Link the graph depending on whether we are operating the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+	 * Configure the resizer: apply the fixed internal format to the sink
+	 * pad, and the media bus code associated with the desired capture
+	 * format to the source pad.
+	 *
+	 * Configure the crop and compose rectangles to match the desired
+	 * stream output size.
+	 *
+	 * \todo Make the crop/scaler configurable.
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ int ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
+{
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+}
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ int ret;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+
+ ret = pipe->cap->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ completeBuffer(request, buffer);
+
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ registerMaliCamera(std::move(data), name);
+
+ return true;
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+		/* \todo Init properties and controls. */
+
+ registerMaliCamera(std::move(data), sensor->name());
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the ISP subdevice and the full resolution pipe.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+ }
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+ LOG(MaliC55, Error) << "ISP sink pad error";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+	 * The last one is not supported for now. The TPG is relatively easy:
+	 * we just register a Camera for it. If we have a CSI-2 receiver we
+	 * need to check its sink pad and register Cameras for anything
+	 * connected to it. More complex topologies behind the receiver may
+	 * exist, but we ignore them until we come across them.
+ */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
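A minimal usage sketch for the new handler, assuming a standard libcamera application in which the camera registered here is the first one enumerated; buffer allocation and request submission are omitted, and the two-role choice is illustrative:

#include <memory>

#include <libcamera/libcamera.h>

using namespace libcamera;

int main()
{
	/* Start the camera manager; pipeline handlers match their devices here. */
	CameraManager cm;
	if (cm.start())
		return 1;

	if (cm.cameras().empty())
		return 1;

	std::shared_ptr<Camera> camera = cm.cameras()[0];
	if (camera->acquire())
		return 1;

	/*
	 * Requesting a Raw stream reserves the FR pipe; the Viewfinder
	 * stream is then assigned to the DS pipe by validate().
	 */
	std::unique_ptr<CameraConfiguration> config =
		camera->generateConfiguration({ StreamRole::Raw,
						StreamRole::Viewfinder });
	if (!config || config->validate() == CameraConfiguration::Invalid)
		return 1;

	if (camera->configure(config.get()))
		return 1;

	camera->release();
	cm.stop();

	return 0;
}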
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..30fd29b9
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 30dc5b97..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,5 +1,20 @@
# SPDX-License-Identifier: CC0-1.0
+# Location of pipeline-specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+    # Don't reuse the pipeline variable below; the subdirectory may have
+    # overwritten it.
endforeach
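As a concrete illustration of the loop above: a hypothetical pipelines entry such as 'vendor/soc' contributes only its first path component, so subdir('vendor') runs once even when several 'vendor/...' handlers are enabled, and vendor/meson.build is then responsible for descending into the per-handler subdirectories.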
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp b/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
deleted file mode 100644
index 69831dab..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.cpp - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/dma_heaps.h b/src/libcamera/pipeline/raspberrypi/dma_heaps.h
deleted file mode 100644
index d38f41ea..00000000
--- a/src/libcamera/pipeline/raspberrypi/dma_heaps.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <libcamera/base/unique_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-class DmaHeap
-{
-public:
- DmaHeap();
- ~DmaHeap();
- bool isValid() const { return dmaHeapHandle_.isValid(); }
- UniqueFD alloc(const char *name, std::size_t size);
-
-private:
- UniqueFD dmaHeapHandle_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
deleted file mode 100644
index 66a84b1d..00000000
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ /dev/null
@@ -1,2200 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.
- *
- * raspberrypi.cpp - Pipeline handler for Raspberry Pi devices
- */
-#include <algorithm>
-#include <assert.h>
-#include <cmath>
-#include <fcntl.h>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <unordered_set>
-#include <utility>
-
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/camera.h>
-#include <libcamera/control_ids.h>
-#include <libcamera/formats.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
-#include <libcamera/logging.h>
-#include <libcamera/property_ids.h>
-#include <libcamera/request.h>
-
-#include <linux/bcm2835-isp.h>
-#include <linux/media-bus-format.h>
-#include <linux/videodev2.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera.h"
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/delayed_controls.h"
-#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
-#include "libcamera/internal/ipa_manager.h"
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-#include "dma_heaps.h"
-#include "rpi_stream.h"
-
-using namespace std::chrono_literals;
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(RPI)
-
-namespace {
-
-constexpr unsigned int defaultRawBitDepth = 12;
-
-/* Map of mbus codes to supported sizes reported by the sensor. */
-using SensorFormats = std::map<unsigned int, std::vector<Size>>;
-
-SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
-{
- SensorFormats formats;
-
- for (auto const mbusCode : sensor->mbusCodes())
- formats.emplace(mbusCode, sensor->sizes(mbusCode));
-
- return formats;
-}
-
-PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
- BayerFormat::Packing packingReq)
-{
- BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
-
- ASSERT(bayer.isValid());
-
- bayer.packing = packingReq;
- PixelFormat pix = bayer.toPixelFormat();
-
- /*
- * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
- * variants. So if the PixelFormat returns as invalid, use the non-packed
- * conversion instead.
- */
- if (!pix.isValid()) {
- bayer.packing = BayerFormat::Packing::None;
- pix = bayer.toPixelFormat();
- }
-
- return pix;
-}
-
-V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format,
- BayerFormat::Packing packingReq)
-{
- const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);
- V4L2DeviceFormat deviceFormat;
-
- deviceFormat.fourcc = V4L2PixelFormat::fromPixelFormat(pix);
- deviceFormat.size = format.size;
- deviceFormat.colorSpace = format.colorSpace;
- return deviceFormat;
-}
-
-bool isRaw(const PixelFormat &pixFmt)
-{
- /*
-	 * The isRaw test might be redundant right now, as the pipeline handler only
- * supports RAW sensors. Leave it in for now, just as a sanity check.
- */
- if (!pixFmt.isValid())
- return false;
-
- const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
- if (!info.isValid())
- return false;
-
- return info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
-}
-
-double scoreFormat(double desired, double actual)
-{
- double score = desired - actual;
- /* Smaller desired dimensions are preferred. */
- if (score < 0.0)
- score = (-score) / 8;
- /* Penalise non-exact matches. */
- if (actual != desired)
- score *= 2;
-
- return score;
-}
-
-V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
-{
- double bestScore = std::numeric_limits<double>::max(), score;
- V4L2SubdeviceFormat bestFormat;
- bestFormat.colorSpace = ColorSpace::Raw;
-
- constexpr float penaltyAr = 1500.0;
- constexpr float penaltyBitDepth = 500.0;
-
- /* Calculate the closest/best mode from the user requested size. */
- for (const auto &iter : formatsMap) {
- const unsigned int mbusCode = iter.first;
- const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
- BayerFormat::Packing::None);
- const PixelFormatInfo &info = PixelFormatInfo::info(format);
-
- for (const Size &size : iter.second) {
- double reqAr = static_cast<double>(req.width) / req.height;
- double fmtAr = static_cast<double>(size.width) / size.height;
-
- /* Score the dimensions for closeness. */
- score = scoreFormat(req.width, size.width);
- score += scoreFormat(req.height, size.height);
- score += penaltyAr * scoreFormat(reqAr, fmtAr);
-
- /* Add any penalties... this is not an exact science! */
- score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
-
- if (score <= bestScore) {
- bestScore = score;
- bestFormat.mbus_code = mbusCode;
- bestFormat.size = size;
- }
-
- LOG(RPI, Debug) << "Format: " << size
- << " fmt " << format
- << " Score: " << score
- << " (best " << bestScore << ")";
- }
- }
-
- return bestFormat;
-}
-
-enum class Unicam : unsigned int { Image, Embedded };
-enum class Isp : unsigned int { Input, Output0, Output1, Stats };
-
-} /* namespace */
-
-class RPiCameraData : public Camera::Private
-{
-public:
- RPiCameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), state_(State::Stopped),
- supportsFlips_(false), flipsAlterBayerOrder_(false),
- dropFrameCount_(0), buffersAllocated_(false), ispOutputCount_(0)
- {
- }
-
- ~RPiCameraData()
- {
- freeBuffers();
- }
-
- void freeBuffers();
- void frameStarted(uint32_t sequence);
-
- int loadIPA(ipa::RPi::IPAInitResult *result);
- int configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result);
-
- void enumerateVideoDevices(MediaLink *link);
-
- void statsMetadataComplete(uint32_t bufferId, const ControlList &controls);
- void runIsp(uint32_t bufferId);
- void embeddedComplete(uint32_t bufferId);
- void setIspControls(const ControlList &controls);
- void setDelayedControls(const ControlList &controls);
- void setSensorControls(ControlList &controls);
- void unicamTimeout();
-
- /* bufferComplete signal handlers. */
- void unicamBufferDequeue(FrameBuffer *buffer);
- void ispInputDequeue(FrameBuffer *buffer);
- void ispOutputDequeue(FrameBuffer *buffer);
-
- void clearIncompleteRequests();
- void handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream);
- void handleState();
- Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
- void applyScalerCrop(const ControlList &controls);
-
- std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
-
- std::unique_ptr<CameraSensor> sensor_;
- SensorFormats sensorFormats_;
- /* Array of Unicam and ISP device streams and associated buffers/streams. */
- RPi::Device<Unicam, 2> unicam_;
- RPi::Device<Isp, 4> isp_;
- /* The vector below is just for convenience when iterating over all streams. */
- std::vector<RPi::Stream *> streams_;
- /* Stores the ids of the buffers mapped in the IPA. */
- std::unordered_set<unsigned int> ipaBuffers_;
- /*
- * Stores a cascade of Video Mux or Bridge devices between the sensor and
-	 * Unicam, together with the media links across the entities.
- */
- std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
-
- /* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
- SharedFD lsTable_;
-
- std::unique_ptr<DelayedControls> delayedCtrls_;
- bool sensorMetadata_;
-
- /*
- * All the functions in this class are called from a single calling
- * thread. So, we do not need to have any mutex to protect access to any
- * of the variables below.
- */
- enum class State { Stopped, Idle, Busy, IpaComplete };
- State state_;
-
- struct BayerFrame {
- FrameBuffer *buffer;
- ControlList controls;
- };
-
- std::queue<BayerFrame> bayerQueue_;
- std::queue<FrameBuffer *> embeddedQueue_;
- std::deque<Request *> requestQueue_;
-
- /*
- * Manage horizontal and vertical flips supported (or not) by the
- * sensor. Also store the "native" Bayer order (that is, with no
- * transforms applied).
- */
- bool supportsFlips_;
- bool flipsAlterBayerOrder_;
- BayerFormat::Order nativeBayerOrder_;
-
- /* For handling digital zoom. */
- IPACameraSensorInfo sensorInfo_;
- Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
- Rectangle scalerCrop_; /* crop in sensor native pixels */
- Size ispMinCropSize_;
-
- unsigned int dropFrameCount_;
-
- /*
-	 * If set, this stores the value that represents a gain of one for
- * the V4L2_CID_NOTIFY_GAINS control.
- */
- std::optional<int32_t> notifyGainsUnity_;
-
- /* Have internal buffers been allocated? */
- bool buffersAllocated_;
-
-private:
- void checkRequestCompleted();
- void fillRequestMetadata(const ControlList &bufferControls,
- Request *request);
- void tryRunPipeline();
- bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
-
- unsigned int ispOutputCount_;
-};
-
-class RPiCameraConfiguration : public CameraConfiguration
-{
-public:
- RPiCameraConfiguration(const RPiCameraData *data);
-
- Status validate() override;
-
- /* Cache the combinedTransform_ that will be applied to the sensor */
- Transform combinedTransform_;
-
-private:
- const RPiCameraData *data_;
-};
-
-class PipelineHandlerRPi : public PipelineHandler
-{
-public:
- PipelineHandlerRPi(CameraManager *manager);
-
- CameraConfiguration *generateConfiguration(Camera *camera, const StreamRoles &roles) override;
- int configure(Camera *camera, CameraConfiguration *config) override;
-
- int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
-
- int start(Camera *camera, const ControlList *controls) override;
- void stopDevice(Camera *camera) override;
-
- int queueRequestDevice(Camera *camera, Request *request) override;
-
- bool match(DeviceEnumerator *enumerator) override;
-
-private:
- RPiCameraData *cameraData(Camera *camera)
- {
- return static_cast<RPiCameraData *>(camera->_d());
- }
-
- int registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity);
- int queueAllBuffers(Camera *camera);
- int prepareBuffers(Camera *camera);
- void mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask);
-};
-
-RPiCameraConfiguration::RPiCameraConfiguration(const RPiCameraData *data)
- : CameraConfiguration(), data_(data)
-{
-}
-
-CameraConfiguration::Status RPiCameraConfiguration::validate()
-{
- Status status = Valid;
-
- if (config_.empty())
- return Invalid;
-
- status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
-
- /*
- * What if the platform has a non-90 degree rotation? We can't even
- * "adjust" the configuration and carry on. Alternatively, raising an
- * error means the platform can never run. Let's just print a warning
- * and continue regardless; the rotation is effectively set to zero.
- */
- int32_t rotation = data_->sensor_->properties().get(properties::Rotation);
- bool success;
- Transform rotationTransform = transformFromRotation(rotation, &success);
- if (!success)
- LOG(RPI, Warning) << "Invalid rotation of " << rotation
- << " degrees - ignoring";
- Transform combined = transform * rotationTransform;
-
- /*
- * We combine the platform and user transform, but must "adjust away"
- * any combined result that includes a transform, as we can't do those.
- * In this case, flipping only the transpose bit is helpful to
- * applications - they either get the transform they requested, or have
- * to do a simple transpose themselves (they don't have to worry about
- * the other possible cases).
- */
- if (!!(combined & Transform::Transpose)) {
- /*
- * Flipping the transpose bit in "transform" flips it in the
- * combined result too (as it's the last thing that happens),
- * which is of course clearing it.
- */
- transform ^= Transform::Transpose;
- combined &= ~Transform::Transpose;
- status = Adjusted;
- }
-
- /*
- * We also check if the sensor doesn't do h/vflips at all, in which
- * case we clear them, and the application will have to do everything.
- */
- if (!data_->supportsFlips_ && !!combined) {
- /*
- * If the sensor can do no transforms, then combined must be
- * changed to the identity. The only user transform that gives
-		 * rise to this is the inverse of the rotation. (Recall that
- * combined = transform * rotationTransform.)
- */
- transform = -rotationTransform;
- combined = Transform::Identity;
- status = Adjusted;
- }
-
- /*
- * Store the final combined transform that configure() will need to
- * apply to the sensor to save us working it out again.
- */
- combinedTransform_ = combined;
-
- unsigned int rawCount = 0, outCount = 0, count = 0, maxIndex = 0;
- std::pair<int, Size> outSize[2];
- Size maxSize;
- for (StreamConfiguration &cfg : config_) {
- if (isRaw(cfg.pixelFormat)) {
- /*
- * Calculate the best sensor mode we can use based on
- * the user request.
- */
- const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
- unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- if (info.isValid() && !info.packed)
- packing = BayerFormat::Packing::None;
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat,
- packing);
- int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
- if (ret)
- return Invalid;
-
- /*
- * Some sensors change their Bayer order when they are
- * h-flipped or v-flipped, according to the transform.
- * If this one does, we must advertise the transformed
- * Bayer order in the raw stream. Note how we must
- * fetch the "native" (i.e. untransformed) Bayer order,
- * because the sensor may currently be flipped!
- */
- V4L2PixelFormat fourcc = unicamFormat.fourcc;
- if (data_->flipsAlterBayerOrder_) {
- BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
- bayer.order = data_->nativeBayerOrder_;
- bayer = bayer.transform(combined);
- fourcc = bayer.toV4L2PixelFormat();
- }
-
- PixelFormat unicamPixFormat = fourcc.toPixelFormat();
- if (cfg.size != unicamFormat.size ||
- cfg.pixelFormat != unicamPixFormat) {
- cfg.size = unicamFormat.size;
- cfg.pixelFormat = unicamPixFormat;
- status = Adjusted;
- }
-
- cfg.stride = unicamFormat.planes[0].bpl;
- cfg.frameSize = unicamFormat.planes[0].size;
-
- rawCount++;
- } else {
- outSize[outCount] = std::make_pair(count, cfg.size);
- /* Record the largest resolution for fixups later. */
- if (maxSize < cfg.size) {
- maxSize = cfg.size;
- maxIndex = outCount;
- }
- outCount++;
- }
-
- count++;
-
- /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid number of streams requested";
- return Invalid;
- }
- }
-
- /*
- * Now do any fixups needed. For the two ISP outputs, one stream must be
- * equal or smaller than the other in all dimensions.
- */
- for (unsigned int i = 0; i < outCount; i++) {
- outSize[i].second.width = std::min(outSize[i].second.width,
- maxSize.width);
- outSize[i].second.height = std::min(outSize[i].second.height,
- maxSize.height);
-
- if (config_.at(outSize[i].first).size != outSize[i].second) {
- config_.at(outSize[i].first).size = outSize[i].second;
- status = Adjusted;
- }
-
- /*
- * Also validate the correct pixel formats here.
- * Note that Output0 and Output1 support a different
- * set of formats.
- *
- * Output 0 must be for the largest resolution. We will
- * have that fixed up in the code above.
- *
- */
- StreamConfiguration &cfg = config_.at(outSize[i].first);
- PixelFormat &cfgPixFmt = cfg.pixelFormat;
- V4L2VideoDevice *dev;
-
- if (i == maxIndex)
- dev = data_->isp_[Isp::Output0].dev();
- else
- dev = data_->isp_[Isp::Output1].dev();
-
- V4L2VideoDevice::Formats fmts = dev->formats();
-
- if (fmts.find(V4L2PixelFormat::fromPixelFormat(cfgPixFmt)) == fmts.end()) {
- /* If we cannot find a native format, use a default one. */
- cfgPixFmt = formats::NV12;
- status = Adjusted;
- }
-
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug)
- << "Try color space " << ColorSpace::toString(cfg.colorSpace);
-
- int ret = dev->tryFormat(&format);
- if (ret)
- return Invalid;
-
- if (cfg.colorSpace != format.colorSpace) {
- status = Adjusted;
- LOG(RPI, Debug)
- << "Color space changed from "
- << ColorSpace::toString(cfg.colorSpace) << " to "
- << ColorSpace::toString(format.colorSpace);
- }
-
- cfg.colorSpace = format.colorSpace;
-
- cfg.stride = format.planes[0].bpl;
- cfg.frameSize = format.planes[0].size;
-
- }
-
- return status;
-}
-
-PipelineHandlerRPi::PipelineHandlerRPi(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
-CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
-{
- RPiCameraData *data = cameraData(camera);
- CameraConfiguration *config = new RPiCameraConfiguration(data);
- V4L2SubdeviceFormat sensorFormat;
- unsigned int bufferCount;
- PixelFormat pixelFormat;
- V4L2VideoDevice::Formats fmts;
- Size size;
- std::optional<ColorSpace> colorSpace;
-
- if (roles.empty())
- return config;
-
- unsigned int rawCount = 0;
- unsigned int outCount = 0;
- Size sensorSize = data->sensor_->resolution();
- for (const StreamRole role : roles) {
- switch (role) {
- case StreamRole::Raw:
- size = sensorSize;
- sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
- pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
- BayerFormat::Packing::CSI2);
- ASSERT(pixelFormat.isValid());
- colorSpace = ColorSpace::Raw;
- bufferCount = 2;
- rawCount++;
- break;
-
- case StreamRole::StillCapture:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::NV12;
- /*
- * Still image codecs usually expect the JPEG color space.
- * Even RGB codecs will be fine as the RGB we get with the
- * JPEG color space is the same as sRGB.
- */
- colorSpace = ColorSpace::Jpeg;
- /* Return the largest sensor resolution. */
- size = sensorSize;
- bufferCount = 1;
- outCount++;
- break;
-
- case StreamRole::VideoRecording:
- /*
- * The colour denoise algorithm requires the analysis
- * image, produced by the second ISP output, to be in
- * YUV420 format. Select this format as the default, to
- * maximize chances that it will be picked by
- * applications and enable usage of the colour denoise
- * algorithm.
- */
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::YUV420;
- /*
- * Choose a color space appropriate for video recording.
- * Rec.709 will be a good default for HD resolutions.
- */
- colorSpace = ColorSpace::Rec709;
- size = { 1920, 1080 };
- bufferCount = 4;
- outCount++;
- break;
-
- case StreamRole::Viewfinder:
- fmts = data->isp_[Isp::Output0].dev()->formats();
- pixelFormat = formats::ARGB8888;
- colorSpace = ColorSpace::Jpeg;
- size = { 800, 600 };
- bufferCount = 4;
- outCount++;
- break;
-
- default:
- LOG(RPI, Error) << "Requested stream role not supported: "
- << role;
- delete config;
- return nullptr;
- }
-
- if (rawCount > 1 || outCount > 2) {
- LOG(RPI, Error) << "Invalid stream roles requested";
- delete config;
- return nullptr;
- }
-
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- if (role == StreamRole::Raw) {
- /* Translate the MBUS codes to a PixelFormat. */
- for (const auto &format : data->sensorFormats_) {
- PixelFormat pf = mbusCodeToPixelFormat(format.first,
- BayerFormat::Packing::CSI2);
- if (pf.isValid())
- deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
- std::forward_as_tuple(format.second.begin(), format.second.end()));
- }
- } else {
- /*
- * Translate the V4L2PixelFormat to PixelFormat. Note that we
- * limit the recommended largest ISP output size to match the
- * sensor resolution.
- */
- for (const auto &format : fmts) {
- PixelFormat pf = format.first.toPixelFormat();
- if (pf.isValid()) {
- const SizeRange &ispSizes = format.second[0];
- deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
- ispSizes.hStep, ispSizes.vStep);
- }
- }
- }
-
- /* Add the stream format based on the device node used for the use case. */
- StreamFormats formats(deviceFormats);
- StreamConfiguration cfg(formats);
- cfg.size = size;
- cfg.pixelFormat = pixelFormat;
- cfg.colorSpace = colorSpace;
- cfg.bufferCount = bufferCount;
- config->addConfiguration(cfg);
- }
-
- config->validate();
-
- return config;
-}
-
-int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- /* Start by freeing all buffers and reset the Unicam and ISP stream states. */
- data->freeBuffers();
- for (auto const stream : data->streams_)
- stream->setExternal(false);
-
- BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
- Size maxSize, sensorSize;
- unsigned int maxIndex = 0;
- bool rawStream = false;
- unsigned int bitDepth = defaultRawBitDepth;
-
- /*
- * Look for the RAW stream (if given) size as well as the largest
- * ISP output size.
- */
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- /*
- * If we have been given a RAW stream, use that size
- * for setting up the sensor.
- */
- sensorSize = cfg.size;
- rawStream = true;
- /* Check if the user has explicitly set an unpacked format. */
- BayerFormat bayerFormat = BayerFormat::fromPixelFormat(cfg.pixelFormat);
- packing = bayerFormat.packing;
- bitDepth = bayerFormat.bitDepth;
- } else {
- if (cfg.size > maxSize) {
- maxSize = config->at(i).size;
- maxIndex = i;
- }
- }
- }
-
- /*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- const RPiCameraConfiguration *rpiConfig =
- static_cast<const RPiCameraConfiguration *>(config);
- ControlList controls;
-
- controls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::HFlip)));
- controls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::VFlip)));
- data->setSensorControls(controls);
- }
-
- /* First calculate the best sensor mode we can use based on the user request. */
- V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
- ret = data->sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
-
- V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat, packing);
- ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- LOG(RPI, Info) << "Sensor: " << camera->id()
- << " - Selected sensor format: " << sensorFormat
- << " - Selected unicam format: " << unicamFormat;
-
- ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
- if (ret)
- return ret;
-
- /*
- * See which streams are requested, and route the user
- * StreamConfiguration appropriately.
- */
- V4L2DeviceFormat format;
- bool output0Set = false, output1Set = false;
- for (unsigned i = 0; i < config->size(); i++) {
- StreamConfiguration &cfg = config->at(i);
-
- if (isRaw(cfg.pixelFormat)) {
- cfg.setStream(&data->unicam_[Unicam::Image]);
- data->unicam_[Unicam::Image].setExternal(true);
- continue;
- }
-
- /* The largest resolution gets routed to the ISP Output 0 node. */
- RPi::Stream *stream = i == maxIndex ? &data->isp_[Isp::Output0]
- : &data->isp_[Isp::Output1];
-
- V4L2PixelFormat fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
- format.size = cfg.size;
- format.fourcc = fourcc;
- format.colorSpace = cfg.colorSpace;
-
- LOG(RPI, Debug) << "Setting " << stream->name() << " to "
- << format;
-
- ret = stream->dev()->setFormat(&format);
- if (ret)
- return -EINVAL;
-
- if (format.size != cfg.size || format.fourcc != fourcc) {
- LOG(RPI, Error)
- << "Failed to set requested format on " << stream->name()
- << ", returned " << format;
- return -EINVAL;
- }
-
- LOG(RPI, Debug)
- << "Stream " << stream->name() << " has color space "
- << ColorSpace::toString(cfg.colorSpace);
-
- cfg.setStream(stream);
- stream->setExternal(true);
-
- if (i != maxIndex)
- output1Set = true;
- else
- output0Set = true;
- }
-
- /*
- * If ISP::Output0 stream has not been configured by the application,
- * we must allow the hardware to generate an output so that the data
- * flow in the pipeline handler remains consistent, and we still generate
- * statistics for the IPA to use. So enable the output at a very low
- * resolution for internal use.
- *
- * \todo Allow the pipeline to work correctly without Output0 and only
- * statistics coming from the hardware.
- */
- if (!output0Set) {
- maxSize = Size(320, 240);
- format = {};
- format.size = maxSize;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
- /* No one asked for output, so the color space doesn't matter. */
- format.colorSpace = ColorSpace::Jpeg;
- ret = data->isp_[Isp::Output0].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error)
- << "Failed to set default format on ISP Output0: "
- << ret;
- return -EINVAL;
- }
-
- LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
- << format;
- }
-
- /*
- * If ISP::Output1 stream has not been requested by the application, we
- * set it up for internal use now. This second stream will be used for
- * fast colour denoise, and must be a quarter resolution of the ISP::Output0
- * stream. However, also limit the maximum size to 1200 pixels in the
- * larger dimension, just to avoid being wasteful with buffer allocations
- * and memory bandwidth.
- *
- * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
- * colour denoise will not run.
- */
- if (!output1Set) {
- V4L2DeviceFormat output1Format;
- constexpr Size maxDimensions(1200, 1200);
- const Size limit = maxDimensions.boundedToAspectRatio(format.size);
-
- output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
- output1Format.colorSpace = format.colorSpace;
- output1Format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::YUV420);
-
- LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
- << output1Format;
-
- ret = data->isp_[Isp::Output1].dev()->setFormat(&output1Format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP Output1: "
- << ret;
- return -EINVAL;
- }
- }
-
- /* ISP statistics output format. */
- format = {};
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
- ret = data->isp_[Isp::Stats].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
- << format;
- return ret;
- }
-
- /* Figure out the smallest selection the ISP will allow. */
- Rectangle testCrop(0, 0, 1, 1);
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
- data->ispMinCropSize_ = testCrop.size();
-
- /* Adjust aspect ratio by providing crops on the input image. */
- Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
- Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
- data->ispCrop_ = crop;
-
- data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
-
- ipa::RPi::IPAConfigResult result;
- ret = data->configureIPA(config, &result);
- if (ret)
- LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
-
- /*
- * Set the scaler crop to the value we are using (scaled to native sensor
- * coordinates).
- */
- data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
-
- /*
- * Configure the Unicam embedded data output format only if the sensor
- * supports it.
- */
- if (data->sensorMetadata_) {
- V4L2SubdeviceFormat embeddedFormat;
-
- data->sensor_->device()->getFormat(1, &embeddedFormat);
- format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
- format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
-
- LOG(RPI, Debug) << "Setting embedded data format.";
- ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
- << format;
- return ret;
- }
- }
-
- /*
- * Update the ScalerCropMaximum to the correct value for this camera mode.
- * For us, it's the same as the "analogue crop".
- *
- * \todo Make this property the ScalerCrop maximum value when dynamic
- * controls are available and set it at validate() time
- */
- data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
-
- /* Store the mode sensitivity for the application. */
- data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
-
- /* Update the controls that the Raspberry Pi IPA can handle. */
- ControlInfoMap::Map ctrlMap;
- for (auto const &c : result.controlInfo)
- ctrlMap.emplace(c.first, c.second);
-
- /* Add the ScalerCrop control limits based on the current mode. */
- ctrlMap.emplace(&controls::ScalerCrop,
- ControlInfo(Rectangle(data->ispMinCropSize_), Rectangle(data->sensorInfo_.outputSize)));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
-
- /* Setup the Video Mux/Bridge entities. */
- for (auto &[device, link] : data->bridgeDevices_) {
- /*
- * Start by disabling all the sink pad links on the devices in the
- * cascade, with the exception of the link connecting the device.
- */
- for (const MediaPad *p : device->entity()->pads()) {
- if (!(p->flags() & MEDIA_PAD_FL_SINK))
- continue;
-
- for (MediaLink *l : p->links()) {
- if (l != link)
- l->setEnabled(false);
- }
- }
-
- /*
-		 * Next, enable the entity -> entity links, and set up the pad format.
-		 *
-		 * \todo Some bridge devices may change the media bus code, so we
-		 * ought to read the source pad format and propagate it to the sink pad.
- */
- link->setEnabled(true);
- const MediaPad *sinkPad = link->sink();
- ret = device->setFormat(sinkPad->index(), &sensorFormat);
- if (ret) {
- LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
- << " pad " << sinkPad->index()
- << " with format " << format
- << ": " << ret;
- return ret;
- }
-
- LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
- << " on pad " << sinkPad->index();
- }
-
- return ret;
-}
-
-int PipelineHandlerRPi::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- RPi::Stream *s = static_cast<RPi::Stream *>(stream);
- unsigned int count = stream->configuration().bufferCount;
- int ret = s->dev()->exportBuffers(count, buffers);
-
- s->setExportedBuffers(buffers);
-
- return ret;
-}
-
-int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_)
- stream->resetBuffers();
-
- if (!data->buffersAllocated_) {
- /* Allocate buffers for internal pipeline usage. */
- ret = prepareBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to allocate buffers";
- data->freeBuffers();
- stop(camera);
- return ret;
- }
- data->buffersAllocated_ = true;
- }
-
- /* Check if a ScalerCrop control was specified. */
- if (controls)
- data->applyScalerCrop(*controls);
-
- /* Start the IPA. */
- ipa::RPi::StartConfig startConfig;
- data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
- &startConfig);
-
- /* Apply any gain/exposure settings that the IPA may have passed back. */
- if (!startConfig.controls.empty())
- data->setSensorControls(startConfig.controls);
-
- /* Configure the number of dropped frames required on startup. */
- data->dropFrameCount_ = startConfig.dropFrameCount;
-
- /* We need to set the dropFrameCount_ before queueing buffers. */
- ret = queueAllBuffers(camera);
- if (ret) {
- LOG(RPI, Error) << "Failed to queue buffers";
- stop(camera);
- return ret;
- }
-
- /* Enable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(true);
-
- /*
- * Reset the delayed controls with the gain and exposure values set by
- * the IPA.
- */
- data->delayedCtrls_->reset();
-
- data->state_ = RPiCameraData::State::Idle;
-
- /* Start all streams. */
- for (auto const stream : data->streams_) {
- ret = stream->dev()->streamOn();
- if (ret) {
- stop(camera);
- return ret;
- }
- }
-
- /*
- * Set the dequeue timeout to the larger of 2x the maximum possible
- * frame duration or 1 second.
- */
- utils::Duration timeout =
- std::max<utils::Duration>(1s, 2 * startConfig.maxSensorFrameLengthMs * 1ms);
- data->unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
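-
-	/*
-	 * For example, assuming a typical 33 ms maximum frame length, this
-	 * yields max(1 s, 66 ms) = 1 s, while a slow 700 ms sensor mode would
-	 * yield max(1 s, 1.4 s) = 1.4 s.
-	 */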
-
- return 0;
-}
-
-void PipelineHandlerRPi::stopDevice(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
-
- data->state_ = RPiCameraData::State::Stopped;
-
- /* Disable SOF event generation. */
- data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(false);
-
- for (auto const stream : data->streams_)
- stream->dev()->streamOff();
-
- data->clearIncompleteRequests();
- data->bayerQueue_ = {};
- data->embeddedQueue_ = {};
-
- /* Stop the IPA. */
- data->ipa_->stop();
-}
-
-int PipelineHandlerRPi::queueRequestDevice(Camera *camera, Request *request)
-{
- RPiCameraData *data = cameraData(camera);
-
- if (data->state_ == RPiCameraData::State::Stopped)
- return -EINVAL;
-
- LOG(RPI, Debug) << "queueRequestDevice: New request.";
-
- /* Push all buffers supplied in the Request to the respective streams. */
- for (auto stream : data->streams_) {
- if (!stream->isExternal())
- continue;
-
- FrameBuffer *buffer = request->findBuffer(stream);
- if (buffer && stream->getBufferId(buffer) == -1) {
- /*
- * This buffer is not recognised, so it must have been allocated
- * outside the v4l2 device. Store it in the stream buffer list
- * so we can track it.
- */
- stream->setExternalBuffer(buffer);
- }
- /*
- * If no buffer is provided by the request for this stream, we
- * queue a nullptr to the stream to signify that it must use an
- * internally allocated buffer for this capture request. This
- * buffer will not be given back to the application, but is used
- * to support the internal pipeline flow.
- *
- * The below queueBuffer() call will do nothing if there are not
- * enough internal buffers allocated, but this will be handled by
- * queuing the request for buffers in the RPiStream object.
- */
- int ret = stream->queueBuffer(buffer);
- if (ret)
- return ret;
- }
-
- /* Push the request to the back of the queue. */
- data->requestQueue_.push_back(request);
- data->handleState();
-
- return 0;
-}
-
-bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
-{
- DeviceMatch unicam("unicam");
- MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
-
- if (!unicamDevice) {
- LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
- return false;
- }
-
- DeviceMatch isp("bcm2835-isp");
- MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
-
- if (!ispDevice) {
- LOG(RPI, Debug) << "Unable to acquire ISP instance";
- return false;
- }
-
- /*
- * The loop below is used to register multiple cameras behind one or more
- * video mux devices that are attached to a particular Unicam instance.
- * Obviously these cameras cannot be used simultaneously.
- */
- unsigned int numCameras = 0;
- for (MediaEntity *entity : unicamDevice->entities()) {
- if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
- continue;
-
- int ret = registerCamera(unicamDevice, ispDevice, entity);
- if (ret)
- LOG(RPI, Error) << "Failed to register camera "
- << entity->name() << ": " << ret;
- else
- numCameras++;
- }
-
- return !!numCameras;
-}
-
-int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity)
-{
- std::unique_ptr<RPiCameraData> data = std::make_unique<RPiCameraData>(this);
-
- if (!data->dmaHeap_.isValid())
- return -ENOMEM;
-
- MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
- MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
- MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
- MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
- MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
-
- if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
- return -ENOENT;
-
- /* Locate and open the unicam video streams. */
- data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
-
- /* An embedded data node will not be present if the sensor does not support it. */
- MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
- if (unicamEmbedded) {
- data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
- data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data.get(),
- &RPiCameraData::unicamBufferDequeue);
- }
-
- /* Tag the ISP input stream as an import stream. */
- data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
- data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
- data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
- data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
-
- /* Wire up all the buffer connections. */
- data->unicam_[Unicam::Image].dev()->dequeueTimeout.connect(data.get(), &RPiCameraData::unicamTimeout);
- data->unicam_[Unicam::Image].dev()->frameStart.connect(data.get(), &RPiCameraData::frameStarted);
- data->unicam_[Unicam::Image].dev()->bufferReady.connect(data.get(), &RPiCameraData::unicamBufferDequeue);
- data->isp_[Isp::Input].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispInputDequeue);
- data->isp_[Isp::Output0].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Output1].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
- data->isp_[Isp::Stats].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-
- data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- if (!data->sensor_)
- return -EINVAL;
-
- if (data->sensor_->init())
- return -EINVAL;
-
- /*
- * Enumerate all the Video Mux/Bridge devices across the sensor -> unicam
- * chain. There may be a cascade of devices in this chain!
- */
- MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
- data->enumerateVideoDevices(link);
-
- data->sensorFormats_ = populateSensorFormats(data->sensor_);
-
- ipa::RPi::IPAInitResult result;
- if (data->loadIPA(&result)) {
- LOG(RPI, Error) << "Failed to load a suitable IPA library";
- return -EINVAL;
- }
-
- if (result.sensorConfig.sensorMetadata ^ !!unicamEmbedded) {
- LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
- result.sensorConfig.sensorMetadata = false;
- if (unicamEmbedded)
- data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
- }
-
- /*
- * Open all Unicam and ISP streams. The exception is the embedded data
- * stream, which only gets opened below if the IPA reports that the sensor
- * supports embedded data.
- *
- * The below grouping is just for convenience so that we can easily
- * iterate over all streams in one go.
- */
- data->streams_.push_back(&data->unicam_[Unicam::Image]);
- if (result.sensorConfig.sensorMetadata)
- data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
-
- for (auto &stream : data->isp_)
- data->streams_.push_back(&stream);
-
- for (auto stream : data->streams_) {
- int ret = stream->dev()->open();
- if (ret)
- return ret;
- }
-
- if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
- LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
- return -EINVAL;
- }
-
- /*
- * Setup our delayed control writer with the sensor default
- * gain and exposure delays. Mark VBLANK for priority write.
- */
- std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
- { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
- { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
- };
- data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
- data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
-
- /* Register initial controls that the Raspberry Pi IPA can handle. */
- data->controlInfo_ = std::move(result.controlInfo);
-
- /* Initialize the camera properties. */
- data->properties_ = data->sensor_->properties();
-
- /*
- * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
- * sensor of the colour gains. It is defined to be a linear gain where
- * the default value represents a gain of exactly one.
- */
- auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
- if (it != data->sensor_->controls().end())
- data->notifyGainsUnity_ = it->second.def().get<int32_t>();
-
- /*
-	 * Set a default value for the ScalerCropMaximum property to show
-	 * that we support its use. However, initialise it to zero because
-	 * it's not meaningful until a camera mode has been chosen.
- */
- data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
-
- /*
- * We cache three things about the sensor in relation to transforms
- * (meaning horizontal and vertical flips).
- *
- * Firstly, does it support them?
- * Secondly, if you use them does it affect the Bayer ordering?
- * Thirdly, what is the "native" Bayer order, when no transforms are
- * applied?
- *
- * We note that the sensor's cached list of supported formats is
- * already in the "native" order, with any flips having been undone.
- */
- const V4L2Subdevice *sensor = data->sensor_->device();
- const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
- if (hflipCtrl) {
- /* We assume it will support vflips too... */
- data->supportsFlips_ = true;
- data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- }
-
- /* Look for a valid Bayer format. */
- BayerFormat bayerFormat;
- for (const auto &iter : data->sensorFormats_) {
- bayerFormat = BayerFormat::fromMbusCode(iter.first);
- if (bayerFormat.isValid())
- break;
- }
-
- if (!bayerFormat.isValid()) {
- LOG(RPI, Error) << "No Bayer format found";
- return -EINVAL;
- }
- data->nativeBayerOrder_ = bayerFormat.order;
-
- /*
- * List the available streams an application may request. At present, we
- * do not advertise Unicam Embedded and ISP Statistics streams, as there
- * is no mechanism for the application to request non-image buffer formats.
- */
- std::set<Stream *> streams;
- streams.insert(&data->unicam_[Unicam::Image]);
- streams.insert(&data->isp_[Isp::Output0]);
- streams.insert(&data->isp_[Isp::Output1]);
-
- /* Create and register the camera. */
- const std::string &id = data->sensor_->id();
- std::shared_ptr<Camera> camera =
- Camera::create(std::move(data), id, streams);
- PipelineHandler::registerCamera(std::move(camera));
-
- LOG(RPI, Info) << "Registered camera " << id
- << " to Unicam device " << unicam->deviceNode()
- << " and ISP device " << isp->deviceNode();
- return 0;
-}
-
-int PipelineHandlerRPi::queueAllBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- int ret;
-
- for (auto const stream : data->streams_) {
- if (!stream->isExternal()) {
- ret = stream->queueAllBuffers();
- if (ret < 0)
- return ret;
- } else {
- /*
- * For external streams, we must queue up a set of internal
- * buffers to handle the number of drop frames requested by
- * the IPA. This is done by passing nullptr in queueBuffer().
- *
- * The below queueBuffer() call will do nothing if there
- * are not enough internal buffers allocated, but this will
- * be handled by queuing the request for buffers in the
- * RPiStream object.
- */
- unsigned int i;
- for (i = 0; i < data->dropFrameCount_; i++) {
- ret = stream->queueBuffer(nullptr);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-int PipelineHandlerRPi::prepareBuffers(Camera *camera)
-{
- RPiCameraData *data = cameraData(camera);
- unsigned int numRawBuffers = 0;
- int ret;
-
- for (Stream *s : camera->streams()) {
- if (isRaw(s->configuration().pixelFormat)) {
- numRawBuffers = s->configuration().bufferCount;
- break;
- }
- }
-
- /* Decide how many internal buffers to allocate. */
- for (auto const stream : data->streams_) {
- unsigned int numBuffers;
- /*
- * For Unicam, allocate a minimum of 4 buffers as we want
- * to avoid any frame drops.
- */
- constexpr unsigned int minBuffers = 4;
- if (stream == &data->unicam_[Unicam::Image]) {
- /*
- * If an application has configured a RAW stream, allocate
- * additional buffers to make up the minimum, but ensure
- * we have at least 2 sets of internal buffers to use to
- * minimise frame drops.
- */
- numBuffers = std::max<int>(2, minBuffers - numRawBuffers);
- } else if (stream == &data->isp_[Isp::Input]) {
- /*
- * ISP input buffers are imported from Unicam, so follow
- * similar logic as above to count all the RAW buffers
- * available.
- */
- numBuffers = numRawBuffers + std::max<int>(2, minBuffers - numRawBuffers);
-
- } else if (stream == &data->unicam_[Unicam::Embedded]) {
- /*
- * Embedded data buffers are (currently) for internal use,
- * so allocate the minimum required to avoid frame drops.
- */
- numBuffers = minBuffers;
- } else {
- /*
- * Since the ISP runs synchronous with the IPA and requests,
- * we only ever need one set of internal buffers. Any buffers
- * the application wants to hold onto will already be exported
- * through PipelineHandlerRPi::exportFrameBuffers().
- */
- numBuffers = 1;
- }
-
- ret = stream->prepareBuffers(numBuffers);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Pass the stats and embedded data buffers to the IPA. No other
- * buffers need to be passed.
- */
- mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), ipa::RPi::MaskStats);
- if (data->sensorMetadata_)
- mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
- ipa::RPi::MaskEmbeddedData);
-
- return 0;
-}
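-
-/*
- * A worked example of the counts above, with hypothetical numbers: if the
- * application configures a RAW stream with bufferCount = 6, Unicam Image
- * gets max(2, 4 - 6) = 2 internal buffers, ISP Input counts 6 + 2 = 8
- * importable buffers, Unicam Embedded gets the minimum of 4, and each
- * remaining ISP stream gets 1.
- */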
-
-void PipelineHandlerRPi::mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask)
-{
- RPiCameraData *data = cameraData(camera);
- std::vector<IPABuffer> ipaBuffers;
- /*
-	 * Link each FrameBuffer with its id (the key value) in the map stored
-	 * in the RPi stream object, along with an identifier mask.
- *
- * This will allow us to identify buffers passed between the pipeline
- * handler and the IPA.
- */
- for (auto const &it : buffers) {
- ipaBuffers.push_back(IPABuffer(mask | it.first,
- it.second->planes()));
- data->ipaBuffers_.insert(mask | it.first);
- }
-
- data->ipa_->mapBuffers(ipaBuffers);
-}
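-
-/*
- * A minimal sketch of the id encoding used above (illustrative only;
- * assuming ipa::RPi::MaskID covers the low id bits, as its use in
- * rpi_stream.h suggests):
- *
- *   unsigned int ipaId = ipa::RPi::MaskStats | bufferKey;
- *   unsigned int recoveredKey = ipaId & ipa::RPi::MaskID;
- *
- * The mask identifies which stream a buffer belongs to as it passes
- * between the pipeline handler and the IPA.
- */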
-
-void RPiCameraData::freeBuffers()
-{
- if (ipa_) {
- /*
- * Copy the buffer ids from the unordered_set to a vector to
- * pass to the IPA.
- */
- std::vector<unsigned int> ipaBuffers(ipaBuffers_.begin(),
- ipaBuffers_.end());
- ipa_->unmapBuffers(ipaBuffers);
- ipaBuffers_.clear();
- }
-
- for (auto const stream : streams_)
- stream->releaseBuffers();
-
- buffersAllocated_ = false;
-}
-
-void RPiCameraData::frameStarted(uint32_t sequence)
-{
- LOG(RPI, Debug) << "frame start " << sequence;
-
- /* Write any controls for the next frame as soon as we can. */
- delayedCtrls_->applyControls(sequence);
-}
-
-int RPiCameraData::loadIPA(ipa::RPi::IPAInitResult *result)
-{
- ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
-
- if (!ipa_)
- return -ENOENT;
-
- ipa_->statsMetadataComplete.connect(this, &RPiCameraData::statsMetadataComplete);
- ipa_->runIsp.connect(this, &RPiCameraData::runIsp);
- ipa_->embeddedComplete.connect(this, &RPiCameraData::embeddedComplete);
- ipa_->setIspControls.connect(this, &RPiCameraData::setIspControls);
- ipa_->setDelayedControls.connect(this, &RPiCameraData::setDelayedControls);
-
- /*
- * The configuration (tuning file) is made from the sensor name unless
- * the environment variable overrides it.
- */
- std::string configurationFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0')
- configurationFile = ipa_->configurationFile(sensor_->model() + ".json");
- else
- configurationFile = std::string(configFromEnv);
-
- IPASettings settings(configurationFile, sensor_->model());
-
- return ipa_->init(settings, result);
-}
-
-int RPiCameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::IPAConfigResult *result)
-{
- std::map<unsigned int, IPAStream> streamConfig;
- std::map<unsigned int, ControlInfoMap> entityControls;
- ipa::RPi::IPAConfig ipaConfig;
-
- /* Inform IPA of stream configuration and sensor controls. */
- unsigned int i = 0;
- for (auto const &stream : isp_) {
- if (stream.isExternal()) {
- streamConfig[i++] = IPAStream(
- stream.configuration().pixelFormat,
- stream.configuration().size);
- }
- }
-
- entityControls.emplace(0, sensor_->controls());
- entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
-
- /* Always send the user transform to the IPA. */
- ipaConfig.transform = static_cast<unsigned int>(config->transform);
-
- /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
- if (!lsTable_.isValid()) {
- lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
- if (!lsTable_.isValid())
- return -ENOMEM;
-
- /* Allow the IPA to mmap the LS table via the file descriptor. */
- /*
- * \todo Investigate if mapping the lens shading table buffer
- * could be handled with mapBuffers().
- */
- ipaConfig.lsTableHandle = lsTable_;
- }
-
- /* We store the IPACameraSensorInfo for digital zoom calculations. */
- int ret = sensor_->sensorInfo(&sensorInfo_);
- if (ret) {
- LOG(RPI, Error) << "Failed to retrieve camera sensor info";
- return ret;
- }
-
- /* Ready the IPA - it must know about the sensor resolution. */
- ControlList controls;
- ret = ipa_->configure(sensorInfo_, streamConfig, entityControls, ipaConfig,
- &controls, result);
- if (ret < 0) {
- LOG(RPI, Error) << "IPA configuration failed!";
- return -EPIPE;
- }
-
- if (!controls.empty())
- setSensorControls(controls);
-
- return 0;
-}
-
-/*
- * enumerateVideoDevices() iterates over the Media Controller topology, starting
- * at the sensor and finishing at Unicam. For each sensor, RPiCameraData stores
- * a unique list of any intermediate video mux or bridge devices connected in a
- * cascade, together with the entity-to-entity link.
- *
- * Entity pad configuration and link enabling happen at the end of configure().
- * We first disable all pad links on each entity device in the chain, and then
- * selectively enable the specific links that connect the sensor to Unicam
- * across all intermediate muxes and bridges.
- *
- * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
- * will be disabled, and Sensor1 -> Mux1 -> Unicam links enabled. Alternatively,
- * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
- * and Sensor3 -> Mux2 -> Mux1 -> Unicam links are enabled. All other links will
- * remain unchanged.
- *
- * +----------+
- * | Unicam |
- * +-----^----+
- * |
- * +---+---+
- * | Mux1 <-------+
- * +--^----+ |
- * | |
- * +-----+---+ +---+---+
- * | Sensor1 | | Mux2 |<--+
- * +---------+ +-^-----+ |
- * | |
- * +-------+-+ +---+-----+
- * | Sensor2 | | Sensor3 |
- * +---------+ +---------+
- */
-void RPiCameraData::enumerateVideoDevices(MediaLink *link)
-{
- const MediaPad *sinkPad = link->sink();
- const MediaEntity *entity = sinkPad->entity();
- bool unicamFound = false;
-
- /* We only deal with Video Mux and Bridge devices in cascade. */
- if (entity->function() != MEDIA_ENT_F_VID_MUX &&
- entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
- return;
-
- /* Find the source pad for this Video Mux or Bridge device. */
- const MediaPad *sourcePad = nullptr;
- for (const MediaPad *pad : entity->pads()) {
- if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
- /*
- * We can only deal with devices that have a single source
- * pad. If this device has multiple source pads, ignore it
- * and this branch in the cascade.
- */
- if (sourcePad)
- return;
-
- sourcePad = pad;
- }
- }
-
- LOG(RPI, Debug) << "Found video mux device " << entity->name()
- << " linked to sink pad " << sinkPad->index();
-
- bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
- bridgeDevices_.back().first->open();
-
- /*
- * Iterate through all the sink pad links down the cascade to find any
- * other Video Mux and Bridge devices.
- */
- for (MediaLink *l : sourcePad->links()) {
- enumerateVideoDevices(l);
- /* Once we reach the Unicam entity, we are done. */
- if (l->sink()->entity()->name() == "unicam-image") {
- unicamFound = true;
- break;
- }
- }
-
- /* This identifies the end of our entity enumeration recursion. */
- if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
- /*
- * If Unicam is not at the end of this cascade, we cannot configure
- * this topology automatically, so remove all entity references.
- */
- if (!unicamFound) {
- LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
- bridgeDevices_.clear();
- }
- }
-}
-
-void RPiCameraData::statsMetadataComplete(uint32_t bufferId, const ControlList &controls)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(bufferId);
-
- handleStreamBuffer(buffer, &isp_[Isp::Stats]);
-
- /* Add to the Request metadata buffer what the IPA has provided. */
- Request *request = requestQueue_.front();
- request->metadata().merge(controls);
-
- /*
- * Inform the sensor of the latest colour gains if it has the
- * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
- */
- if (notifyGainsUnity_ && controls.contains(libcamera::controls::ColourGains)) {
- libcamera::Span<const float> colourGains = controls.get(libcamera::controls::ColourGains);
- /* The control wants linear gains in the order B, Gb, Gr, R. */
- ControlList ctrls(sensor_->controls());
- std::array<int32_t, 4> gains{
- static_cast<int32_t>(colourGains[1] * *notifyGainsUnity_),
- *notifyGainsUnity_,
- *notifyGainsUnity_,
- static_cast<int32_t>(colourGains[0] * *notifyGainsUnity_)
- };
- ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
-
- sensor_->setControls(&ctrls);
- }
-
- state_ = State::IpaComplete;
- handleState();
-}
-
-void RPiCameraData::runIsp(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Image].getBuffers().at(bufferId);
-
- LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bufferId
- << ", timestamp: " << buffer->metadata().timestamp;
-
- isp_[Isp::Input].queueBuffer(buffer);
- ispOutputCount_ = 0;
- handleState();
-}
-
-void RPiCameraData::embeddedComplete(uint32_t bufferId)
-{
- if (state_ == State::Stopped)
- return;
-
- FrameBuffer *buffer = unicam_[Unicam::Embedded].getBuffers().at(bufferId);
- handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
- handleState();
-}
-
-void RPiCameraData::setIspControls(const ControlList &controls)
-{
- ControlList ctrls = controls;
-
- if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
- ControlValue &value =
- const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
- Span<uint8_t> s = value.data();
- bcm2835_isp_lens_shading *ls =
- reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
- ls->dmabuf = lsTable_.get();
- }
-
- isp_[Isp::Input].dev()->setControls(&ctrls);
- handleState();
-}
-
-void RPiCameraData::setDelayedControls(const ControlList &controls)
-{
- if (!delayedCtrls_->push(controls))
- LOG(RPI, Error) << "V4L2 DelayedControl set failed";
- handleState();
-}
-
-void RPiCameraData::setSensorControls(ControlList &controls)
-{
- /*
-	 * We must ensure that if both VBLANK and EXPOSURE are present, the
-	 * former is written ahead of, and separately from, EXPOSURE to avoid
- * V4L2 rejecting the latter. This is identical to what DelayedControls
- * does with the priority write flag.
- *
- * As a consequence of the below logic, VBLANK gets set twice, and we
- * rely on the v4l2 framework to not pass the second control set to the
- * driver as the actual control value has not changed.
- */
- if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
- ControlList vblank_ctrl;
-
- vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
- sensor_->setControls(&vblank_ctrl);
- }
-
- sensor_->setControls(&controls);
-}
-
-void RPiCameraData::unicamTimeout()
-{
- LOG(RPI, Error) << "Unicam has timed out!";
- LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
- LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
-}
-
-void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : unicam_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- if (stream == &unicam_[Unicam::Image]) {
- /*
- * Lookup the sensor controls used for this frame sequence from
- * DelayedControl and queue them along with the frame buffer.
- */
- ControlList ctrl = delayedCtrls_->get(buffer->metadata().sequence);
- /*
- * Add the frame timestamp to the ControlList for the IPA to use
- * as it does not receive the FrameBuffer object.
- */
- ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
- bayerQueue_.push({ buffer, std::move(ctrl) });
- } else {
- embeddedQueue_.push(buffer);
- }
-
- handleState();
-}
-
-void RPiCameraData::ispInputDequeue(FrameBuffer *buffer)
-{
- if (state_ == State::Stopped)
- return;
-
- LOG(RPI, Debug) << "Stream ISP Input buffer complete"
- << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /* The ISP input buffer gets re-queued into Unicam. */
- handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
- handleState();
-}
-
-void RPiCameraData::ispOutputDequeue(FrameBuffer *buffer)
-{
- RPi::Stream *stream = nullptr;
- int index;
-
- if (state_ == State::Stopped)
- return;
-
- for (RPi::Stream &s : isp_) {
- index = s.getBufferId(buffer);
- if (index != -1) {
- stream = &s;
- break;
- }
- }
-
- /* The buffer must belong to one of our ISP output streams. */
- ASSERT(stream);
-
- LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
- << ", buffer id " << index
- << ", timestamp: " << buffer->metadata().timestamp;
-
- /*
- * ISP statistics buffer must not be re-queued or sent back to the
- * application until after the IPA signals so.
- */
- if (stream == &isp_[Isp::Stats]) {
- ipa_->signalStatReady(ipa::RPi::MaskStats | static_cast<unsigned int>(index));
- } else {
- /* Any other ISP output can be handed back to the application now. */
- handleStreamBuffer(buffer, stream);
- }
-
- /*
- * Increment the number of ISP outputs generated.
- * This is needed to track dropped frames.
- */
- ispOutputCount_++;
-
- handleState();
-}
-
-void RPiCameraData::clearIncompleteRequests()
-{
- /*
-	 * All outstanding requests (and their associated buffers) must be
-	 * returned to the application.
- */
- while (!requestQueue_.empty()) {
- Request *request = requestQueue_.front();
-
- for (auto &b : request->buffers()) {
- FrameBuffer *buffer = b.second;
- /*
- * Has the buffer already been handed back to the
- * request? If not, do so now.
- */
- if (buffer->request()) {
- buffer->_d()->cancel();
- pipe()->completeBuffer(request, buffer);
- }
- }
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- }
-}
-
-void RPiCameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- /*
- * It is possible to be here without a pending request, so check
-	 * that we actually have one to action; otherwise we just return the
-	 * buffer to the stream.
- */
- Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
- if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
- /*
- * Check if this is an externally provided buffer, and if
- * so, we must stop tracking it in the pipeline handler.
- */
- handleExternalBuffer(buffer, stream);
- /*
- * Tag the buffer as completed, returning it to the
- * application.
- */
- pipe()->completeBuffer(request, buffer);
- } else {
- /*
- * This buffer was not part of the Request (which happens if an
- * internal buffer was used for an external stream, or
- * unconditionally for internal streams), or there is no pending
- * request, so we can recycle it.
- */
- stream->returnBuffer(buffer);
- }
-}
-
-void RPiCameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
- unsigned int id = stream->getBufferId(buffer);
-
- if (!(id & ipa::RPi::MaskExternalBuffer))
- return;
-
- /* Stop the Stream object from tracking the buffer. */
- stream->removeExternalBuffer(buffer);
-}
-
-void RPiCameraData::handleState()
-{
- switch (state_) {
- case State::Stopped:
- case State::Busy:
- break;
-
- case State::IpaComplete:
- /* If the request is completed, we will switch to Idle state. */
- checkRequestCompleted();
- /*
- * No break here, we want to try running the pipeline again.
- * The fallthrough clause below suppresses compiler warnings.
- */
- [[fallthrough]];
-
- case State::Idle:
- tryRunPipeline();
- break;
- }
-}
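-
-/*
- * An informal sketch of the state transitions driven through handleState(),
- * as implied by the code in this file (illustrative only):
- *
- *   Idle --- tryRunPipeline() ---------------> Busy
- *   Busy --- statsMetadataComplete() --------> IpaComplete
- *   IpaComplete --- checkRequestCompleted() -> Idle
- *   any state --- stopDevice() --------------> Stopped
- */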
-
-void RPiCameraData::checkRequestCompleted()
-{
- bool requestCompleted = false;
- /*
-	 * If we are dropping this frame, do not touch the request; simply
-	 * change the state to IDLE when ready.
- */
- if (!dropFrameCount_) {
- Request *request = requestQueue_.front();
- if (request->hasPendingBuffers())
- return;
-
- /* Must wait for metadata to be filled in before completing. */
- if (state_ != State::IpaComplete)
- return;
-
- pipe()->completeRequest(request);
- requestQueue_.pop_front();
- requestCompleted = true;
- }
-
- /*
- * Make sure we have three outputs completed in the case of a dropped
- * frame.
- */
- if (state_ == State::IpaComplete &&
- ((ispOutputCount_ == 3 && dropFrameCount_) || requestCompleted)) {
- state_ = State::Idle;
- if (dropFrameCount_) {
- dropFrameCount_--;
- LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
- << dropFrameCount_ << " left)";
- }
- }
-}
-
-Rectangle RPiCameraData::scaleIspCrop(const Rectangle &ispCrop) const
-{
- /*
- * Scale a crop rectangle defined in the ISP's coordinates into native sensor
- * coordinates.
- */
- Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
- sensorInfo_.outputSize);
- nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
- return nativeCrop;
-}
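-
-/*
- * A worked example with hypothetical numbers: if analogCrop is
- * (128, 96)/3840x2160 and outputSize is 1920x1080, an ispCrop of
- * (0, 0)/960x540 is scaled by 3840/1920 = 2160/1080 = 2 to
- * (0, 0)/1920x1080, then translated to (128, 96)/1920x1080 in native
- * sensor coordinates.
- */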
-
-void RPiCameraData::applyScalerCrop(const ControlList &controls)
-{
- if (controls.contains(controls::ScalerCrop)) {
- Rectangle nativeCrop = controls.get<Rectangle>(controls::ScalerCrop);
-
- if (!nativeCrop.width || !nativeCrop.height)
- nativeCrop = { 0, 0, 1, 1 };
-
- /* Create a version of the crop scaled to ISP (camera mode) pixels. */
- Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
- ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
-
- /*
- * The crop that we set must be:
- * 1. At least as big as ispMinCropSize_, once that's been
- * enlarged to the same aspect ratio.
- * 2. With the same mid-point, if possible.
- * 3. But it can't go outside the sensor area.
- */
- Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
- Size size = ispCrop.size().expandedTo(minSize);
- ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
-
- if (ispCrop != ispCrop_) {
- isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop);
- ispCrop_ = ispCrop;
-
- /*
- * Also update the ScalerCrop in the metadata with what we actually
- * used. But we must first rescale that from ISP (camera mode) pixels
- * back into sensor native pixels.
- */
- scalerCrop_ = scaleIspCrop(ispCrop_);
- }
- }
-}
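-
-/*
- * A worked example of the constraints above, with hypothetical numbers and
- * assuming a square request: given an ispMinCropSize_ of 32x32 and an
- * outputSize of 1920x1080, a scaled 16x16 ispCrop centred at (960, 540) is
- * first expanded to the 32x32 minimum, re-centred at (960, 540), and would
- * only be shifted by enclosedIn() if the enlarged rectangle crossed the
- * sensor output edge.
- */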
-
-void RPiCameraData::fillRequestMetadata(const ControlList &bufferControls,
- Request *request)
-{
- request->metadata().set(controls::SensorTimestamp,
- bufferControls.get(controls::SensorTimestamp));
-
- request->metadata().set(controls::ScalerCrop, scalerCrop_);
-}
-
-void RPiCameraData::tryRunPipeline()
-{
- FrameBuffer *embeddedBuffer;
- BayerFrame bayerFrame;
-
- /* If any of our request or buffer queues are empty, we cannot proceed. */
- if (state_ != State::Idle || requestQueue_.empty() ||
- bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
- return;
-
- if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
- return;
-
- /* Take the first request from the queue and action the IPA. */
- Request *request = requestQueue_.front();
-
- /* See if a new ScalerCrop value needs to be applied. */
- applyScalerCrop(request->controls());
-
- /*
- * Clear the request metadata and fill it with some initial non-IPA
- * related controls. We clear it first because the request metadata
- * may have been populated if we have dropped the previous frame.
- */
- request->metadata().clear();
- fillRequestMetadata(bayerFrame.controls, request);
-
- /*
- * Process all the user controls by the IPA. Once this is complete, we
- * queue the ISP output buffer listed in the request to start the HW
- * pipeline.
- */
- ipa_->signalQueueRequest(request->controls());
-
- /* Set our state to say the pipeline is active. */
- state_ = State::Busy;
-
- unsigned int bayerId = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Bayer buffer id: " << bayerId;
-
- ipa::RPi::ISPConfig ispPrepare;
- ispPrepare.bayerBufferId = ipa::RPi::MaskBayerData | bayerId;
- ispPrepare.controls = std::move(bayerFrame.controls);
-
- if (embeddedBuffer) {
- unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
-
- ispPrepare.embeddedBufferId = ipa::RPi::MaskEmbeddedData | embeddedId;
- ispPrepare.embeddedBufferPresent = true;
-
- LOG(RPI, Debug) << "Signalling signalIspPrepare:"
- << " Embedded buffer id: " << embeddedId;
- }
-
- ipa_->signalIspPrepare(ispPrepare);
-}
-
-bool RPiCameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
-{
- if (bayerQueue_.empty())
- return false;
-
- /* Start with the front of the bayer queue. */
- bayerFrame = std::move(bayerQueue_.front());
- bayerQueue_.pop();
-
- /*
- * Find the embedded data buffer with a matching timestamp to pass to
-	 * the IPA. Any embedded buffers with a timestamp lower than that of
-	 * the current bayer buffer will be removed and re-queued to the driver.
- */
- uint64_t ts = bayerFrame.buffer->metadata().timestamp;
- embeddedBuffer = nullptr;
- while (!embeddedQueue_.empty()) {
- FrameBuffer *b = embeddedQueue_.front();
- if (b->metadata().timestamp < ts) {
- embeddedQueue_.pop();
- unicam_[Unicam::Embedded].returnBuffer(b);
- LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
- << unicam_[Unicam::Embedded].name();
- } else if (b->metadata().timestamp == ts) {
- /* Found a match! */
- embeddedBuffer = b;
- embeddedQueue_.pop();
- break;
- } else {
- break; /* Only higher timestamps from here. */
- }
- }
-
- if (!embeddedBuffer && sensorMetadata_) {
- /* Log if there is no matching embedded data buffer found. */
- LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
- }
-
- return true;
-}
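-
-/*
- * For illustration, with hypothetical timestamps: if the bayer frame has
- * timestamp 400 and embeddedQueue_ holds buffers with timestamps
- * { 200, 400, 600 }, the walk above returns the 200 buffer to the driver,
- * matches and pops the 400 buffer, and leaves 600 queued for a later
- * bayer frame.
- */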
-
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRPi)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.h b/src/libcamera/pipeline/raspberrypi/rpi_stream.h
deleted file mode 100644
index fe011100..00000000
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
- *
- * rpi_stream.h - Raspberry Pi device stream abstraction class.
- */
-
-#pragma once
-
-#include <queue>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-namespace RPi {
-
-using BufferMap = std::unordered_map<unsigned int, FrameBuffer *>;
-
-/*
- * Device stream abstraction for either an internal or external stream.
- * Used for both Unicam and the ISP.
- */
-class Stream : public libcamera::Stream
-{
-public:
- Stream()
- : id_(ipa::RPi::MaskID)
- {
- }
-
- Stream(const char *name, MediaEntity *dev, bool importOnly = false)
- : external_(false), importOnly_(importOnly), name_(name),
- dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(ipa::RPi::MaskID)
- {
- }
-
- V4L2VideoDevice *dev() const;
- std::string name() const;
- bool isImporter() const;
- void resetBuffers();
-
- void setExternal(bool external);
- bool isExternal() const;
-
- void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- const BufferMap &getBuffers() const;
- int getBufferId(FrameBuffer *buffer) const;
-
- void setExternalBuffer(FrameBuffer *buffer);
- void removeExternalBuffer(FrameBuffer *buffer);
-
- int prepareBuffers(unsigned int count);
- int queueBuffer(FrameBuffer *buffer);
- void returnBuffer(FrameBuffer *buffer);
-
- int queueAllBuffers();
- void releaseBuffers();
-
-private:
- class IdGenerator
- {
- public:
- IdGenerator(int max)
- : max_(max), id_(0)
- {
- }
-
- int get()
- {
- int id;
- if (!recycle_.empty()) {
- id = recycle_.front();
- recycle_.pop();
- } else {
- id = id_++;
- ASSERT(id_ <= max_);
- }
- return id;
- }
-
- void release(int id)
- {
- recycle_.push(id);
- }
-
- void reset()
- {
- id_ = 0;
- recycle_ = {};
- }
-
- private:
- int max_;
- int id_;
- std::queue<int> recycle_;
- };
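-
-	/*
-	 * A minimal usage sketch of IdGenerator (illustrative only): ids are
-	 * recycled once released, and reset() restarts the sequence.
-	 *
-	 *   IdGenerator gen(ipa::RPi::MaskID);
-	 *   int a = gen.get();     a == 0
-	 *   int b = gen.get();     b == 1
-	 *   gen.release(a);
-	 *   int c = gen.get();     c == 0 (recycled)
-	 */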
-
- void clearBuffers();
- int queueToDevice(FrameBuffer *buffer);
-
- /*
- * Indicates that this stream is active externally, i.e. the buffers
- * might be provided by (and returned to) the application.
- */
- bool external_;
-
- /* Indicates that this stream only imports buffers, e.g. ISP input. */
- bool importOnly_;
-
- /* Stream name identifier. */
- std::string name_;
-
- /* The actual device stream. */
- std::unique_ptr<V4L2VideoDevice> dev_;
-
-	/* Generates unique id keys for entries in bufferMap_. */
- IdGenerator id_;
-
- /* All frame buffers associated with this device stream. */
- BufferMap bufferMap_;
-
- /*
- * List of frame buffers that we can use if none have been provided by
- * the application for external streams. This is populated by the
- * buffers exported internally.
- */
- std::queue<FrameBuffer *> availableBuffers_;
-
- /*
- * List of frame buffers that are to be queued into the device from a Request.
- * A nullptr indicates any internal buffer can be used (from availableBuffers_),
- * whereas a valid pointer indicates an external buffer to be queued.
- *
- * Ordering buffers to be queued is important here as it must match the
- * requests coming from the application.
- */
- std::queue<FrameBuffer *> requestBuffers_;
-
- /*
-	 * This is a list of buffers exported internally. We need to keep this
-	 * around as the stream must maintain ownership of these buffers.
- */
- std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
-};
-
-/*
- * The following class is just a convenient (and typesafe) array of device
- * streams indexed with an enum class.
- */
-template<typename E, std::size_t N>
-class Device : public std::array<class Stream, N>
-{
-private:
- constexpr auto index(E e) const noexcept
- {
- return static_cast<std::underlying_type_t<E>>(e);
- }
-public:
- Stream &operator[](E e)
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
- const Stream &operator[](E e) const
- {
- return std::array<class Stream, N>::operator[](index(e));
- }
-};
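-
-/*
- * For illustration, a hypothetical use of the Device array, mirroring how
- * the pipeline handler indexes its streams:
- *
- *   enum class Isp { Input, Output0, Output1, Stats };
- *   Device<Isp, 4> isp;
- *   isp[Isp::Stats].name();
- *
- * The enum class index keeps stream lookups typesafe while retaining
- * plain std::array storage.
- */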
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 3dc0850c..4cbf105d 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
@@ -13,24 +13,29 @@
#include <queue>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_proxy.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
@@ -64,7 +69,8 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
void clear();
@@ -119,6 +125,7 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
bool fitsAllPaths(const StreamConfiguration &cfg);
@@ -132,6 +139,7 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
@@ -139,8 +147,8 @@ class PipelineHandlerRkISP1 : public PipelineHandler
public:
PipelineHandlerRkISP1(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -154,6 +162,8 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
RkISP1CameraData *cameraData(Camera *camera)
{
return static_cast<RkISP1CameraData *>(camera->_d());
@@ -165,7 +175,7 @@ private:
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
void bufferReady(FrameBuffer *buffer);
void paramReady(FrameBuffer *buffer);
void statReady(FrameBuffer *buffer);
@@ -178,6 +188,10 @@ private:
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
@@ -188,6 +202,8 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
@@ -195,28 +211,35 @@ RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
{
}
-RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
unsigned int frame = data->frame_;
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
- }
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
+
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
}
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
-
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
@@ -323,7 +346,7 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
/*
* The API tuning file is made from the sensor name unless the
- * environment variable overrides it. If
+ * environment variable overrides it.
*/
std::string ipaTuningFile;
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
@@ -339,7 +362,15 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
ipaTuningFile = std::string(configFromEnv);
}
- int ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
+ }
+
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &controlInfo_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
@@ -355,13 +386,15 @@ void RkISP1CameraData::paramFilled(unsigned int frame)
if (!info)
return;
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct rkisp1_params_cfg);
pipe->param_->queueBuffer(info->paramBuffer);
pipe->stat_->queueBuffer(info->statBuffer);
if (info->mainPathBuffer)
mainPath_->queueBuffer(info->mainPathBuffer);
- if (info->selfPathBuffer)
+ if (selfPath_ && info->selfPathBuffer)
selfPath_->queueBuffer(info->selfPathBuffer);
}
@@ -380,9 +413,33 @@ void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &meta
info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe()->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
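+
+/*
+ * For illustration: the map above translates a raw PixelFormat into the
+ * matching media bus code when selecting the sensor format, e.g.
+ *
+ *   uint32_t code = rawFormats.at(formats::SRGGB10);
+ *   code == MEDIA_BUS_FMT_SRGGB10_1X10
+ */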
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -393,14 +450,15 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
+ const CameraSensor *sensor = data_->sensor_.get();
StreamConfiguration config;
config = cfg;
- if (data_->mainPath_->validate(&config) != Valid)
+ if (data_->mainPath_->validate(sensor, &config) != Valid)
return false;
config = cfg;
- if (data_->selfPath_->validate(&config) != Valid)
+ if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid)
return false;
return true;
@@ -409,20 +467,38 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
- Status status = Valid;
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
status = Adjusted;
}
- /* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
}
/*
@@ -438,14 +514,14 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
std::reverse(order.begin(), order.end());
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Valid) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -455,7 +531,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Valid) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -466,7 +542,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
@@ -477,7 +553,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(&tryCfg) == Adjusted) {
+ if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
@@ -486,86 +562,147 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
}
}
- /* All paths rejected configuraiton. */
+ /* All paths rejected configuration. */
LOG(RkISP1, Debug) << "Camera configuration not supported "
<< cfg.toString();
return Invalid;
}
/* Select the sensor format. */
+ PixelFormat rawFormat;
Size maxSize;
- for (const StreamConfiguration &cfg : config_)
+
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
maxSize = std::max(maxSize, cfg.size);
+ }
+
+ std::vector<unsigned int> mbusCodes;
+
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
+
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize);
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- maxSize);
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
return status;
}
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager)
-{
-}
-
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
bool mainPathAvailable = true;
- bool selfPathAvailable = true;
+
for (const StreamRole role : roles) {
- bool useMainPath;
+ Size size;
switch (role) {
- case StreamRole::StillCapture: {
- useMainPath = mainPathAvailable;
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
break;
- }
+
case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- useMainPath = !selfPathAvailable;
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
break;
- }
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
default:
LOG(RkISP1, Warning)
<< "Requested stream role not supported: " << role;
- delete config;
return nullptr;
}
- StreamConfiguration cfg;
- if (useMainPath) {
- cfg = data->mainPath_->generateConfiguration(
- data->sensor_->resolution());
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+ * RGB (only available on the self path) in the streams formats
+ * exposed to applications. This likely calls for a better API
+ * to expose streams capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
mainPathAvailable = false;
} else {
- cfg = data->selfPath_->generateConfiguration(
- data->sensor_->resolution());
- selfPathAvailable = false;
+ path = data->selfPath_;
}
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
config->addConfiguration(cfg);
}
@@ -593,12 +730,18 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
V4L2SubdeviceFormat format = config->sensorFormat();
LOG(RkISP1, Debug) << "Configuring sensor with " << format;
- ret = sensor->setFormat(&format);
+ ret = sensor->setFormat(&format, config->combinedTransform());
if (ret < 0)
return ret;
LOG(RkISP1, Debug) << "Sensor configured with " << format;
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
+
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
@@ -612,8 +755,14 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
<< "ISP input pad configured with " << format
<< " crop " << rect;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+
LOG(RkISP1, Debug)
<< "Configuring ISP output pad with " << format
<< " crop " << rect;
@@ -622,6 +771,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
if (ret < 0)
return ret;
+ format.colorSpace = config->at(0).colorSpace;
ret = isp_->setFormat(2, &format);
if (ret < 0)
return ret;
@@ -637,10 +787,12 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
- } else {
+ } else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
cfg.size);
+ } else {
+ return -ENODEV;
}
if (ret)
@@ -660,19 +812,15 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
- IPACameraSensorInfo sensorInfo = {};
- ret = data->sensor_->sensorInfo(&sensorInfo);
- if (ret) {
- /* \todo Turn this into a hard failure. */
- LOG(RkISP1, Warning) << "Camera sensor information not available";
- sensorInfo = {};
- ret = 0;
- }
+ ipa::rkisp1::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
- std::map<uint32_t, ControlInfoMap> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ ipaConfig.sensorControls = data->sensor_->controls();
- ret = data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
@@ -688,7 +836,7 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S
if (stream == &data->mainPathStream_)
return mainPath_.exportBuffers(count, buffers);
- else if (stream == &data->selfPathStream_)
+ else if (hasSelfPath_ && stream == &data->selfPathStream_)
return selfPath_.exportBuffers(count, buffers);
return -EINVAL;
@@ -705,13 +853,15 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
data->selfPathStream_.configuration().bufferCount,
});
- ret = param_->allocateBuffers(maxCount, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(maxCount, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
@@ -787,23 +937,25 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
data->frame_ = 0;
- ret = param_->streamOn();
- if (ret) {
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->id();
- return ret;
- }
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->id();
- return ret;
+ ret = stat_->streamOn();
+ if (ret) {
+ param_->streamOff();
+ data->ipa_->stop();
+ freeBuffers(camera);
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
}
if (data->mainPath_->isEnabled()) {
@@ -817,7 +969,7 @@ int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlL
}
}
- if (data->selfPath_->isEnabled()) {
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
if (ret) {
mainPath_.stop();
@@ -844,18 +996,21 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera)
data->ipa_->stop();
- selfPath_.stop();
+ if (hasSelfPath_)
+ selfPath_.stop();
mainPath_.stop();
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics for " << camera->id();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters for " << camera->id();
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+ }
ASSERT(data->queuedRequests_.empty());
data->frameInfo_.clear();
@@ -869,12 +1024,21 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- RkISP1FrameInfo *info = data->frameInfo_.create(data, request);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
data->ipa_->queueRequest(data->frame_, request->controls());
- data->ipa_->fillParamsBuffer(data->frame_, info->paramBuffer->cookie());
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->fillParamsBuffer(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -900,8 +1064,7 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
* Configure the sensor links: enable the link corresponding to this
* camera.
*/
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (link->source()->entity() != sensor->entity())
continue;
@@ -915,10 +1078,18 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return ret;
}
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
- else if (cfg.stream() == &data->selfPathStream_)
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
ret = data->selfPath_->setEnabled(true);
else
return -EINVAL;
@@ -935,15 +1106,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
int ret;
std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this, &mainPath_, &selfPath_);
-
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
-
- data->controlInfo_ = ControlInfoMap(std::move(ctrls),
- controls::controls);
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
data->sensor_ = std::make_unique<CameraSensor>(sensor);
ret = data->sensor_->init();
@@ -991,9 +1155,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -1008,11 +1170,29 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
return false;
}
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
+ return false;
+
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
@@ -1026,11 +1206,12 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!mainPath_.init(media_))
return false;
- if (!selfPath_.init(media_))
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
@@ -1038,12 +1219,8 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
bool registered = false;
- for (MediaLink *link : pad->links()) {
+ for (MediaLink *link : ispSink_->links()) {
if (!createCamera(link->source()->entity()))
registered = true;
}
@@ -1055,12 +1232,10 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -1068,7 +1243,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
@@ -1078,19 +1253,38 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
+
+ const FrameMetadata &metadata = buffer->metadata();
Request *request = buffer->request();
- /*
- * Record the sensor's timestamp in the request metadata.
- *
- * \todo The sensor timestamp should be better estimated by connecting
- * to the V4L2Device::frameStart signal.
- */
- request->metadata().set(controls::SensorTimestamp,
- buffer->metadata().timestamp);
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStatsBuffer(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
completeBuffer(request, buffer);
- tryCompleteRequest(request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
@@ -1103,7 +1297,7 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
@@ -1117,7 +1311,7 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
return;
}
@@ -1128,6 +1322,6 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
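
The raw capture support added above is exercised entirely through the public libcamera API. A minimal application-side sketch (illustrative only, error handling omitted), assuming a camera backed by this pipeline handler:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    int captureRaw()
    {
            CameraManager cm;
            cm.start();

            std::shared_ptr<Camera> camera = cm.cameras()[0];
            camera->acquire();

            /*
             * Raw capture can't be combined with processed streams on
             * this pipeline, so request a single raw stream.
             */
            std::unique_ptr<CameraConfiguration> config =
                    camera->generateConfiguration({ StreamRole::Raw });

            /* validate() adjusts the size to the closest sensor output. */
            config->validate();
            camera->configure(config.get());

            /* ... allocate buffers, queue requests, start capture ... */

            camera->release();
            cm.stop();
            return 0;
    }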
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 6f175758..c49017d1 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.cpp - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
@@ -12,6 +12,7 @@
#include <libcamera/formats.h>
#include <libcamera/stream.h>
+#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -20,6 +21,39 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(RkISP1)
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
const Size &minResolution, const Size &maxResolution)
: name_(name), running_(false), formats_(formats),
@@ -41,6 +75,8 @@ bool RkISP1Path::init(MediaDevice *media)
if (video_->open() < 0)
return false;
+ populateFormats();
+
link_ = media->link("rkisp1_isp", 2, resizer, 0);
if (!link_)
return false;
@@ -48,40 +84,227 @@ bool RkISP1Path::init(MediaDevice *media)
return true;
}
-StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
+void RkISP1Path::populateFormats()
{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
+ /* Min and max resolutions to populate the available stream formats. */
Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
.boundedTo(resolution);
Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
- for (const PixelFormat &format : formats_)
- streamFormats[format] = { { minResolution, maxResolution } };
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
- cfg.pixelFormat = formats::NV12;
- cfg.size = maxResolution;
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
cfg.bufferCount = RKISP1_BUFFER_COUNT;
return cfg;
}
-CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
+CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg)
{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ const Size &resolution = sensor->resolution();
+
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
- if (std::find(formats_.begin(), formats_.end(), cfg->pixelFormat) ==
- formats_.end())
- cfg->pixelFormat = formats::NV12;
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * Store the raw format with the highest bits per pixel
+ * for later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, cfg->size);
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
- cfg->size.boundTo(maxResolution_);
- cfg->size.expandTo(minResolution_);
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
cfg->bufferCount = RKISP1_BUFFER_COUNT;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg->pixelFormat);
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
format.size = cfg->size;
int ret = video_->tryFormat(&format);
@@ -112,7 +335,18 @@ int RkISP1Path::configure(const StreamConfiguration &config,
if (ret < 0)
return ret;
- Rectangle rect(0, 0, ispFormat.size);
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+ * \todo The alignment to a multiple of 2 pixels is required but may
+ * change the aspect ratio very slightly. A more advanced algorithm to
+ * compute the resizer input crop rectangle is needed, and it should
+ * also take into account the need to crop away the edge pixels affected
+ * by the ISP processing blocks.
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
@@ -127,15 +361,11 @@ int RkISP1Path::configure(const StreamConfiguration &config,
<< "Configuring " << name_ << " resizer output pad with "
<< ispFormat;
- switch (config.pixelFormat) {
- case formats::NV12:
- case formats::NV21:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8;
- break;
- default:
- ispFormat.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- break;
- }
+ /*
+ * The configuration has been validated, the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
ret = resizer_->setFormat(1, &ispFormat);
if (ret < 0)
@@ -147,7 +377,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
V4L2DeviceFormat outputFormat;
- outputFormat.fourcc = V4L2PixelFormat::fromPixelFormat(config.pixelFormat);
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
outputFormat.size = config.size;
outputFormat.planesCount = info.numPlanes();
@@ -156,7 +386,7 @@ int RkISP1Path::configure(const StreamConfiguration &config,
return ret;
if (outputFormat.size != config.size ||
- outputFormat.fourcc != V4L2PixelFormat::fromPixelFormat(config.pixelFormat)) {
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
LOG(RkISP1, Error)
<< "Unable to configure capture in " << config.toString();
return -EINVAL;
@@ -204,17 +434,32 @@ void RkISP1Path::stop()
running_ = false;
}
+/*
+ * \todo Remove the hardcoded resolutions and formats once all users have
+ * migrated to a recent enough kernel.
+ */
namespace {
constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
-constexpr std::array<PixelFormat, 6> RKISP1_RSZ_MP_FORMATS{
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
- /* \todo Add support for RAW formats. */
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
};
constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
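
A worked example of the field-of-view preserving crop computed in RkISP1Path::configure() above, with illustrative numbers:

    /* Illustrative: 4:3 ISP input, 16:9 requested stream size. */
    Size input{ 2592, 1944 };
    Size stream{ 1920, 1080 };

    Size ispCrop = input.boundedToAspectRatio(stream)  /* 2592x1458 */
                        .alignedUpTo(2, 2);            /* already even */
    Rectangle rect = ispCrop.centeredTo(Rectangle(input).center());
    /* rect == (0, 243)/2592x1458: full width kept, 243 rows cropped
     * from the top and bottom before the resizer downscales. */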
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index f3f1ae39..08edefec 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.h - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#pragma once
#include <memory>
+#include <set>
#include <vector>
#include <libcamera/base/signal.h>
@@ -22,6 +23,7 @@
namespace libcamera {
+class CameraSensor;
class MediaDevice;
class V4L2Subdevice;
struct StreamConfiguration;
@@ -38,8 +40,11 @@ public:
int setEnabled(bool enable) { return link_->setEnabled(enable); }
bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
- StreamConfiguration generateConfiguration(const Size &resolution);
- CameraConfiguration::Status validate(StreamConfiguration *cfg);
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat);
@@ -57,14 +62,17 @@ public:
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
private:
+ void populateFormats();
+
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
const char *name_;
bool running_;
const Span<const PixelFormat> formats_;
- const Size minResolution_;
- const Size maxResolution_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth of a pipeline the
+ * application needs to maintain to also cover controls. Just as with buffer
+ * depth if the application keeps the number of requests queued above the
+ * control depth the controls are guaranteed to take effect for the correct
+ * request. The control depth is determined by the control with the greatest
+ * delay.
+ */
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
+ * the existing vertical blanking specified bounds, but are within the new
+ * blanking bounds.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. If it's desired to
+ * mix delayed controls and controls that take effect immediately the immediate
+ * controls must be listed in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie User-provided token to associate with the initial state
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+ /* Retrieve the current control values as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+ * Do not mark this control value as updated; it does not need
+ * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls onto the queue
+ * \param[in] controls List of controls to add to the device queue
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist. It
+ * is the caller's responsibility not to request sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of entries pushed.
+ *
+ * \return The controls and associated cookie in effect at \a sequence number
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
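
A minimal usage sketch for the helper above, with illustrative delay values (real values are sensor specific, and device, request, cookie and sequence are assumed to exist in the surrounding code):

    std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
            { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
            { V4L2_CID_EXPOSURE, { 2, false } },
            /* Priority write: new VBLANK limits must land before EXPOSURE. */
            { V4L2_CID_VBLANK, { 2, true } },
    };

    RPi::DelayedControls delayed(device, params);
    delayed.reset(0);

    /* Per request: queue controls with a cookie identifying the request. */
    delayed.push(request->controls(), cookie);

    /* On each frame start event: write whatever is now due to the device. */
    delayed.applyControls(sequence);

    /* On frame completion: read back what was in effect for that frame. */
    auto [ctrls, appliedCookie] = delayed.get(sequence);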
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
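
Note that the RingBuffer wraps its index modulo listSize (16) instead of bounds-checking, so stale entries are silently overwritten; a standalone illustration of the pattern:

    #include <array>

    template<typename T>
    class RingBuffer : public std::array<T, 16>
    {
    public:
            T &operator[](unsigned int index)
            {
                    return std::array<T, 16>::operator[](index % 16);
            }
    };

    void example()
    {
            RingBuffer<unsigned int> cookies{};
            cookies[0] = 100;
            cookies[16] = 200; /* 16 % 16 == 0: slot 0 is overwritten */
            /* cookies[0] now reads 200; entry 0 has been evicted. */
    }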
diff --git a/src/libcamera/pipeline/raspberrypi/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
index f1a2f5ee..8fb7e823 100644
--- a/src/libcamera/pipeline/raspberrypi/meson.build
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'dma_heaps.cpp',
- 'raspberrypi.cpp',
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
'rpi_stream.cpp',
])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..289af516
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+ * variants. So if the PixelFormat returns as invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
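
For instance (illustrative values): a 10-bit Bayer code has a CSI2-packed pixel format, while an 8-bit code does not and takes the unpacked fallback:

    PixelFormat p10 = mbusCodeToPixelFormat(MEDIA_BUS_FMT_SRGGB10_1X10,
                                            BayerFormat::Packing::CSI2);
    /* p10 == formats::SRGGB10_CSI2P: a packed 10-bit variant exists. */

    PixelFormat p8 = mbusCodeToPixelFormat(MEDIA_BUS_FMT_SRGGB8_1X8,
                                           BayerFormat::Packing::CSI2);
    /* p8 == formats::SRGGB8: no packed 8-bit variant, unpacked fallback. */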
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+ for (auto cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+ for (auto cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
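
As an example of the sharing rule above (a sketch, assuming a YUV420 stream requesting Rec709 next to an XRGB8888 stream with no colour space set):

    config->validate();

    /* config->at(0).colorSpace == ColorSpace::Rec709, the shared YUV space. */
    /* config->at(1).colorSpace keeps the Rec709 primaries and transfer
     * function but with YcbcrEncoding::None and Range::Full - the RGB
     * variant of the shared colour space. */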
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+
+ for (const auto &[index, cfg] : utils::enumerate(config_)) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(index, &cfg);
+ else
+ outStreams_.emplace_back(index, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
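
To illustrate the Bayer order adjustment above: a sensor that reads out RGGB when upright produces GRBG under an h-flip, GBRG under a v-flip and BGGR under both, so a raw stream requested as SRGGB12 gets adjusted to the flipped order. A sketch (illustrative):

    BayerFormat cfgBayer = BayerFormat::fromPixelFormat(formats::SRGGB12);
    cfgBayer.order = BayerFormat::Order::GRBG;       /* order under h-flip */
    PixelFormat adjusted = cfgBayer.toPixelFormat(); /* formats::SGRBG12 */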
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+ if (stream->colorSpace != format.colorSpace) {
+ stream->colorSpace = format.colorSpace;
+ adjusted = true;
+ LOG(RPI, Debug)
+ << "Color space changed from "
+ << ColorSpace::toString(stream->colorSpace) << " to "
+ << ColorSpace::toString(format.colorSpace);
+ }
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Start by freeing all buffers and reset the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+	 * If the application has provided a sensor configuration, apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ /* Add the ScalerCrop control limits based on the current mode. */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+ /*
+ * Start by disabling all the sink pad links on the devices in the
+ * cascade, with the exception of the link connecting the device.
+ */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+ /*
+ * Next, enable the entity -> entity links, and setup the pad format.
+ *
+	 * \todo Some bridge devices may change the media bus code, so we
+ * ought to read the source pad format and propagate it to the sink pad.
+ */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ if (data->sensor_->init())
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+ /*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor ->
+	 * frontend chain. There may be a cascade of devices in this chain!
+ */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+ /*
+ * Set a default value for the ScalerCropMaximum property to show
+ * that we support its use, however, initialise it to zero because
+ * it's not meaningful until a camera mode has been chosen.
+ */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+ /*
+ * Link the FrameBuffers with the id (key value) in the map stored in
+ * the RPi stream object - along with an identifier mask.
+ *
+ * This will allow us to identify buffers passed between the pipeline
+ * handler and the IPA.
+ */
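+	/*
+	 * As an illustrative example, using the BufferMask values defined in
+	 * rpi_stream.h: with mask = MaskStats (0x010000) and a buffer keyed
+	 * as 3 in the BufferMap, the IPA sees id 0x010003, and (id & MaskID)
+	 * recovers the original key.
+	 */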
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+ unsigned int i;
+ for (i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
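+/*
+ * A worked example with illustrative numbers: requesting a width of 1920
+ * against a 1280 mode scores (1920 - 1280) * 2 = 1280, while a 2560 mode
+ * scores (2560 - 1920) / 8 * 2 = 160, so modes larger than the requested
+ * size are penalised far less than smaller ones.
+ */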
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+	/* A mode larger than the requested size is preferred to a smaller one. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity to entity link.
+ *
+ * Entity pad configuration and link enabling happens at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enabling the specific links to link sensor to the frontend across
+ * all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ * +----------+
+ * | FE |
+ * +-----^----+
+ * |
+ * +---+---+
+ * | Mux1 |<------+
+ * +--^---- |
+ * | |
+ * +-----+---+ +---+---+
+ * | Sensor1 | | Mux2 |<--+
+ * +---------+ +-^-----+ |
+ * | |
+ * +-------+-+ +---+-----+
+ * | Sensor2 | | Sensor3 |
+ * +---------+ +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
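+	/*
+	 * A configuration file can be supplied explicitly, e.g. with an
+	 * illustrative path:
+	 *   LIBCAMERA_RPI_CONFIG_FILE=/path/to/example.yaml
+	 */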
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+		LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+				  << ver.value_or(0.0);
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
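+	/*
+	 * For a hypothetical sensor model "imx477", this resolves to
+	 * imx477.json, or imx477_mono.json for a monochrome variant.
+	 */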
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/* Add to the Request metadata buffer what the IPA has provided. */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+ /*
+ * We need to ensure that if both VBLANK and EXPOSURE are present, the
+ * former must be written ahead of, and separately from EXPOSURE to avoid
+ * V4L2 rejecting the latter. This is identical to what DelayedControls
+ * does with the priority write flag.
+ *
+ * As a consequence of the below logic, VBLANK gets set twice, and we
+ * rely on the v4l2 framework to not pass the second control set to the
+ * driver as the actual control value has not changed.
+ */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
+ */
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
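+
+/*
+ * An illustrative example with made-up values: given an analogue crop of
+ * 4056x3040 at (0, 0) and an ISP output size of 2028x1520, an ISP crop of
+ * 500x400 at (250, 200) scales back to a native crop of 1000x800 at
+ * (500, 400).
+ */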
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (scalerCrop) {
+ Rectangle nativeCrop = *scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != ispCrop_) {
+ ispCrop_ = ispCrop;
+ platformSetIspCrop();
+
+ /*
+ * Also update the ScalerCrop in the metadata with what we actually
+ * used. But we must first rescale that from ISP (camera mode) pixels
+ * back into sensor native pixels.
+ */
+ scalerCrop_ = scaleIspCrop(ispCrop_);
+ }
+ }
+}
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+ /*
+ * It is possible to be here without a pending request, so check
+	 * that we actually have one to action; otherwise we just return the
+	 * buffer back to the stream.
+ */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+ /*
+ * If we are dropping this frame, do not touch the request, simply
+ * change the state to IDLE when ready.
+ */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+ /*
+	 * Make sure we have all the ISP outputs completed in the case of a
+	 * dropped frame.
+ */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ request->metadata().set(controls::ScalerCrop, scalerCrop_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..f9cecf70
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop() = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+ /*
+	 * Stores a cascade of Video Mux or Bridge devices between the sensor
+	 * and the frontend, together with the media links between the entities.
+ */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+ /*
+ * All the functions in this class are called from a single calling
+	 * thread, so we do not need any mutex to protect access to the
+	 * variables below.
+ */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+ Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
+ Rectangle scalerCrop_; /* crop in sensor native pixels */
+ Size ispMinCropSize_;
+
+ unsigned int dropFrameCount_;
+
+ /*
+	 * If set, this stores the value that represents a gain of one for
+ * the V4L2_CID_NOTIFY_GAINS control.
+ */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+ /*
+ * Store the colour spaces that all our streams will have. RGB format streams
+ * will have the same colorspace as YUV streams, with YCbCr field cleared and
+ * range set to full.
+ */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
index 7a93efaa..accf59eb 100644
--- a/src/libcamera/pipeline/raspberrypi/rpi_stream.cpp
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -1,14 +1,19 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.cpp - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
#include <libcamera/base/log.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
+static constexpr unsigned int maxV4L2BufferCount = 32;
namespace libcamera {
@@ -16,40 +21,64 @@ LOG_DEFINE_CATEGORY(RPISTREAM)
namespace RPi {
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
V4L2VideoDevice *Stream::dev() const
{
return dev_.get();
}
-std::string Stream::name() const
+const std::string &Stream::name() const
{
return name_;
}
-void Stream::resetBuffers()
+unsigned int Stream::swDownscale() const
{
- /* Add all internal buffers to the queue of usable buffers. */
- availableBuffers_ = {};
- for (auto const &buffer : internalBuffers_)
- availableBuffers_.push(buffer.get());
+ return swDownscale_;
}
-void Stream::setExternal(bool external)
+void Stream::setSwDownscale(unsigned int swDownscale)
{
- /* Import streams cannot be external. */
- ASSERT(!external || !importOnly_);
- external_ = external;
+ swDownscale_ = swDownscale;
}
-bool Stream::isExternal() const
+void Stream::resetBuffers()
{
- return external_;
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
}
void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
for (auto const &buffer : *buffers)
- bufferMap_.emplace(id_.get(), buffer.get());
+ bufferEmplace(++id_, buffer.get());
}
const BufferMap &Stream::getBuffers() const
@@ -57,68 +86,42 @@ const BufferMap &Stream::getBuffers() const
return bufferMap_;
}
-int Stream::getBufferId(FrameBuffer *buffer) const
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
{
- if (importOnly_)
- return -1;
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
/* Find the buffer in the map, and return the buffer id. */
auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
- [&buffer](auto const &p) { return p.second == buffer; });
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
if (it == bufferMap_.end())
- return -1;
+ return 0;
return it->first;
}
-void Stream::setExternalBuffer(FrameBuffer *buffer)
+void Stream::setExportedBuffer(FrameBuffer *buffer)
{
- bufferMap_.emplace(ipa::RPi::MaskExternalBuffer | id_.get(), buffer);
-}
-
-void Stream::removeExternalBuffer(FrameBuffer *buffer)
-{
- int id = getBufferId(buffer);
-
- /* Ensure we have this buffer in the stream, and it is marked external. */
- ASSERT(id != -1 && (id & ipa::RPi::MaskExternalBuffer));
- bufferMap_.erase(id);
+ bufferEmplace(++id_, buffer);
}
int Stream::prepareBuffers(unsigned int count)
{
int ret;
- if (!importOnly_) {
- if (count) {
- /* Export some frame buffers for internal use. */
- ret = dev_->exportBuffers(count, &internalBuffers_);
- if (ret < 0)
- return ret;
-
- /* Add these exported buffers to the internal/external buffer list. */
- setExportedBuffers(&internalBuffers_);
- resetBuffers();
- }
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
- /* We must import all internal/external exported buffers. */
- count = bufferMap_.size();
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
}
- /*
- * If this is an external stream, we must allocate slots for buffers that
- * might be externally allocated. We have no indication of how many buffers
- * may be used, so this might overallocate slots in the buffer cache.
- * Similarly, if this stream is only importing buffers, we do the same.
- *
- * \todo Find a better heuristic, or, even better, an exact solution to
- * this issue.
- */
- if (isExternal() || importOnly_)
- count = count * 2;
-
- return dev_->importBuffers(count);
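+	/*
+	 * Import the maximum number of buffer slots up front so that buffers
+	 * an application provides later can always be accommodated, without
+	 * relying on a heuristic slot count.
+	 */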
+ return dev_->importBuffers(maxV4L2BufferCount);
}
int Stream::queueBuffer(FrameBuffer *buffer)
@@ -162,7 +165,7 @@ int Stream::queueBuffer(FrameBuffer *buffer)
void Stream::returnBuffer(FrameBuffer *buffer)
{
- if (!external_) {
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
/* For internal buffers, simply requeue back to the device. */
queueToDevice(buffer);
return;
@@ -171,9 +174,6 @@ void Stream::returnBuffer(FrameBuffer *buffer)
/* Push this buffer back into the queue to be used again. */
availableBuffers_.push(buffer);
- /* Allow the buffer id to be reused. */
- id_.release(getBufferId(buffer));
-
/*
* Do we have any Request buffers that are waiting to be queued?
* If so, do it now as availableBuffers_ will not be empty.
@@ -202,11 +202,32 @@ void Stream::returnBuffer(FrameBuffer *buffer)
}
}
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+ auto const &it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
+
int Stream::queueAllBuffers()
{
int ret;
- if (external_)
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
return 0;
while (!availableBuffers_.empty()) {
@@ -226,13 +247,23 @@ void Stream::releaseBuffers()
clearBuffers();
}
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+ if (flags_ & StreamFlag::RequiresMmap)
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, true));
+ else
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, false));
+}
+
void Stream::clearBuffers()
{
availableBuffers_ = std::queue<FrameBuffer *>{};
requestBuffers_ = std::queue<FrameBuffer *>{};
internalBuffers_.clear();
bufferMap_.clear();
- id_.reset();
+ id_ = 0;
}
int Stream::queueToDevice(FrameBuffer *buffer)
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+ * Indicates that the stream buffers need to be mmaped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
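+
+	/*
+	 * Flags are toggled by the pipeline handler at run time, e.g.
+	 * (illustrative) setFlags(StreamFlag::External). RequiresMmap may
+	 * only be set through the constructor; setFlags() and clearFlags()
+	 * assert that it is never passed in dynamically.
+	 */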
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
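+	/*
+	 * getBufferId() returns the id key of the given buffer, or 0 when it
+	 * is not found; ids are assigned starting from 1, so 0 safely
+	 * signals a miss.
+	 */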
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+	/* Tracks a unique id key for the bufferMap_. */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+ /*
+ * This is a list of buffers exported internally. Need to keep this around
+ * as the stream needs to maintain ownership of these buffers.
+ */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
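+
+/*
+ * A minimal usage sketch, mirroring the VC4 handler in this series:
+ *
+ *   enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+ *   RPi::Device<Isp, 4> isp_;
+ *   isp_[Isp::Output0].dev()->formats();
+ */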
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+ # Note that the pipeline handler might override this value and
+ # not allocate any internal buffers if it knows they will never
+        # be used. For example, this is the case if the RAW stream is
+        # marked as mandatory and no dropped frames are signalled for
+        # algorithm convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
+ #
+ # "min_total_unicam_buffers": 4,
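+        #
+        # As a worked example: with min_unicam_buffers = 2 and
+        # min_total_unicam_buffers = 4, an application providing one
+        # external buffer results in max(2, 4 - 1) = 3 internal buffers
+        # being allocated.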
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+ # Custom timeout value (in ms) for camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..386e2296
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..37fb310f
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_heaps.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+ /* Array of Unicam and ISP device streams and associated buffers/streams. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaHeap dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop() override
+ {
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+ /*
+ * Loop over all Unicam instances, but return as soon as a match is
+ * found. This is to ensure we correctly enumerate the camera when an
+ * instance of Unicam has registered with media controller, but has not
+ * registered device nodes due to a sensor subdevice failure.
+ */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+ * The lifetimes of these buffers are shorter than those of the
+ * raw buffers, so allocating a fixed number will still suffice
+ * if the application requests a greater number of raw
+ * buffers, as these will be recycled quicker.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronous with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
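+ /*
+ * The CamHelper and the Unicam driver must agree on whether the
+ * sensor produces embedded data: sensorMetadata_ XOR the presence
+ * of the embedded data node flags a mismatch, in which case
+ * embedded data support is disabled below.
+ */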
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+ /* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+ /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
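+ /*
+ * As an example of the adjustments made here, a request for a
+ * 16-bit unpacked raw format on a 12-bit sensor mode results in
+ * the corresponding unpacked 12-bit format.
+ */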
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+ status = CameraConfiguration::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+ * quality and is usually undesired ?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must carry the largest resolution, which the size
+ * clamping above has already ensured.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+ LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+ << *target;
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If the ISP::Output1 stream has not been requested by the application,
+ * we set it up for internal use now. This second stream will be used
+ * for fast colour denoise, and must be a quarter of the resolution of
+ * the ISP::Output0 stream. However, also limit the maximum size to 1200
+ * pixels in the larger dimension, to avoid being wasteful with buffer
+ * allocations and memory bandwidth.
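+ *
+ * For example, a 1920x1080 Output0 yields a 960x540 Output1: half
+ * size, already within the aspect-ratio-bounded 1200x675 limit and
+ * aligned to 2 pixels.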
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+ ispMinCropSize_ = testCrop.size();
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+ platformSetIspCrop();
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Lookup the sensor controls used for this frame sequence from
+ * DelayedControl and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * The ISP statistics buffer must not be re-queued or sent back to
+ * the application until the IPA signals, via processStatsComplete,
+ * that it has finished with it.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+ buffer = unicam_[Unicam::Image].getBuffers().at(bayer).buffer;
+ LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bayer
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+ buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
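+ /*
+ * If the IPA has filled in a lens shading control, patch in the
+ * dmabuf file descriptor of the lens shading table, which only the
+ * pipeline handler knows, before applying the controls to the ISP.
+ */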
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+ * Set the dequeue timeout to 5x the maximum frame length reported
+ * by the IPA over the last number of frames, with a minimum timeout
+ * value of 1s.
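+ *
+ * For example, a reported maximum frame length of 33ms gives a
+ * computed 165ms, which is raised to the 1s minimum, while 250ms
+ * gives a 1.25s timeout.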
+ */
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+ /* Take the first request from the queue and action the IPA. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
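+ /*
+ * Buffer ids passed to the IPA are tagged with a per-stream mask
+ * (here RPi::MaskBayerData and RPi::MaskEmbeddedData); the IPA
+ * returns them with the buffer index recoverable via RPi::MaskID.
+ */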
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffer with a timestamp lower than that of
+ * the current bayer buffer will be removed and re-queued to the driver.
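+ *
+ * For example, with a bayer timestamp T and embedded timestamps
+ * { T - 1, T, T + 1 }, the first buffer is returned to the driver,
+ * the second is matched, and the third is left queued.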
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+ /* Log if there is no matching embedded data buffer found. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
deleted file mode 100644
index 77c44fc8..00000000
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ /dev/null
@@ -1,399 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.cpp - Format converter for simple pipeline handler
- */
-
-#include "converter.h"
-
-#include <algorithm>
-#include <limits.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/framebuffer.h>
-#include <libcamera/geometry.h>
-#include <libcamera/stream.h>
-
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(SimplePipeline)
-
-/* -----------------------------------------------------------------------------
- * SimpleConverter::Stream
- */
-
-SimpleConverter::Stream::Stream(SimpleConverter *converter, unsigned int index)
- : converter_(converter), index_(index)
-{
- m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode_);
-
- m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
- m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
-
- int ret = m2m_->open();
- if (ret < 0)
- m2m_.reset();
-}
-
-int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
-{
- V4L2PixelFormat videoFormat =
- V4L2PixelFormat::fromPixelFormat(inputCfg.pixelFormat);
-
- V4L2DeviceFormat format;
- format.fourcc = videoFormat;
- format.size = inputCfg.size;
- format.planesCount = 1;
- format.planes[0].bpl = inputCfg.stride;
-
- int ret = m2m_->output()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set input format: " << strerror(-ret);
- return ret;
- }
-
- if (format.fourcc != videoFormat || format.size != inputCfg.size ||
- format.planes[0].bpl != inputCfg.stride) {
- LOG(SimplePipeline, Error)
- << "Input format not supported (requested "
- << inputCfg.size << "-" << videoFormat
- << ", got " << format << ")";
- return -EINVAL;
- }
-
- /* Set the pixel format and size on the output. */
- videoFormat = V4L2PixelFormat::fromPixelFormat(outputCfg.pixelFormat);
- format = {};
- format.fourcc = videoFormat;
- format.size = outputCfg.size;
-
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set output format: " << strerror(-ret);
- return ret;
- }
-
- if (format.fourcc != videoFormat || format.size != outputCfg.size) {
- LOG(SimplePipeline, Error)
- << "Output format not supported";
- return -EINVAL;
- }
-
- inputBufferCount_ = inputCfg.bufferCount;
- outputBufferCount_ = outputCfg.bufferCount;
-
- return 0;
-}
-
-int SimpleConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- return m2m_->capture()->exportBuffers(count, buffers);
-}
-
-int SimpleConverter::Stream::start()
-{
- int ret = m2m_->output()->importBuffers(inputBufferCount_);
- if (ret < 0)
- return ret;
-
- ret = m2m_->capture()->importBuffers(outputBufferCount_);
- if (ret < 0) {
- stop();
- return ret;
- }
-
- ret = m2m_->output()->streamOn();
- if (ret < 0) {
- stop();
- return ret;
- }
-
- ret = m2m_->capture()->streamOn();
- if (ret < 0) {
- stop();
- return ret;
- }
-
- return 0;
-}
-
-void SimpleConverter::Stream::stop()
-{
- m2m_->capture()->streamOff();
- m2m_->output()->streamOff();
- m2m_->capture()->releaseBuffers();
- m2m_->output()->releaseBuffers();
-}
-
-int SimpleConverter::Stream::queueBuffers(FrameBuffer *input,
- FrameBuffer *output)
-{
- int ret = m2m_->output()->queueBuffer(input);
- if (ret < 0)
- return ret;
-
- ret = m2m_->capture()->queueBuffer(output);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-std::string SimpleConverter::Stream::logPrefix() const
-{
- return "stream" + std::to_string(index_);
-}
-
-void SimpleConverter::Stream::outputBufferReady(FrameBuffer *buffer)
-{
- auto it = converter_->queue_.find(buffer);
- if (it == converter_->queue_.end())
- return;
-
- if (!--it->second) {
- converter_->inputBufferReady.emit(buffer);
- converter_->queue_.erase(it);
- }
-}
-
-void SimpleConverter::Stream::captureBufferReady(FrameBuffer *buffer)
-{
- converter_->outputBufferReady.emit(buffer);
-}
-
-/* -----------------------------------------------------------------------------
- * SimpleConverter
- */
-
-SimpleConverter::SimpleConverter(MediaDevice *media)
-{
- /*
- * Locate the video node. There's no need to validate the pipeline
- * further, the caller guarantees that this is a V4L2 mem2mem device.
- */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto it = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->function() == MEDIA_ENT_F_IO_V4L;
- });
- if (it == entities.end())
- return;
-
- deviceNode_ = (*it)->deviceNode();
-
- m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode_);
- int ret = m2m_->open();
- if (ret < 0) {
- m2m_.reset();
- return;
- }
-}
-
-std::vector<PixelFormat> SimpleConverter::formats(PixelFormat input)
-{
- if (!m2m_)
- return {};
-
- /*
- * Set the format on the input side (V4L2 output) of the converter to
- * enumerate the conversion capabilities on its output (V4L2 capture).
- */
- V4L2DeviceFormat v4l2Format;
- v4l2Format.fourcc = V4L2PixelFormat::fromPixelFormat(input);
- v4l2Format.size = { 1, 1 };
-
- int ret = m2m_->output()->setFormat(&v4l2Format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- std::vector<PixelFormat> pixelFormats;
-
- for (const auto &format : m2m_->capture()->formats()) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat)
- pixelFormats.push_back(pixelFormat);
- }
-
- return pixelFormats;
-}
-
-SizeRange SimpleConverter::sizes(const Size &input)
-{
- if (!m2m_)
- return {};
-
- /*
- * Set the size on the input side (V4L2 output) of the converter to
- * enumerate the scaling capabilities on its output (V4L2 capture).
- */
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat();
- format.size = input;
-
- int ret = m2m_->output()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- SizeRange sizes;
-
- format.size = { 1, 1 };
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- sizes.min = format.size;
-
- format.size = { UINT_MAX, UINT_MAX };
- ret = m2m_->capture()->setFormat(&format);
- if (ret < 0) {
- LOG(SimplePipeline, Error)
- << "Failed to set format: " << strerror(-ret);
- return {};
- }
-
- sizes.max = format.size;
-
- return sizes;
-}
-
-std::tuple<unsigned int, unsigned int>
-SimpleConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
- const Size &size)
-{
- V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(pixelFormat);
- format.size = size;
-
- int ret = m2m_->capture()->tryFormat(&format);
- if (ret < 0)
- return std::make_tuple(0, 0);
-
- return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
-}
-
-int SimpleConverter::configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
-{
- int ret = 0;
-
- streams_.clear();
- streams_.reserve(outputCfgs.size());
-
- for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
- Stream &stream = streams_.emplace_back(this, i);
-
- if (!stream.isValid()) {
- LOG(SimplePipeline, Error)
- << "Failed to create stream " << i;
- ret = -EINVAL;
- break;
- }
-
- ret = stream.configure(inputCfg, outputCfgs[i]);
- if (ret < 0)
- break;
- }
-
- if (ret < 0) {
- streams_.clear();
- return ret;
- }
-
- return 0;
-}
-
-int SimpleConverter::exportBuffers(unsigned int output, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
- if (output >= streams_.size())
- return -EINVAL;
-
- return streams_[output].exportBuffers(count, buffers);
-}
-
-int SimpleConverter::start()
-{
- int ret;
-
- for (Stream &stream : streams_) {
- ret = stream.start();
- if (ret < 0) {
- stop();
- return ret;
- }
- }
-
- return 0;
-}
-
-void SimpleConverter::stop()
-{
- for (Stream &stream : utils::reverse(streams_))
- stream.stop();
-}
-
-int SimpleConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
-{
- unsigned int mask = 0;
- int ret;
-
- /*
- * Validate the outputs as a sanity check: at least one output is
- * required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
- */
- if (outputs.empty())
- return -EINVAL;
-
- for (auto [index, buffer] : outputs) {
- if (!buffer)
- return -EINVAL;
- if (index >= streams_.size())
- return -EINVAL;
- if (mask & (1 << index))
- return -EINVAL;
-
- mask |= 1 << index;
- }
-
- /* Queue the input and output buffers to all the streams. */
- for (auto [index, buffer] : outputs) {
- ret = streams_[index].queueBuffers(input, buffer);
- if (ret < 0)
- return ret;
- }
-
- /*
- * Add the input buffer to the queue, with the number of streams as a
- * reference count. Completion of the input buffer will be signalled by
- * the stream that releases the last reference.
- */
- queue_.emplace(std::piecewise_construct,
- std::forward_as_tuple(input),
- std::forward_as_tuple(outputs.size()));
-
- return 0;
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
deleted file mode 100644
index f0ebe2e0..00000000
--- a/src/libcamera/pipeline/simple/converter.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Laurent Pinchart
- *
- * converter.h - Format converter for simple pipeline handler
- */
-
-#pragma once
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include <libcamera/pixel_format.h>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/signal.h>
-
-namespace libcamera {
-
-class FrameBuffer;
-class MediaDevice;
-class Size;
-class SizeRange;
-struct StreamConfiguration;
-class V4L2M2MDevice;
-
-class SimpleConverter
-{
-public:
- SimpleConverter(MediaDevice *media);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- std::vector<PixelFormat> formats(PixelFormat input);
- SizeRange sizes(const Size &input);
-
- std::tuple<unsigned int, unsigned int>
- strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
-
- int configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int output, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
-
- Signal<FrameBuffer *> inputBufferReady;
- Signal<FrameBuffer *> outputBufferReady;
-
-private:
- class Stream : protected Loggable
- {
- public:
- Stream(SimpleConverter *converter, unsigned int index);
-
- bool isValid() const { return m2m_ != nullptr; }
-
- int configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
-
- int start();
- void stop();
-
- int queueBuffers(FrameBuffer *input, FrameBuffer *output);
-
- protected:
- std::string logPrefix() const override;
-
- private:
- void captureBufferReady(FrameBuffer *buffer);
- void outputBufferReady(FrameBuffer *buffer);
-
- SimpleConverter *converter_;
- unsigned int index_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- unsigned int inputBufferCount_;
- unsigned int outputBufferCount_;
- };
-
- std::string deviceNode_;
- std::unique_ptr<V4L2M2MDevice> m2m_;
-
- std::vector<Stream> streams_;
- std::map<FrameBuffer *, unsigned int> queue_;
-};
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
index 9c99b32f..42b0896d 100644
--- a/src/libcamera/pipeline/simple/meson.build
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
libcamera_sources += files([
- 'converter.cpp',
'simple.cpp',
])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index bc0cb1a0..db3575c3 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
- * simple.cpp - Pipeline handler for simple pipelines
+ * Pipeline handler for simple pipelines
*/
#include <algorithm>
@@ -30,13 +30,14 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/converter.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
-#include "converter.h"
namespace libcamera {
@@ -100,8 +101,14 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
*
* During the breadth-first search, the pipeline is traversed from entity to
* entity, by following media graph links from source to sink, starting at the
- * camera sensor. When reaching an entity (on its sink side), all its source
- * pads are considered to continue the graph traversal.
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers the default internal routes only and doesn't attempt to
+ * setup custom routes. This can be extended if needed.
*
* The shortest path between the camera sensor and a video node is stored in
* SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
@@ -179,14 +186,24 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+ * Whether use of the Software ISP is enabled for this driver.
+ *
+ * The Software ISP can't be used together with a converter.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "imx7-csi", { { "pxp", 1 } } },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "j721e-csi2rx", {}, false },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -204,7 +221,8 @@ public:
int init();
int setupLinks();
int setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence);
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
void bufferReady(FrameBuffer *buffer);
unsigned int streamIndex(const Stream *stream) const
@@ -216,6 +234,11 @@ public:
/* The media entity, always valid. */
MediaEntity *entity;
/*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
@@ -254,16 +277,22 @@ public:
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
- std::unique_ptr<SimpleConverter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady();
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -279,6 +308,7 @@ public:
}
bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
private:
/*
@@ -291,6 +321,7 @@ private:
const SimpleCameraData::Configuration *pipeConfig_;
bool needConversion_;
+ Transform combinedTransform_;
};
class SimplePipelineHandler : public PipelineHandler
@@ -298,8 +329,8 @@ class SimplePipelineHandler : public PipelineHandler
public:
SimplePipelineHandler(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -313,6 +344,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -332,6 +364,7 @@ private:
}
std::vector<MediaEntity *> locateSensors();
+ static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
@@ -340,6 +373,7 @@ private:
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -386,17 +420,40 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
break;
}
- /* The actual breadth-first search algorithm. */
visited.insert(entity);
- for (MediaPad *pad : entity->pads()) {
- if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
- continue;
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
for (MediaLink *link : pad->links()) {
MediaEntity *next = link->sink()->entity();
if (visited.find(next) == visited.end()) {
queue.push({ next, link->sink() });
- parents.insert({ next, { entity, sinkPad, pad, link } });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
}
}
}
@@ -410,7 +467,7 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
* to the sensor. Store all the entities in the pipeline, from the
* camera sensor to the video node, in entities_.
*/
- entities_.push_front({ entity, sinkPad, nullptr, nullptr });
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
for (auto it = parents.find(entity); it != parents.end();
it = parents.find(entity)) {
@@ -455,14 +512,45 @@ int SimpleCameraData::init()
/* Open the converter, if any. */
MediaDevice *converter = pipe->converter();
if (converter) {
- converter_ = std::make_unique<SimpleConverter>(converter);
- if (!converter_->isValid()) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
LOG(SimplePipeline, Warning)
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+ * Instantiate the Software ISP if it is enabled for the given driver
+ * and no converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get());
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
@@ -520,13 +608,13 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
* corresponding possible V4L2 pixel formats on the video node.
*/
V4L2SubdeviceFormat format{};
- format.mbus_code = code;
+ format.code = code;
format.size = size;
int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
if (ret < 0) {
/* Pipeline configuration failed, skip this configuration. */
- format.mbus_code = code;
+ format.code = code;
format.size = size;
LOG(SimplePipeline, Debug)
<< "Sensor format " << format
@@ -534,7 +622,7 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
return;
}
- V4L2VideoDevice::Formats videoFormats = video_->formats(format.mbus_code);
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
LOG(SimplePipeline, Debug)
<< "Adding configuration for " << format.size
@@ -556,12 +644,20 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
config.captureFormat = pixelFormat;
config.captureSize = format.size;
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
+ if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+ /* Do not use the Software ISP for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
}
configs_.push_back(config);
@@ -577,15 +673,32 @@ int SimpleCameraData::setupLinks()
* multiple sink links to be enabled together, even on different sink
* pads. We must thus start by disabling all sink links (but the one we
* want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
*/
+ MediaLink *sinkLink = nullptr;
+
for (SimpleCameraData::Entity &e : entities_) {
- if (!e.sourceLink)
- break;
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
- MediaEntity *remote = e.sourceLink->sink()->entity();
- for (MediaPad *pad : remote->pads()) {
for (MediaLink *link : pad->links()) {
- if (link == e.sourceLink)
+ if (link == sinkLink)
continue;
if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
@@ -597,18 +710,21 @@ int SimpleCameraData::setupLinks()
}
}
- if (!(e.sourceLink->flags() & MEDIA_LNK_FL_ENABLED)) {
- ret = e.sourceLink->setEnabled(true);
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
if (ret < 0)
return ret;
}
+
+ sinkLink = e.sourceLink;
}
return 0;
}
int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
- V4L2Subdevice::Whence whence)
+ V4L2Subdevice::Whence whence,
+ Transform transform)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
@@ -617,7 +733,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
- ret = sensor_->setFormat(format);
+ ret = sensor_->setFormat(format, transform);
if (ret < 0)
return ret;
@@ -644,7 +760,7 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
if (ret < 0)
return ret;
- if (format->mbus_code != sourceFormat.mbus_code ||
+ if (format->code != sourceFormat.code ||
format->size != sourceFormat.size) {
LOG(SimplePipeline, Debug)
<< "Source '" << source->entity()->name()
@@ -678,7 +794,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -687,23 +803,23 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
+ for (auto &item : conversionQueue_.front()) {
FrameBuffer *outputBuffer = item.second;
request = outputBuffer->request();
pipe->completeBuffer(request, outputBuffer);
}
- converterQueue_.pop();
+ conversionQueue_.pop();
if (request)
pipe->completeRequest(request);
@@ -720,9 +836,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
+ if (useConversion_ && !conversionQueue_.empty()) {
const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -735,18 +851,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front());
+ else
+ swIsp_->queueBuffers(buffer, conversionQueue_.front());
+
+ conversionQueue_.pop();
return;
}
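
The converter_/swIsp_ selection now repeats at several call sites (queueing here, and configure(), exportFrameBuffers(), start() and stopDevice() below). One conceivable cleanup, not part of this patch, would be a small forwarding helper on SimpleCameraData, sketched here under the assumption that both backends keep the queueBuffers() signature used above (the helper name is hypothetical):

void SimpleCameraData::queueConversionBuffers(FrameBuffer *input,
					      const std::map<unsigned int, FrameBuffer *> &outputs)
{
	/* Forward to whichever conversion backend was created for this camera. */
	if (converter_)
		converter_->queueBuffers(input, outputs);
	else
		swIsp_->queueBuffers(input, outputs);
}

The call site above would then collapse to queueConversionBuffers(buffer, conversionQueue_.front()).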
@@ -755,13 +875,13 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -771,6 +891,56 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady()
+{
+ /* \todo Use the DelayedControls class */
+ swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE }));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+		const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+		if (!pad) {
+			LOG(SimplePipeline, Warning)
+				<< "Entity " << entity->name()
+				<< " has invalid route source pad "
+				<< route.source.pad;
+			continue;
+		}
+
+		pads.push_back(pad);
+ }
+
+ return pads;
+}
+
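
A short sketch of how a caller might consume routedSourcePads(), assuming a valid sinkPad pointer (hypothetical usage, not taken from this patch):

	for (const MediaPad *source : routedSourcePads(sinkPad))
		LOG(SimplePipeline, Debug)
			<< "Active route from sink pad " << sinkPad->index()
			<< " to source pad " << source->index();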
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
@@ -782,17 +952,45 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
+
+} /* namespace */
+
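
A worked example of adjustSize(): with a hypothetical supported range of 32x32 to 4096x2160 and steps of 16x8, a request for 1921x1081 is shrunk by the minimum, aligned down to the steps, and grown back, yielding 1920x1080. A self-contained sketch of the same arithmetic on plain integers:

#include <cassert>

int main()
{
	/* Hypothetical supported range: 32x32 to 4096x2160, steps 16x8. */
	const unsigned int minW = 32, minH = 32;
	const unsigned int hStep = 16, vStep = 8;

	/* Requested size, already bounded/expanded into the range. */
	unsigned int w = 1921, h = 1081;

	/* shrunkBy(min), alignedDownTo(hStep, vStep), grownBy(min). */
	w = (w - minW) / hStep * hStep + minW;
	h = (h - minH) / vStep * vStep + minH;

	assert(w == 1920 && h == 1080);

	return 0;
}

Aligning relative to the minimum rather than to zero guarantees that the minimum size itself always remains a reachable, valid result.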
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
+ const CameraSensor *sensor = data_->sensor_.get();
Status status = Valid;
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
- }
/* Cap the number of entries to the available streams. */
if (config_.size() > data_->streams_.size()) {
@@ -897,10 +1095,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+			 * the largest valid output size that does not exceed the
+			 * requested size.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
- << " to " << pipeConfig_->captureSize;
- cfg.size = pipeConfig_->captureSize;
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -912,13 +1119,16 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -944,12 +1154,12 @@ SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
{
}
-CameraConfiguration *SimplePipelineHandler::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
SimpleCameraData *data = cameraData(camera);
- CameraConfiguration *config =
- new SimpleCameraConfiguration(camera, data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
if (roles.empty())
return config;
@@ -1020,15 +1230,16 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
V4L2SubdeviceFormat format{};
- format.mbus_code = pipeConfig->code;
+ format.code = pipeConfig->code;
format.size = pipeConfig->sensorSize;
- ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat);
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
if (ret < 0)
return ret;
/* Configure the video node. */
- V4L2PixelFormat videoFormat = V4L2PixelFormat::fromPixelFormat(pipeConfig->captureFormat);
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = videoFormat;
@@ -1055,14 +1266,14 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
@@ -1075,7 +1286,10 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ return data->converter_
+ ? data->converter_->configure(inputCfg, outputCfgs)
+ : data->swIsp_->configure(inputCfg, outputCfgs,
+ data->sensor_->controls());
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -1088,9 +1302,12 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(data->streamIndex(stream),
+ count, buffers)
+ : data->swIsp_->exportBuffers(data->streamIndex(stream),
+ count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1109,13 +1326,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1134,15 +1351,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1154,15 +1377,19 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
- data->converterBuffers_.clear();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1180,7 +1407,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
+ if (data->useConversion_) {
buffers.emplace(data->streamIndex(stream), buffer);
} else {
ret = data->video_->queueBuffer(buffer);
@@ -1189,8 +1416,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_)
+ data->conversionQueue_.push(std::move(buffers));
return 0;
}
@@ -1260,6 +1487,37 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
return sensors;
}
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
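+	/*
+	 * The try routing of a newly opened subdevice is expected to reflect
+	 * the driver's default state, so reading it back and applying it as
+	 * the active routing restores the defaults.
+	 */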
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+	 * If the routing table is empty, we won't be able to use the subdev
+	 * meaningfully.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
@@ -1286,6 +1544,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
std::vector<MediaEntity *> sensors = locateSensors();
if (sensors.empty()) {
@@ -1352,6 +1612,23 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
<< ": " << strerror(-ret);
return false;
}
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+			 * to make sure entities are enumerated according
+			 * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
break;
default:
@@ -1455,6 +1732,6 @@ void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
}
}
-REGISTER_PIPELINE_HANDLER(SimplePipelineHandler)
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index 53b2f23a..8a7409fc 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
@@ -46,8 +46,16 @@ public:
ControlInfoMap::Map *ctrls);
void bufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
+
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
@@ -66,8 +74,8 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -81,8 +89,6 @@ public:
bool match(DeviceEnumerator *enumerator) override;
private:
- std::string generateId(const UVCCameraData *data);
-
int processControl(ControlList *controls, unsigned int id,
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
@@ -105,8 +111,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -149,7 +155,7 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -159,6 +165,11 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -167,24 +178,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- V4L2VideoDevice::Formats v4l2Formats = data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- for (const auto &format : v4l2Formats) {
- PixelFormat pixelFormat = format.first.toPixelFormat();
- if (pixelFormat.isValid())
- deviceFormats[pixelFormat] = format.second;
- }
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -205,7 +210,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
int ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -213,7 +218,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
cfg.setStream(&data->stream_);
@@ -340,12 +345,8 @@ int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
{
ControlList controls(data->video_->controls());
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
+ for (const auto &[id, value] : request->controls())
processControl(&controls, id, value);
- }
for (const auto &ctrl : controls)
LOG(UVC, Debug)
@@ -383,69 +384,6 @@ int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request)
return 0;
}
-std::string PipelineHandlerUVC::generateId(const UVCCameraData *data)
-{
- const std::string path = data->video_->devicePath();
-
- /* Create a controller ID from first device described in firmware. */
- std::string controllerId;
- std::string searchPath = path;
- while (true) {
- std::string::size_type pos = searchPath.rfind('/');
- if (pos <= 1) {
- LOG(UVC, Error) << "Can not find controller ID";
- return {};
- }
-
- searchPath = searchPath.substr(0, pos);
-
- controllerId = sysfs::firmwareNodePath(searchPath);
- if (!controllerId.empty())
- break;
- }
-
- /*
- * Create a USB ID from the device path which has the known format:
- *
- * path = bus, "-", ports, ":", config, ".", interface ;
- * bus = number ;
- * ports = port, [ ".", ports ] ;
- * port = number ;
- * config = number ;
- * interface = number ;
- *
- * Example: 3-2.4:1.0
- *
- * The bus is not guaranteed to be stable and needs to be stripped from
- * the USB ID. The final USB ID is built up of the ports, config and
- * interface properties.
- *
- * Example 2.4:1.0.
- */
- std::string usbId = utils::basename(path.c_str());
- usbId = usbId.substr(usbId.find('-') + 1);
-
- /* Creata a device ID from the USB devices vendor and product ID. */
- std::string deviceId;
- for (const char *name : { "idVendor", "idProduct" }) {
- std::ifstream file(path + "/../" + name);
-
- if (!file.is_open())
- return {};
-
- std::string value;
- std::getline(file, value);
- file.close();
-
- if (!deviceId.empty())
- deviceId += ":";
-
- deviceId += value;
- }
-
- return controllerId + "-" + usbId + "-" + deviceId;
-}
-
bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
{
MediaDevice *media;
@@ -461,12 +399,7 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return false;
/* Create and register the camera. */
- std::string id = generateId(data.get());
- if (id.empty()) {
- LOG(UVC, Error) << "Failed to generate camera ID";
- return false;
- }
-
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
@@ -501,6 +434,39 @@ int UVCCameraData::init(MediaDevice *media)
video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
+
+ /*
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
+ */
+ Size resolution;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
+ for (const SizeRange &sizeRange : sizeRanges) {
+ if (sizeRange.max > resolution)
+ resolution = sizeRange.max;
+ }
+ }
+
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
properties_.set(properties::Model, utils::toAscii(media->model()));
/*
@@ -531,19 +497,6 @@ int UVCCameraData::init(MediaDevice *media)
properties_.set(properties::Location, location);
- /*
- * Get the current format in order to initialize the sensor array
- * properties.
- */
- Size resolution;
- for (const auto &it : video_->formats()) {
- const std::vector<SizeRange> &sizeRanges = it.second;
- for (const SizeRange &sizeRange : sizeRanges) {
- if (sizeRange.max > resolution)
- resolution = sizeRange.max;
- }
- }
-
properties_.set(properties::PixelArraySize, resolution);
properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
@@ -562,6 +515,70 @@ int UVCCameraData::init(MediaDevice *media)
return 0;
}
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+ /* Create a controller ID from first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+			LOG(UVC, Error) << "Cannot find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
+ break;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+	 * Example: 2.4:1.0
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+	/* Create a device ID from the USB device's vendor and product IDs. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
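
To make the path grammar above concrete, a self-contained walk-through of the USB ID derivation for a made-up sysfs path:

#include <cassert>
#include <string>

int main()
{
	/* Hypothetical sysfs path for a camera on USB bus 3. */
	const std::string path =
		"/sys/devices/pci0000:00/0000:00:14.0/usb3/3-2/3-2.4/3-2.4:1.0";

	/* Equivalent of utils::basename(): keep the last path component. */
	std::string usbId = path.substr(path.rfind('/') + 1);
	assert(usbId == "3-2.4:1.0");

	/* Strip the unstable bus number; keep ports, config and interface. */
	usbId = usbId.substr(usbId.find('-') + 1);
	assert(usbId == "2.4:1.0");

	return 0;
}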
void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ControlInfoMap::Map *ctrls)
{
@@ -692,6 +709,6 @@ void UVCCameraData::bufferReady(FrameBuffer *buffer)
pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 3379ac5c..c7650432 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
@@ -54,7 +54,7 @@ public:
int init();
int allocateMockIPABuffers();
void bufferReady(FrameBuffer *buffer);
- void paramsBufferReady(unsigned int id);
+ void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
@@ -84,8 +84,8 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -128,8 +128,8 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
- if (transform != Transform::Identity) {
- transform = Transform::Identity;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
status = Adjusted;
}
@@ -171,7 +171,7 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
cfg.bufferCount = 4;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
@@ -189,11 +189,13 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
VimcCameraData *data = cameraData(camera);
- CameraConfiguration *config = new VimcCameraConfiguration(data);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
@@ -242,7 +244,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
/* The scaler hardcodes a x3 scale-up ratio. */
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
ret = data->sensor_->setFormat(&subformat);
@@ -253,7 +255,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = pixelformats.find(cfg.pixelFormat)->second;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -275,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat);
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
@@ -283,7 +285,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
return ret;
if (format.size != cfg.size ||
- format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat))
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
/*
@@ -378,7 +380,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
unsigned int offset;
uint32_t cid;
@@ -471,7 +473,15 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
std::string conf = data->ipa_->configurationFile("vimc.conf");
- data->ipa_->init(IPASettings{ conf, data->sensor_->model() });
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
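
For readers unfamiliar with Flags<>, the test in the log message relies on operator& returning a Flags value that is testable in boolean context. A minimal sketch (illustrative only):

	Flags<ipa::vimc::TestFlag> flags = ipa::vimc::TestFlag::Flag2;
	flags |= ipa::vimc::TestFlag::Flag1;

	/* operator& masks a single flag; the result tests true if set. */
	if (flags & ipa::vimc::TestFlag::Flag1)
		LOG(VIMC, Debug) << "Flag 1 is set";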
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
@@ -598,7 +608,7 @@ int VimcCameraData::allocateMockIPABuffers()
constexpr unsigned int kBufCount = 2;
V4L2DeviceFormat format;
- format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::BGR888);
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
format.size = Size (160, 120);
int ret = video_->setFormat(&format);
@@ -608,10 +618,11 @@ int VimcCameraData::allocateMockIPABuffers()
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
-void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id)
+void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */