-rw-r--r--  include/libcamera/controls.h                 |   2
-rw-r--r--  include/libcamera/internal/media_device.h    |   2
-rw-r--r--  include/libcamera/internal/media_pipeline.h  |  59
-rw-r--r--  include/libcamera/internal/meson.build       |   1
-rw-r--r--  src/apps/cam/camera_session.cpp              |  29
-rw-r--r--  src/apps/common/stream_options.cpp           |   3
-rw-r--r--  src/apps/qcam/main_window.cpp                |   3
-rw-r--r--  src/gstreamer/gstlibcamera-utils.cpp         |   5
-rw-r--r--  src/ipa/rpi/common/ipa_base.cpp              |  11
-rw-r--r--  src/libcamera/media_device.cpp               |  24
-rw-r--r--  src/libcamera/media_pipeline.cpp             | 304
-rw-r--r--  src/libcamera/meson.build                    |   1
-rw-r--r--  src/libcamera/pipeline/rkisp1/rkisp1.cpp     |  86
-rw-r--r--  src/libcamera/stream.cpp                     |   3
-rwxr-xr-x  utils/raspberrypi/ctt/ctt.py                 |   9
-rw-r--r--  utils/raspberrypi/ctt/ctt_alsc.py            |   7
-rw-r--r--  utils/raspberrypi/ctt/ctt_cac.py             |  32
17 files changed, 506 insertions, 75 deletions
diff --git a/include/libcamera/controls.h b/include/libcamera/controls.h
index 4bfe9615..2ae4ec3d 100644
--- a/include/libcamera/controls.h
+++ b/include/libcamera/controls.h
@@ -120,7 +120,7 @@ struct control_type<Point> {
};
template<typename T, std::size_t N>
-struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
+struct control_type<Span<T, N>, std::enable_if_t<control_type<std::remove_cv_t<T>>::size == 0>> : public control_type<std::remove_cv_t<T>> {
static constexpr std::size_t size = N;
};
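
The new std::enable_if_t guard restricts the Span specialization to element types whose own control_type reports a scalar (size == 0), so nested array types can no longer match the specialization recursively. A minimal, self-contained sketch of the pattern, with the hypothetical names elem_trait and array_view standing in for control_type and Span:

#include <cstddef>
#include <type_traits>

template<typename T, typename = void>
struct elem_trait {
};

template<>
struct elem_trait<int> {
	static constexpr std::size_t size = 0; /* scalar element */
};

template<typename T, std::size_t N>
struct array_view {
};

/*
 * The specialization participates only when the element type is a
 * scalar (size == 0), so array_view<array_view<int, 4>, 2> falls back
 * to the primary template instead of matching recursively.
 */
template<typename T, std::size_t N>
struct elem_trait<array_view<T, N>,
		  std::enable_if_t<elem_trait<std::remove_cv_t<T>>::size == 0>>
	: public elem_trait<std::remove_cv_t<T>> {
	static constexpr std::size_t size = N;
};

static_assert(elem_trait<array_view<int, 8>>::size == 8);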
diff --git a/include/libcamera/internal/media_device.h b/include/libcamera/internal/media_device.h
index e412d3a0..b3a48b98 100644
--- a/include/libcamera/internal/media_device.h
+++ b/include/libcamera/internal/media_device.h
@@ -55,6 +55,8 @@ public:
Signal<> disconnected;
+ std::vector<MediaEntity *> locateEntities(unsigned int function);
+
protected:
std::string logPrefix() const override;
diff --git a/include/libcamera/internal/media_pipeline.h b/include/libcamera/internal/media_pipeline.h
new file mode 100644
index 00000000..a7a4b8c5
--- /dev/null
+++ b/include/libcamera/internal/media_pipeline.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Media pipeline support
+ */
+
+#pragma once
+
+#include <list>
+#include <string>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+class CameraSensor;
+class MediaEntity;
+class MediaLink;
+class MediaPad;
+struct V4L2SubdeviceFormat;
+
+class MediaPipeline
+{
+public:
+ int init(MediaEntity *source, std::string_view sink);
+ int initLinks();
+ int configure(CameraSensor *sensor, V4L2SubdeviceFormat *format);
+
+private:
+ struct Entity {
+ /* The media entity, always valid. */
+ MediaEntity *entity;
+ /*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
+ * The local sink pad connected to the upstream entity, null for
+ * the camera sensor at the beginning of the pipeline.
+ */
+ const MediaPad *sink;
+ /*
+ * The local source pad connected to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ const MediaPad *source;
+ /*
+ * The link on the source pad, to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ MediaLink *sourceLink;
+ };
+
+ std::list<Entity> entities_;
+};
+
+} /* namespace libcamera */
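
For orientation, this is how a pipeline handler is expected to drive the class, modelled on the rkisp1 changes later in this patch. setupPipeline and its parameters are hypothetical glue, and error handling is reduced to early returns:

#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_object.h"
#include "libcamera/internal/media_pipeline.h"
#include "libcamera/internal/v4l2_subdevice.h"

using namespace libcamera;

int setupPipeline(MediaEntity *sensorEntity, CameraSensor *sensor,
		  V4L2SubdeviceFormat *format)
{
	MediaPipeline pipe;

	/* Discover the shortest media-graph path once, at camera creation. */
	int ret = pipe.init(sensorEntity, "rkisp1_isp");
	if (ret)
		return ret;

	/* At configure() time, enable every link along the path... */
	ret = pipe.initLinks();
	if (ret)
		return ret;

	/* ...then propagate the sensor format entity by entity. */
	return pipe.configure(sensor, format);
}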
diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build
index 45408b31..33f318b2 100644
--- a/include/libcamera/internal/meson.build
+++ b/include/libcamera/internal/meson.build
@@ -32,6 +32,7 @@ libcamera_internal_headers = files([
'matrix.h',
'media_device.h',
'media_object.h',
+ 'media_pipeline.h',
'pipeline_handler.h',
'process.h',
'pub_key.h',
diff --git a/src/apps/cam/camera_session.cpp b/src/apps/cam/camera_session.cpp
index 97c1ae44..f63fcb22 100644
--- a/src/apps/cam/camera_session.cpp
+++ b/src/apps/cam/camera_session.cpp
@@ -62,11 +62,32 @@ CameraSession::CameraSession(CameraManager *cm,
return;
}
- std::vector<StreamRole> roles = StreamKeyValueParser::roles(options_[OptStream]);
+ std::vector<StreamRole> roles =
+ StreamKeyValueParser::roles(options_[OptStream]);
+ std::vector<std::vector<StreamRole>> tryRoles;
+ if (!roles.empty()) {
+ /*
+ * If the roles are explicitly specified, there is no need
+ * to try other roles.
+ */
+ tryRoles.push_back(roles);
+ } else {
+ tryRoles.push_back({ StreamRole::Viewfinder });
+ tryRoles.push_back({ StreamRole::Raw });
+ }
+
+ std::unique_ptr<CameraConfiguration> config;
+ bool valid = false;
+ for (std::vector<StreamRole> &rolesIt : tryRoles) {
+ config = camera_->generateConfiguration(rolesIt);
+ if (config && config->size() == rolesIt.size()) {
+ roles = rolesIt;
+ valid = true;
+ break;
+ }
+ }
- std::unique_ptr<CameraConfiguration> config =
- camera_->generateConfiguration(roles);
- if (!config || config->size() != roles.size()) {
+ if (!valid) {
std::cerr << "Failed to get default stream configuration"
<< std::endl;
return;
diff --git a/src/apps/common/stream_options.cpp b/src/apps/common/stream_options.cpp
index 99239e07..288f8653 100644
--- a/src/apps/common/stream_options.cpp
+++ b/src/apps/common/stream_options.cpp
@@ -42,9 +42,8 @@ KeyValueParser::Options StreamKeyValueParser::parse(const char *arguments)
std::vector<StreamRole> StreamKeyValueParser::roles(const OptionValue &values)
{
- /* If no configuration values to examine default to viewfinder. */
if (values.empty())
- return { StreamRole::Viewfinder };
+ return {};
const std::vector<OptionValue> &streamParameters = values.toArray();
diff --git a/src/apps/qcam/main_window.cpp b/src/apps/qcam/main_window.cpp
index d2ccbd23..224a7e5a 100644
--- a/src/apps/qcam/main_window.cpp
+++ b/src/apps/qcam/main_window.cpp
@@ -356,6 +356,9 @@ int MainWindow::startCapture()
/* Verify roles are supported. */
switch (roles.size()) {
+ case 0:
+ roles.push_back(StreamRole::Viewfinder);
+ break;
case 1:
if (roles[0] != StreamRole::Viewfinder) {
qWarning() << "Only viewfinder supported for single stream";
diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp
index 41eea7d8..2edebba0 100644
--- a/src/gstreamer/gstlibcamera-utils.cpp
+++ b/src/gstreamer/gstlibcamera-utils.cpp
@@ -494,9 +494,12 @@ void gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
/* Configure colorimetry */
if (gst_structure_has_field(s, "colorimetry")) {
- const gchar *colorimetry_str = gst_structure_get_string(s, "colorimetry");
+ const gchar *colorimetry_str;
GstVideoColorimetry colorimetry;
+ gst_structure_fixate_field(s, "colorimetry");
+ colorimetry_str = gst_structure_get_string(s, "colorimetry");
+
if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str))
g_critical("Invalid colorimetry %s", colorimetry_str);
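
The new fixation step matters because unfixated caps may carry a list of candidate colorimetries rather than a single string, in which case gst_structure_get_string() returns NULL and the previous code handed a null pointer to gst_video_colorimetry_from_string(). A standalone sketch of that behaviour (hypothetical test program, not part of the patch):

#include <gst/gst.h>

int main(int argc, char **argv)
{
	gst_init(&argc, &argv);

	GstStructure *s = gst_structure_from_string(
		"video/x-raw, colorimetry={ bt601, bt709 }", NULL);

	const gchar *str = gst_structure_get_string(s, "colorimetry");
	/* str is NULL here: the field holds a list, not a string. */
	g_print("before fixate: %s\n", str ? str : "(not a string)");

	/* Fixation collapses the list to a single value. */
	gst_structure_fixate_field(s, "colorimetry");
	str = gst_structure_get_string(s, "colorimetry");
	g_print("after fixate: %s\n", str);

	gst_structure_free(s);
	return 0;
}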
diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp
index 6734c32e..e0a93daa 100644
--- a/src/ipa/rpi/common/ipa_base.cpp
+++ b/src/ipa/rpi/common/ipa_base.cpp
@@ -946,6 +946,17 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ case controls::AE_ENABLE: {
+ /*
+ * The AeEnable control is now just a wrapper that will already have been
+ * converted to ExposureTimeMode and AnalogueGainMode equivalents, so there
+ * would be nothing to do here. Nonetheless, "handle" the control so as to
+ * avoid warnings from the "default:" clause of the switch statement.
+ */
+
+ break;
+ }
+
case controls::AE_FLICKER_MODE: {
RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
controller_.getAlgorithm("agc"));
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 75abd91d..353f34a8 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -794,7 +794,7 @@ void MediaDevice::fixupEntityFlags(struct media_v2_entity *entity)
* low-level link setup as it performs no checks on the validity of the \a
* flags, and assumes that the supplied \a flags are valid for the link (e.g.
* immutable links cannot be disabled).
-*
+ *
* \sa MediaLink::setEnabled(bool enable)
*
* \return 0 on success or a negative error code otherwise
@@ -829,4 +829,26 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags)
return 0;
}
+/**
+ * \brief Identify all entities of a common function in the MediaDevice
+ * \param[in] function The entity function to search for
+ *
+ * Search all entities within the graph of the MediaDevice and return
+ * a vector of those which match the given function.
+ *
+ * \return A vector of matching entities
+ */
+std::vector<MediaEntity *> MediaDevice::locateEntities(unsigned int function)
+{
+ std::vector<MediaEntity *> found;
+
+ /* Gather all the entities exposing the requested function. */
+ for (MediaEntity *entity : entities()) {
+ if (entity->function() == function)
+ found.push_back(entity);
+ }
+
+ return found;
+}
+
} /* namespace libcamera */
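
A short usage sketch for the new helper, matching the pattern the rkisp1 handler adopts below; listSensors and the populated MediaDevice pointer are illustrative assumptions:

#include <iostream>

#include <linux/media.h>

#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"

using namespace libcamera;

/* Print every camera sensor entity present in the media graph. */
void listSensors(MediaDevice *media)
{
	for (MediaEntity *entity :
	     media->locateEntities(MEDIA_ENT_F_CAM_SENSOR))
		std::cout << "Found sensor: " << entity->name() << std::endl;
}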
diff --git a/src/libcamera/media_pipeline.cpp b/src/libcamera/media_pipeline.cpp
new file mode 100644
index 00000000..c4e9f69b
--- /dev/null
+++ b/src/libcamera/media_pipeline.cpp
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Media pipeline support
+ */
+
+#include "libcamera/internal/media_pipeline.h"
+
+#include <algorithm>
+#include <errno.h>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+/**
+ * \file media_pipeline.h
+ * \brief Provide a representation of a pipeline of devices using the Media
+ * Controller.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MediaPipeline)
+
+/**
+ * \class MediaPipeline
+ * \brief The MediaPipeline represents a set of entities that together form a
+ * data path for stream data.
+ *
+ * A MediaPipeline instance models the path from a source entity to a sink
+ * entity in a media graph.
+ */
+
+/**
+ * \brief Retrieve all source pads connected to a sink pad through active routes
+ * \param[in] sink The sink pad to examine
+ *
+ * Examine the entity using the V4L2 Subdevice Routing API to collect all the
+ * source pads which are connected with an active route to the sink pad.
+ *
+ * \return A vector of source MediaPads
+ */
+static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+ if (!pad) {
+ LOG(MediaPipeline, Error)
+ << "Entity " << entity->name()
+ << " has invalid route source pad "
+ << route.source.pad;
+ return {};
+ }
+
+ pads.push_back(pad);
+ }
+
+ return pads;
+}
+
+/**
+ * \brief Find the path from source to sink
+ * \param[in] source The entity at the head of the pipeline
+ * \param[in] sink The name of the entity at the end of the pipeline
+ *
+ * Starting from a source entity, determine the shortest path to the target
+ * described by \a sink.
+ *
+ * If \a sink cannot be found, or no route from \a source to \a sink can be
+ * established, an error of -ENOLINK is returned.
+ *
+ * When successful, the MediaPipeline will internally store the representation
+ * of entities and links to describe the path between the two entities.
+ *
+ * \return 0 on success, a negative errno otherwise
+ */
+int MediaPipeline::init(MediaEntity *source, std::string_view sink)
+{
+ /*
+ * Find the shortest path from the camera sensor to the
+ * target entity.
+ */
+ std::unordered_set<MediaEntity *> visited;
+ std::queue<std::tuple<MediaEntity *, MediaPad *>> queue;
+
+ /* Remember at each entity where we came from. */
+ std::unordered_map<MediaEntity *, Entity> parents;
+ MediaEntity *entity = nullptr;
+ MediaEntity *target = nullptr;
+ MediaPad *sinkPad;
+
+ queue.push({ source, nullptr });
+
+ while (!queue.empty()) {
+ std::tie(entity, sinkPad) = queue.front();
+ queue.pop();
+
+ /* Found the target device. */
+ if (entity->name() == sink) {
+ LOG(MediaPipeline, Debug)
+ << "Found Pipeline target " << entity->name();
+ target = entity;
+ break;
+ }
+
+ visited.insert(entity);
+
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
+ for (MediaLink *link : pad->links()) {
+ MediaEntity *next = link->sink()->entity();
+ if (visited.find(next) == visited.end()) {
+ queue.push({ next, link->sink() });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
+ }
+ }
+ }
+ }
+
+ if (!target) {
+ LOG(MediaPipeline, Error)
+ << "Failed to connect " << source->name()
+ << " to " << sink;
+ return -ENOLINK;
+ }
+
+ /*
+ * Use the parents map to walk back from the capture device to the
+ * sensor. Store all the entities in the pipeline, from the
+ * camera sensor to the video node, in entities_.
+ */
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
+
+ for (auto it = parents.find(entity); it != parents.end();
+ it = parents.find(entity)) {
+ const Entity &e = it->second;
+ entities_.push_front(e);
+ entity = e.entity;
+ }
+
+ LOG(MediaPipeline, Info)
+ << "Found pipeline: "
+ << utils::join(entities_, " -> ",
+ [](const Entity &e) {
+ std::string s = "[";
+ if (e.sink)
+ s += std::to_string(e.sink->index()) + "|";
+ s += e.entity->name();
+ if (e.source)
+ s += "|" + std::to_string(e.source->index());
+ s += "]";
+ return s;
+ });
+
+ return 0;
+}
+
+/**
+ * \brief Initialise and enable all links through the MediaPipeline
+ * \return 0 on success, or a negative errno otherwise
+ */
+int MediaPipeline::initLinks()
+{
+ int ret = 0;
+
+ MediaLink *sinkLink = nullptr;
+ for (Entity &e : entities_) {
+ /* Sensor entities have no connected sink. */
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ LOG(MediaPipeline, Debug) << "Enabling : " << *sinkLink;
+
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ sinkLink = e.sourceLink;
+ }
+
+ return ret;
+}
+
+/**
+ * \brief Configure the entities of this MediaPipeline
+ * \param[in] sensor The camera sensor at the head of the pipeline
+ * \param[inout] format The format to apply and propagate along the pipeline
+ *
+ * Propagate formats through each of the entities of the Pipeline, validating
+ * that each one was not adjusted by the driver from the desired format.
+ *
+ * \return 0 on success or a negative errno otherwise
+ */
+int MediaPipeline::configure(CameraSensor *sensor, V4L2SubdeviceFormat *format)
+{
+ int ret;
+
+ for (const Entity &e : entities_) {
+ /* The sensor is configured through the CameraSensor */
+ if (!e.sourceLink)
+ break;
+
+ MediaLink *link = e.sourceLink;
+ MediaPad *source = link->source();
+ MediaPad *sink = link->sink();
+
+ /* 'format' already contains the sensor configuration */
+ if (source->entity() != sensor->entity()) {
+ /* \todo Add MediaDevice cache to reduce FD pressure */
+ V4L2Subdevice subdev(source->entity());
+ ret = subdev.open();
+ if (ret)
+ return ret;
+
+ ret = subdev.getFormat(source->index(), format);
+ if (ret < 0)
+ return ret;
+ }
+
+ V4L2SubdeviceFormat sourceFormat = *format;
+ /* \todo Add MediaDevice cache to reduce FD pressure */
+ V4L2Subdevice subdev(sink->entity());
+ ret = subdev.open();
+ if (ret)
+ return ret;
+
+ ret = subdev.setFormat(sink->index(), format);
+ if (ret < 0)
+ return ret;
+
+ if (format->code != sourceFormat.code ||
+ format->size != sourceFormat.size) {
+ LOG(MediaPipeline, Debug)
+ << "Source '" << *source
+ << " produces " << sourceFormat
+ << ", sink '" << *sink
+ << " requires " << *format;
+ return -EINVAL;
+ }
+
+ LOG(MediaPipeline, Debug)
+ << "Link " << *link << " configured with format "
+ << *format;
+ }
+
+ return 0;
+}
+
+} /* namespace libcamera */
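
The search in init() is a breadth-first traversal that records, for each entity, the link through which it was first reached, then replays those records backwards from the target to recover a shortest path. The same idiom reduced to a generic, self-contained sketch over an integer adjacency map (hypothetical helper; an unreachable target yields a single-element list):

#include <list>
#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <vector>

std::list<int> shortestPath(int source, int target,
			    const std::unordered_map<int, std::vector<int>> &graph)
{
	std::unordered_set<int> visited{ source };
	std::unordered_map<int, int> parents;
	std::queue<int> queue;

	queue.push(source);

	while (!queue.empty()) {
		int node = queue.front();
		queue.pop();

		if (node == target)
			break;

		auto adj = graph.find(node);
		if (adj == graph.end())
			continue;

		for (int next : adj->second) {
			/* Record the predecessor that first reached 'next'. */
			if (visited.insert(next).second) {
				parents.insert({ next, node });
				queue.push(next);
			}
		}
	}

	/* Follow the parents back from the target, as init() does. */
	std::list<int> path{ target };
	for (auto it = parents.find(target); it != parents.end();
	     it = parents.find(path.front()))
		path.push_front(it->second);

	return path;
}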
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index de22b8e6..800d4080 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -43,6 +43,7 @@ libcamera_internal_sources = files([
'matrix.cpp',
'media_device.cpp',
'media_object.cpp',
+ 'media_pipeline.cpp',
'pipeline_handler.cpp',
'process.cpp',
'pub_key.cpp',
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 194dfce7..675f0a74 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -42,6 +42,7 @@
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_pipeline.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -116,6 +117,11 @@ public:
ControlInfoMap ipaControls_;
+ /*
+ * All entities in the pipeline, from the camera sensor to the RKISP1.
+ */
+ MediaPipeline pipe_;
+
private:
void paramsComputed(unsigned int frame, unsigned int bytesused);
void setSensorControls(unsigned int frame,
@@ -180,8 +186,7 @@ private:
friend RkISP1CameraConfiguration;
friend RkISP1Frames;
- int initLinks(Camera *camera, const CameraSensor *sensor,
- const RkISP1CameraConfiguration &config);
+ int initLinks(Camera *camera, const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
void tryCompleteRequest(RkISP1FrameInfo *info);
void imageBufferReady(FrameBuffer *buffer);
@@ -199,7 +204,6 @@ private:
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
- std::unique_ptr<V4L2Subdevice> csi_;
bool hasSelfPath_;
bool isRaw_;
@@ -223,8 +227,6 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
-
- const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
@@ -798,7 +800,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
CameraSensor *sensor = data->sensor_.get();
int ret;
- ret = initLinks(camera, sensor, *config);
+ ret = initLinks(camera, *config);
if (ret)
return ret;
@@ -821,12 +823,12 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
LOG(RkISP1, Debug) << "Sensor configured with " << format;
- if (csi_) {
- ret = csi_->setFormat(0, &format);
- if (ret < 0)
- return ret;
- }
+ /* Propagate format through the internal media pipeline up to the ISP */
+ ret = data->pipe_.configure(sensor, &format);
+ if (ret < 0)
+ return ret;
+ LOG(RkISP1, Debug) << "Configuring ISP with: " << format;
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
@@ -1201,7 +1203,6 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
*/
int PipelineHandlerRkISP1::initLinks(Camera *camera,
- const CameraSensor *sensor,
const RkISP1CameraConfiguration &config)
{
RkISP1CameraData *data = cameraData(camera);
@@ -1212,31 +1213,16 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return ret;
/*
- * Configure the sensor links: enable the link corresponding to this
- * camera.
+ * Configure the sensor links: enable the links corresponding to this
+ * pipeline all the way up to the ISP, through any connected CSI receiver.
*/
- for (MediaLink *link : ispSink_->links()) {
- if (link->source()->entity() != sensor->entity())
- continue;
-
- LOG(RkISP1, Debug)
- << "Enabling link from sensor '"
- << link->source()->entity()->name()
- << "' to ISP";
-
- ret = link->setEnabled(true);
- if (ret < 0)
- return ret;
- }
-
- if (csi_) {
- MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
-
- ret = link->setEnabled(true);
- if (ret < 0)
- return ret;
+ ret = data->pipe_.initLinks();
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to set up pipe links";
+ return ret;
}
+ /* Configure the paths after the ISP */
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
@@ -1312,6 +1298,13 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
std::make_unique<RkISP1CameraData>(this, &mainPath_,
hasSelfPath_ ? &selfPath_ : nullptr);
+ /* Identify the pipeline path between the sensor and the rkisp1_isp */
+ ret = data->pipe_.init(sensor, "rkisp1_isp");
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to identify path from sensor to sink";
+ return ret;
+ }
+
data->sensor_ = CameraSensorFactoryBase::create(sensor);
if (!data->sensor_)
return -ENODEV;
@@ -1347,6 +1340,7 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
+
registerCamera(std::move(camera));
return 0;
@@ -1354,8 +1348,6 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
{
- const MediaPad *pad;
-
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
dm.add("rkisp1_resizer_mainpath");
@@ -1380,22 +1372,6 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (isp_->open() < 0)
return false;
- /* Locate and open the optional CSI-2 receiver. */
- ispSink_ = isp_->entity()->getPadByIndex(0);
- if (!ispSink_ || ispSink_->links().empty())
- return false;
-
- pad = ispSink_->links().at(0)->source();
- if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
- csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
- if (csi_->open() < 0)
- return false;
-
- ispSink_ = csi_->entity()->getPadByIndex(0);
- if (!ispSink_)
- return false;
- }
-
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
@@ -1446,8 +1422,10 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
* camera instance for each of them.
*/
bool registered = false;
- for (MediaLink *link : ispSink_->links()) {
- if (!createCamera(link->source()->entity()))
+
+ for (MediaEntity *entity : media_->locateEntities(MEDIA_ENT_F_CAM_SENSOR)) {
+ LOG(RkISP1, Debug) << "Identified " << entity->name();
+ if (!createCamera(entity))
registered = true;
}
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index 978d7275..f091487c 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -407,7 +407,8 @@ std::string StreamConfiguration::toString() const
*/
std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg)
{
- out << cfg.size << "-" << cfg.pixelFormat;
+ out << cfg.size << "-" << cfg.pixelFormat << "/"
+ << ColorSpace::toString(cfg.colorSpace);
return out;
}
diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py
index 96f1b5e6..186afda5 100755
--- a/utils/raspberrypi/ctt/ctt.py
+++ b/utils/raspberrypi/ctt/ctt.py
@@ -198,9 +198,12 @@ class Camera:
"""
Write output to json
"""
- self.json['rpi.cac']['cac'] = cacs
- self.log += '\nCAC calibration written to json file'
- print('Finished CAC calibration')
+ if cacs:
+ self.json['rpi.cac']['cac'] = cacs
+ self.log += '\nCAC calibration written to json file'
+ print('Finished CAC calibration')
+ else:
+ self.log += "\nCAC calibration failed"
"""
diff --git a/utils/raspberrypi/ctt/ctt_alsc.py b/utils/raspberrypi/ctt/ctt_alsc.py
index 1d94dfa5..f4fd09e3 100644
--- a/utils/raspberrypi/ctt/ctt_alsc.py
+++ b/utils/raspberrypi/ctt/ctt_alsc.py
@@ -127,11 +127,12 @@ def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0)
channels = [Img.channels[i] for i in Img.order]
"""
calculate size of single rectangle.
- -(-(w-1)//32) is a ceiling division. w-1 is to deal robustly with the case
- where w is a multiple of 32.
+ The divisions here must ensure the final row/column of cells has a non-zero number of
+ pixels.
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
+ dx, dy = int((w - 1) // (grid_w - 1)), int((h - 1) // (grid_h - 1))
+
"""
average the green channels into one
"""
diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
index 5a4c5101..a1183989 100644
--- a/utils/raspberrypi/ctt/ctt_cac.py
+++ b/utils/raspberrypi/ctt/ctt_cac.py
@@ -108,12 +108,29 @@ def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
# Now calculate the average pixel shift for each square in the grid
+ grid_incomplete = False
for x in range(output_grid_size - 1):
for y in range(output_grid_size - 1):
- xrgrid[x, y] = np.mean(xrsgrid[x][y])
- yrgrid[x, y] = np.mean(yrsgrid[x][y])
- xbgrid[x, y] = np.mean(xbsgrid[x][y])
- ybgrid[x, y] = np.mean(ybsgrid[x][y])
+ if xrsgrid[x][y]:
+ xrgrid[x, y] = np.mean(xrsgrid[x][y])
+ else:
+ grid_incomplete = True
+ if yrsgrid[x][y]:
+ yrgrid[x, y] = np.mean(yrsgrid[x][y])
+ else:
+ grid_incomplete = True
+ if xbsgrid[x][y]:
+ xbgrid[x, y] = np.mean(xbsgrid[x][y])
+ else:
+ grid_incomplete = True
+ if ybsgrid[x][y]:
+ ybgrid[x, y] = np.mean(ybsgrid[x][y])
+ else:
+ grid_incomplete = True
+
+ if grid_incomplete:
+ raise RuntimeError("\nERROR: CAC measurements do not span the image!"
+ "\nConsider using improved CAC images, or remove them entirely.\n")
# Next, we start to interpolate the central points of the grid that gets passed to the tuning file
input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
@@ -219,7 +236,12 @@ def cac(Cam):
# tuning file
print("\nCreating output grid")
Cam.log += '\nCreating output grid'
- rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+ try:
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+ except RuntimeError as e:
+ print(str(e))
+ Cam.log += "\nCAC correction failed! CAC will not be enabled."
+ return {}
print("CAC correction complete!")
Cam.log += '\nCAC correction complete!'