author    Jacopo Mondi <jacopo.mondi@ideasonboard.com>  2022-11-24 10:33:06 +0100
committer Jacopo Mondi <jacopo.mondi@ideasonboard.com>  2023-01-30 11:04:50 +0100
commit    6f6e1bf704feec3a9bcfc1f5490ae82fe8d63065 (patch)
tree      1144cde876d28953d34c9973d561b14d1c7e8a90 /src/libcamera/pipeline
parent    1a614866a29ce1e3c185d72975ad9fc37c4f99bd (diff)
libcamera: camera_sensor: Apply flips at setFormat()
Augment the CameraSensor::setFormat() function to configure horizontal
and vertical flips before applying the image format on the sensor.
Applying flips before the format is crucial, as they might change the
Bayer pattern ordering.

To allow users of the CameraSensor class to specify a Transform, add a
'transform' member to the V4L2SubdeviceFormat class, initialized to
Transform::Identity by default.

Moving the handling of H/V flips to the CameraSensor class allows
removing a fair amount of boilerplate code from the IPU3 and
RaspberryPi pipeline handlers.

No functional changes intended.

Signed-off-by: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
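For reference, a minimal usage sketch (not part of this patch) of the new
'transform' member: the pipeline handler fills its V4L2SubdeviceFormat as
before, records the combined Transform in it, and lets
CameraSensor::setFormat() program the flip controls before the format is
applied. The 'sensor_', 'mbusCodes', 'size' and 'combinedTransform'
variables are assumed to exist in the caller.

    #include <libcamera/transform.h>

    #include "libcamera/internal/camera_sensor.h"

    /*
     * Sketch only: sensor_ is the CameraSensor owned by the pipeline
     * handler, mbusCodes and size describe the desired sensor mode, and
     * combinedTransform is the Transform computed from the user request
     * and the sensor rotation.
     */
    V4L2SubdeviceFormat sensorFormat = sensor_->getFormat(mbusCodes, size);
    sensorFormat.transform = combinedTransform;

    /*
     * setFormat() now sets the H/V flip controls on the sensor subdevice
     * before applying the format, so the returned mbus code already
     * reflects any Bayer order change caused by the flips.
     */
    int ret = sensor_->setFormat(&sensorFormat);
    if (ret)
        return ret;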
Diffstat (limited to 'src/libcamera/pipeline')
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.cpp                |  6
-rw-r--r--  src/libcamera/pipeline/ipu3/cio2.h                  |  4
-rw-r--r--  src/libcamera/pipeline/ipu3/ipu3.cpp                | 28
-rw-r--r--  src/libcamera/pipeline/raspberrypi/raspberrypi.cpp  | 27
4 files changed, 17 insertions(+), 48 deletions(-)
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index d4e523af..a819884f 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -15,6 +15,7 @@
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
+#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
@@ -177,10 +178,12 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
-int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
@@ -191,6 +194,7 @@ int CIO2Device::configure(const Size &size, V4L2DeviceFormat *outputFormat)
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
+ sensorFormat.transform = transform;
ret = sensor_->setFormat(&sensorFormat);
if (ret)
return ret;
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index 68504a2d..bbd87eb8 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -26,6 +26,7 @@ class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
+enum class Transform;
class CIO2Device
{
@@ -38,7 +39,8 @@ public:
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size, V4L2DeviceFormat *outputFormat);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index a424ac91..3a569c7e 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -51,7 +51,7 @@ class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
- : Camera::Private(pipe), supportsFlips_(false)
+ : Camera::Private(pipe)
{
}
@@ -73,7 +73,6 @@ public:
Stream rawStream_;
Rectangle cropRegion_;
- bool supportsFlips_;
Transform rotationTransform_;
std::unique_ptr<DelayedControls> delayedCtrls_;
@@ -539,7 +538,7 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
- ret = cio2->configure(sensorSize, &cio2Format);
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
@@ -548,24 +547,6 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
data->cropRegion_ = sensorInfo.analogCrop;
/*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- ControlList sensorCtrls(cio2->sensor()->controls());
- sensorCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::HFlip)));
- sensorCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(config->combinedTransform_
- & Transform::VFlip)));
-
- ret = cio2->sensor()->setControls(&sensorCtrls);
- if (ret)
- return ret;
- }
-
- /*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
@@ -1127,11 +1108,6 @@ int PipelineHandlerIPU3::registerCameras()
LOG(IPU3, Warning) << "Invalid rotation of " << rotationValue
<< " degrees: ignoring";
- ControlList ctrls = cio2->sensor()->getControls({ V4L2_CID_HFLIP });
- if (!ctrls.empty())
- /* We assume the sensor supports VFLIP too. */
- data->supportsFlips_ = true;
-
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
index c086a69a..d8232ff8 100644
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
@@ -691,24 +691,12 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
}
}
- /*
- * Configure the H/V flip controls based on the combination of
- * the sensor and user transform.
- */
- if (data->supportsFlips_) {
- const RPiCameraConfiguration *rpiConfig =
- static_cast<const RPiCameraConfiguration *>(config);
- ControlList controls;
-
- controls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::HFlip)));
- controls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(rpiConfig->combinedTransform_ & Transform::VFlip)));
- data->setSensorControls(controls);
- }
-
/* First calculate the best sensor mode we can use based on the user request. */
V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
+ /* Apply any cached transform. */
+ const RPiCameraConfiguration *rpiConfig = static_cast<const RPiCameraConfiguration *>(config);
+ sensorFormat.transform = rpiConfig->combinedTransform_;
+ /* Finally apply the format on the sensor. */
ret = data->sensor_->setFormat(&sensorFormat);
if (ret)
return ret;
@@ -1293,10 +1281,9 @@ int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, Me
* We cache three things about the sensor in relation to transforms
* (meaning horizontal and vertical flips).
*
- * Firstly, does it support them?
- * Secondly, if you use them does it affect the Bayer ordering?
- * Thirdly, what is the "native" Bayer order, when no transforms are
- * applied?
+ * If flips are supported verify if they affect the Bayer ordering
+ * and what the "native" Bayer order is, when no transforms are
+ * applied.
*
* We note that the sensor's cached list of supported formats is
* already in the "native" order, with any flips having been undone.