Diffstat (limited to 'src/ipa/libipa')
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.cpp    | 577
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.h      |  96
-rw-r--r--  src/ipa/libipa/algorithm.cpp             |  29
-rw-r--r--  src/ipa/libipa/algorithm.h               |  20
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.cpp  | 217
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.h    |  51
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.cpp  | 246
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.h    |  53
-rw-r--r--  src/ipa/libipa/fc_queue.cpp              | 140
-rw-r--r--  src/ipa/libipa/fc_queue.h                | 118
-rw-r--r--  src/ipa/libipa/histogram.cpp             |  32
-rw-r--r--  src/ipa/libipa/histogram.h               |  19
-rw-r--r--  src/ipa/libipa/meson.build               |   6
-rw-r--r--  src/ipa/libipa/module.cpp                |   4
-rw-r--r--  src/ipa/libipa/module.h                  |   2
15 files changed, 1531 insertions, 79 deletions
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
new file mode 100644
index 00000000..271b5ae4
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -0,0 +1,577 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#include "agc_mean_luminance.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "exposure_mode_helper.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file agc_mean_luminance.h
+ * \brief Base class implementing mean luminance AEGC
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(AgcMeanLuminance)
+
+namespace ipa {
+
+/*
+ * Number of frames for which to run the algorithm at full speed, before slowing
+ * down to prevent large and jarring changes in exposure from frame to frame.
+ */
+static constexpr uint32_t kNumStartupFrames = 10;
+
+/*
+ * Default relative luminance target
+ *
+ * This value should be chosen so that when the camera points at a grey target,
+ * the resulting image brightness looks "right". Custom values can be passed
+ * as the relativeLuminanceTarget value in sensor tuning files.
+ */
+static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
+
+/**
+ * \struct AgcMeanLuminance::AgcConstraint
+ * \brief The boundaries and target for an AeConstraintMode constraint
+ *
+ * This structure describes an AeConstraintMode constraint for the purposes of
+ * this algorithm. These constraints are expressed as a pair of quantile
+ * boundaries for a histogram, along with a luminance target and a bounds-type.
+ * The algorithm uses the constraints by ensuring that the defined portion of a
+ * luminance histogram (i.e. lying between the two quantiles) is above or below
+ * the given luminance value.
+ */
+
+/**
+ * \enum AgcMeanLuminance::AgcConstraint::Bound
+ * \brief Specify whether the constraint defines a lower or upper bound
+ * \var AgcMeanLuminance::AgcConstraint::lower
+ * \brief The constraint defines a lower bound
+ * \var AgcMeanLuminance::AgcConstraint::upper
+ * \brief The constraint defines an upper bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::bound
+ * \brief The type of constraint bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qLo
+ * \brief The lower quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qHi
+ * \brief The upper quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::yTarget
+ * \brief The luminance target for the constraint
+ */
+
+/**
+ * \class AgcMeanLuminance
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates a shutter time, analogue and digital gain such that
+ * the normalised mean luminance value of an image is driven towards a target,
+ * which itself is discovered from tuning data. The algorithm is a two-stage
+ * process.
+ *
+ * In the first stage, an initial gain value is derived by iteratively comparing
+ * the gain-adjusted mean luminance across the entire image against a target,
+ * and selecting a value which pushes it as closely as possible towards the
+ * target.
+ *
+ * In the second stage we calculate the gain required to drive the average of a
+ * section of a histogram to a target value, where the target and the boundaries
+ * of the section of the histogram used in the calculation are taken from the
+ * values defined for the currently configured AeConstraintMode within the
+ * tuning data. This class provides a helper function to parse those tuning data
+ * to discover the constraints, and so requires a specific format for those
+ * data which is described in \ref parseTuningData(). The gain from the first
+ * stage is then clamped to the gain from this stage.
+ *
+ * The final gain is used to adjust the effective exposure value of the image,
+ * and that new exposure value is divided into shutter time, analogue gain and
+ * digital gain according to the selected AeExposureMode. This class uses the
+ * \ref ExposureModeHelper class to assist in that division, and expects the
+ * data needed to initialise that class to be present in tuning data in a
+ * format described in \ref parseTuningData().
+ *
+ * In order to be able to use this algorithm an IPA module needs to be able to
+ * do the following:
+ *
+ * 1. Provide a luminance estimation across an entire image.
+ * 2. Provide a luminance Histogram for the image to use in calculating
+ * constraint compliance. The precision of the Histogram that is available
+ * will determine the supportable precision of the constraints.
+ *
+ * IPA modules that want to use this class to implement their AEGC algorithm
+ * should derive from it and provide an overriding estimateLuminance() function
+ * for this class to use. They must call parseTuningData() in init(), and must
+ * also call setLimits() and resetFrameCount() in configure(). They may then use
+ * calculateNewEv() in process(). If the limits passed to setLimits() change for
+ * any reason (for example, in response to a FrameDurationLimit control being
+ * passed in queueRequest()) then setLimits() must be called again with the new
+ * values.
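+ *
+ * As an illustration, a hypothetical derived class might wire the helper up
+ * as follows (the class, the SensorLimits type and the statistics helper are
+ * illustrative only, not part of libipa):
+ *
+ * \code
+ * class Agc : public ipa::AgcMeanLuminance
+ * {
+ * public:
+ *         int init(const YamlObject &tuningData)
+ *         {
+ *                 return parseTuningData(tuningData);
+ *         }
+ *
+ *         int configure(const SensorLimits &limits)
+ *         {
+ *                 setLimits(limits.minShutter, limits.maxShutter,
+ *                           limits.minGain, limits.maxGain);
+ *                 resetFrameCount();
+ *                 return 0;
+ *         }
+ *
+ *         void process(uint32_t constraintMode, uint32_t exposureMode,
+ *                      const Histogram &yHist, utils::Duration effectiveEv)
+ *         {
+ *                 auto [shutter, aGain, dGain] =
+ *                         calculateNewEv(constraintMode, exposureMode,
+ *                                        yHist, effectiveEv);
+ *                 // Apply shutter, aGain and dGain to the sensor and ISP.
+ *         }
+ *
+ * private:
+ *         double estimateLuminance(double gain) const override
+ *         {
+ *                 // Hardware-specific: return the gain-adjusted, normalised
+ *                 // mean luminance computed from the latest ISP statistics.
+ *                 return statistics_.meanLuminance(gain);
+ *         }
+ * };
+ * \endcode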
+ */
+
+AgcMeanLuminance::AgcMeanLuminance()
+ : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0)
+{
+}
+
+AgcMeanLuminance::~AgcMeanLuminance() = default;
+
+void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData)
+{
+ relativeLuminanceTarget_ =
+ tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget);
+}
+
+void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id)
+{
+ for (const auto &[boundName, content] : modeDict.asDict()) {
+ if (boundName != "upper" && boundName != "lower") {
+ LOG(AgcMeanLuminance, Warning)
+ << "Ignoring unknown constraint bound '" << boundName << "'";
+ continue;
+ }
+
+ unsigned int idx = static_cast<unsigned int>(boundName == "upper");
+ AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx);
+ double qLo = content["qLo"].get<double>().value_or(0.98);
+ double qHi = content["qHi"].get<double>().value_or(1.0);
+ double yTarget =
+ content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0);
+
+ AgcConstraint constraint = { bound, qLo, qHi, yTarget };
+
+ if (!constraintModes_.count(id))
+ constraintModes_[id] = {};
+
+ if (idx)
+ constraintModes_[id].push_back(constraint);
+ else
+ constraintModes_[id].insert(constraintModes_[id].begin(), constraint);
+ }
+}
+
+int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableConstraintModes;
+
+ const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()];
+ if (yamlConstraintModes.isDictionary()) {
+ for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) {
+ if (AeConstraintModeNameValueMap.find(modeName) ==
+ AeConstraintModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown constraint mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid constraint mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ parseConstraint(modeDict,
+ AeConstraintModeNameValueMap.at(modeName));
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If the tuning data file contains no constraints then we use the
+ * default constraint that the IPU3/RkISP1 Agc algorithms were adhering
+ * to anyway before centralisation; this constraint forces the top 2% of
+ * the histogram to be at least 0.5.
+ */
+ if (constraintModes_.empty()) {
+ AgcConstraint constraint = {
+ AgcConstraint::Bound::lower,
+ 0.98,
+ 1.0,
+ 0.5
+ };
+
+ constraintModes_[controls::ConstraintNormal].insert(
+ constraintModes_[controls::ConstraintNormal].begin(),
+ constraint);
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at("ConstraintNormal"));
+ }
+
+ controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes);
+
+ return 0;
+}
+
+int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableExposureModes;
+
+ const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()];
+ if (yamlExposureModes.isDictionary()) {
+ for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) {
+ if (AeExposureModeNameValueMap.find(modeName) ==
+ AeExposureModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown exposure mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeValues.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid exposure mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> shutters =
+ modeValues["shutter"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> gains =
+ modeValues["gain"].getList<double>().value_or(std::vector<double>{});
+
+ if (shutters.size() != gains.size()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Shutter and gain array sizes unequal";
+ return -EINVAL;
+ }
+
+ if (shutters.empty()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Shutter and gain arrays are empty";
+ return -EINVAL;
+ }
+
+ std::vector<std::pair<utils::Duration, double>> stages;
+ for (unsigned int i = 0; i < shutters.size(); i++) {
+ stages.push_back({
+ std::chrono::microseconds(shutters[i]),
+ gains[i]
+ });
+ }
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper;
+ availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If we don't have any exposure modes in the tuning data we create an
+ * ExposureModeHelper using an empty vector of stages. This will result
+ * in the ExposureModeHelper simply driving the shutter as high as
+ * possible before touching gain.
+ */
+ if (availableExposureModes.empty()) {
+ int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal");
+ std::vector<std::pair<utils::Duration, double>> stages = { };
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[exposureModeId] = helper;
+ availableExposureModes.push_back(exposureModeId);
+ }
+
+ controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes);
+
+ return 0;
+}
+
+/**
+ * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses tuning data to build the list of allowed values for the
+ * AeConstraintMode and AeExposureMode controls. Those tuning data must provide
+ * the data in a specific format; the Agc algorithm's tuning data should contain
+ * a dictionary called AeConstraintMode containing per-mode setting dictionaries
+ * with the key being a value from \ref controls::AeConstraintModeNameValueMap.
+ * Each mode dict may contain either a "lower" or "upper" key or both, for
+ * example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeConstraintMode:
+ * ConstraintNormal:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * ConstraintHighlight:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * upper:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.8
+ *
+ * \endcode
+ *
+ * For the AeExposureMode control the data should contain a dictionary called
+ * AeExposureMode containing per-mode setting dictionaries with the key being a
+ * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should
+ * contain an array of shutter times with the key "shutter" and an array of gain
+ * values with the key "gain", in this format:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeExposureMode:
+ * ExposureNormal:
+ * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ * ExposureShort:
+ * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ *
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
+{
+ int ret;
+
+ parseRelativeLuminanceTarget(tuningData);
+
+ ret = parseConstraintModes(tuningData);
+ if (ret)
+ return ret;
+
+ return parseExposureModes(tuningData);
+}
+
+/**
+ * \brief Set the ExposureModeHelper limits for this class
+ * \param[in] minShutter Minimum shutter time to allow
+ * \param[in] maxShutter Maximum shutter time to allow
+ * \param[in] minGain Minimum gain to allow
+ * \param[in] maxGain Maximum gain to allow
+ *
+ * This function calls \ref ExposureModeHelper::setLimits() for each
+ * ExposureModeHelper that has been created for this class.
+ */
+void AgcMeanLuminance::setLimits(utils::Duration minShutter,
+ utils::Duration maxShutter,
+ double minGain, double maxGain)
+{
+ for (auto &[id, helper] : exposureModeHelpers_)
+ helper->setLimits(minShutter, maxShutter, minGain, maxGain);
+}
+
+/**
+ * \fn AgcMeanLuminance::constraintModes()
+ * \brief Get the constraint modes that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::exposureModeHelpers()
+ * \brief Get the ExposureModeHelpers that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::controls()
+ * \brief Get the controls that have been generated after parsing tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::estimateLuminance(const double gain)
+ * \brief Estimate the luminance of an image, adjusted by a given gain
+ * \param[in] gain The gain with which to adjust the luminance estimate
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied. It is a
+ * pure virtual function because estimation of luminance is a hardware-specific
+ * operation, which depends wholly on the format of the stats that are delivered
+ * to libcamera from the ISP. Derived classes must override this function with
+ * one that calculates the normalised mean luminance value across the entire
+ * image.
+ *
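+ * As an illustration, a derived class might compute the estimate from per-zone
+ * RGB means in the ISP statistics (the stats layout and luma weights below are
+ * illustrative only):
+ *
+ * \code
+ * double MyAgc::estimateLuminance(double gain) const
+ * {
+ *         double sum = 0.0;
+ *
+ *         for (const auto &zone : stats_.zones) {
+ *                 double r = std::min(zone.meanR * gain, 255.0);
+ *                 double g = std::min(zone.meanG * gain, 255.0);
+ *                 double b = std::min(zone.meanB * gain, 255.0);
+ *
+ *                 sum += 0.2125 * r + 0.7154 * g + 0.0722 * b;
+ *         }
+ *
+ *         // Normalise to [0, 1] assuming 8-bit statistics.
+ *         return sum / stats_.zones.size() / 255.0;
+ * }
+ * \endcode
+ *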
+ * \return The normalised relative luminance of the image
+ */
+
+/**
+ * \brief Estimate the initial gain needed to achieve a relative luminance
+ * target
+ * \return The calculated initial gain
+ */
+double AgcMeanLuminance::estimateInitialGain() const
+{
+ double yTarget = relativeLuminanceTarget_;
+ double yGain = 1.0;
+
+ /*
+ * To account for non-linearity caused by saturation, the value needs to
+ * be estimated in an iterative process, as multiplying by a gain will
+ * not increase the relative luminance by the same factor if some image
+ * regions are saturated.
+ */
+ for (unsigned int i = 0; i < 8; i++) {
+ double yValue = estimateLuminance(yGain);
+ double extra_gain = std::min(10.0, yTarget / (yValue + .001));
+
+ yGain *= extra_gain;
+ LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue
+ << ", Y target: " << yTarget
+ << ", gives gain " << yGain;
+
+ if (utils::abs_diff(extra_gain, 1.0) < 0.01)
+ break;
+ }
+
+ return yGain;
+}
+
+/**
+ * \brief Clamp gain within the bounds of a defined constraint
+ * \param[in] constraintModeIndex The index of the constraint to adhere to
+ * \param[in] hist A histogram over which to calculate inter-quantile means
+ * \param[in] gain The gain to clamp
+ *
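+ * For example (illustrative numbers): with a 256-bin histogram whose
+ * inter-quantile mean over [0.98, 1.0] is 64, a lower-bound constraint with a
+ * yTarget of 0.5 gives newGain = 0.5 * 256 / 64 = 2.0, so any smaller gain is
+ * raised to 2.0, while an upper-bound constraint with the same values would
+ * cap larger gains at 2.0.
+ *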
+ * \return The gain clamped within the constraint bounds
+ */
+double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain)
+{
+ std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex];
+ for (const AgcConstraint &constraint : constraints) {
+ double newGain = constraint.yTarget * hist.bins() /
+ hist.interQuantileMean(constraint.qLo, constraint.qHi);
+
+ if (constraint.bound == AgcConstraint::Bound::lower &&
+ newGain > gain)
+ gain = newGain;
+
+ if (constraint.bound == AgcConstraint::Bound::upper &&
+ newGain < gain)
+ gain = newGain;
+ }
+
+ return gain;
+}
+
+/**
+ * \brief Apply a filter on the exposure value to limit the speed of changes
+ * \param[in] exposureValue The target exposure from the AGC algorithm
+ *
+ * The speed of the filter is adaptive, and will produce the target quicker
+ * during startup, or when the target exposure is within 20% of the most recent
+ * filter output.
+ *
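+ * For example (illustrative numbers): outside of startup the speed is 0.2, so
+ * with a previous output of 10ms and a target of 20ms the new output is
+ * 0.2 * 20ms + 0.8 * 10ms = 12ms. During the first kNumStartupFrames frames
+ * the speed is 1.0 and the target is adopted immediately, and when the
+ * previous output is within 20% of the target the speed becomes sqrt(0.2),
+ * approximately 0.45.
+ *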
+ * \return The filtered exposure
+ */
+utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
+{
+ double speed = 0.2;
+
+ /* Adapt instantly if we are in startup phase. */
+ if (frameCount_ < kNumStartupFrames)
+ speed = 1.0;
+
+ /*
+ * If we are close to the desired result, go faster to avoid making
+ * multiple micro-adjustments.
+ * \todo Make this customisable?
+ */
+ if (filteredExposure_ < 1.2 * exposureValue &&
+ filteredExposure_ > 0.8 * exposureValue)
+ speed = sqrt(speed);
+
+ filteredExposure_ = speed * exposureValue +
+ filteredExposure_ * (1.0 - speed);
+
+ return filteredExposure_;
+}
+
+/**
+ * \brief Calculate the new exposure value and split it between shutter time and gain
+ * \param[in] constraintModeIndex The index of the current constraint mode
+ * \param[in] exposureModeIndex The index of the current exposure mode
+ * \param[in] yHist A Histogram from the ISP statistics to use in constraining
+ * the calculated gain
+ * \param[in] effectiveExposureValue The EV applied to the frame from which the
+ * statistics in use derive
+ *
+ * Calculate a new exposure value to try to obtain the target. The calculated
+ * exposure value is filtered to prevent rapid changes from frame to frame, and
+ * divided into shutter time, analogue and digital gain.
+ *
+ * \return Tuple of shutter time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
+ uint32_t exposureModeIndex,
+ const Histogram &yHist,
+ utils::Duration effectiveExposureValue)
+{
+ /*
+ * The pipeline handler should validate that we have received an allowed
+ * value for AeExposureMode.
+ */
+ std::shared_ptr<ExposureModeHelper> exposureModeHelper =
+ exposureModeHelpers_.at(exposureModeIndex);
+
+ double gain = estimateInitialGain();
+ gain = constraintClampGain(constraintModeIndex, yHist, gain);
+
+ /*
+ * We don't check whether we're already close to the target, because
+ * even if the effective exposure value is the same as the last frame's
+ * we could have switched to an exposure mode that would require a new
+ * pass through the splitExposure() function.
+ */
+
+ utils::Duration newExposureValue = effectiveExposureValue * gain;
+
+ /*
+ * We filter the exposure value to make sure changes are not too jarring
+ * from frame to frame.
+ */
+ newExposureValue = filterExposure(newExposureValue);
+
+ frameCount_++;
+ return exposureModeHelper->splitExposure(newExposureValue);
+}
+
+/**
+ * \fn AgcMeanLuminance::resetFrameCount()
+ * \brief Reset the frame counter
+ *
+ * This function resets the internal frame counter, which exists to help the
+ * algorithm decide whether it should respond instantly or not. The expectation
+ * is for derived classes to call this function before each camera start call in
+ * their configure() function.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
new file mode 100644
index 00000000..0a81c6d2
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "exposure_mode_helper.h"
+#include "histogram.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AgcMeanLuminance
+{
+public:
+ AgcMeanLuminance();
+ virtual ~AgcMeanLuminance();
+
+ struct AgcConstraint {
+ enum class Bound {
+ lower = 0,
+ upper = 1
+ };
+ Bound bound;
+ double qLo;
+ double qHi;
+ double yTarget;
+ };
+
+ int parseTuningData(const YamlObject &tuningData);
+
+ void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ double minGain, double maxGain);
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
+ {
+ return constraintModes_;
+ }
+
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers()
+ {
+ return exposureModeHelpers_;
+ }
+
+ ControlInfoMap::Map controls()
+ {
+ return controls_;
+ }
+
+ std::tuple<utils::Duration, double, double>
+ calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex,
+ const Histogram &yHist, utils::Duration effectiveExposureValue);
+
+ void resetFrameCount()
+ {
+ frameCount_ = 0;
+ }
+
+private:
+ virtual double estimateLuminance(const double gain) const = 0;
+
+ void parseRelativeLuminanceTarget(const YamlObject &tuningData);
+ void parseConstraint(const YamlObject &modeDict, int32_t id);
+ int parseConstraintModes(const YamlObject &tuningData);
+ int parseExposureModes(const YamlObject &tuningData);
+ double estimateInitialGain() const;
+ double constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain);
+ utils::Duration filterExposure(utils::Duration exposureValue);
+
+ uint64_t frameCount_;
+ utils::Duration filteredExposure_;
+ double relativeLuminanceTarget_;
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes_;
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_;
+ ControlInfoMap::Map controls_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
index 8549fe3f..201efdfd 100644
--- a/src/ipa/libipa/algorithm.cpp
+++ b/src/ipa/libipa/algorithm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.cpp - IPA control algorithm interface
+ * IPA control algorithm interface
*/
#include "algorithm.h"
@@ -67,10 +67,29 @@ namespace ipa {
*/
/**
+ * \fn Algorithm::queueRequest()
+ * \brief Provide control values to the algorithm
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame number to which the control values apply
+ * \param[in] frameContext The current frame's context
+ * \param[in] controls The list of user controls
+ *
+ * This function is called for each request queued to the camera. It provides
+ * the controls stored in the request to the algorithm. The \a frame number
+ * is the Request sequence number and identifies the frame on which the
+ * controls are intended to take effect.
+ *
+ * Algorithms shall read the applicable controls and store their value for later
+ * use during frame processing.
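+ *
+ * A minimal sketch (the algorithm, its context types and the stored field are
+ * illustrative only):
+ *
+ * \code
+ * void MyAgc::queueRequest(IPAContext &context, const uint32_t frame,
+ *                          IPAFrameContext &frameContext,
+ *                          const ControlList &controls)
+ * {
+ *         // Record the requested exposure time for later use in process().
+ *         const auto exposureTime = controls.get(controls::ExposureTime);
+ *         if (exposureTime)
+ *                 frameContext.agc.exposureTime = *exposureTime;
+ * }
+ * \endcode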
+ */
+
+/**
* \fn Algorithm::prepare()
* \brief Fill the \a params buffer with ISP processing parameters for a frame
* \param[in] context The shared IPA context
- * \param[out] params The ISP specific parameters.
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The ISP specific parameters
*
* This function is called for every frame when the camera is running before it
* is processed by the ISP to prepare the ISP processing parameters for that
@@ -85,13 +104,15 @@ namespace ipa {
* \fn Algorithm::process()
* \brief Process ISP statistics, and run algorithm operations
* \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
* \param[in] frameContext The current frame's context
* \param[in] stats The IPA statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
*
* This function is called while camera is running for every frame processed by
* the ISP, to process statistics generated from that frame by the ISP.
- * Algorithms shall use this data to run calculations and update their state
- * accordingly.
+ * Algorithms shall use this data to run calculations, update their state
+ * accordingly, and fill the frame metadata.
*
* Processing shall not take an undue amount of time, and any extended or
* computationally expensive calculations or operations must be handled
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
index 2a8871d8..9a19dbd6 100644
--- a/src/ipa/libipa/algorithm.h
+++ b/src/ipa/libipa/algorithm.h
@@ -2,13 +2,16 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - ISP control algorithm interface
+ * ISP control algorithm interface
*/
#pragma once
#include <memory>
+#include <stdint.h>
#include <string>
+#include <libcamera/controls.h>
+
namespace libcamera {
class YamlObject;
@@ -35,14 +38,25 @@ public:
return 0;
}
+ virtual void queueRequest([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const ControlList &controls)
+ {
+ }
+
virtual void prepare([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
[[maybe_unused]] typename Module::Params *params)
{
}
virtual void process([[maybe_unused]] typename Module::Context &context,
- [[maybe_unused]] typename Module::FrameContext *frameContext,
- [[maybe_unused]] const typename Module::Stats *stats)
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const typename Module::Stats *stats,
+ [[maybe_unused]] ControlList &metadata)
{
}
};
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index d4dba497..2cd61fcc 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.cpp - Helper class that performs sensor-specific
+ * Helper class that performs sensor-specific
* parameter computations
*/
#include "camera_sensor_helper.h"
@@ -43,7 +43,8 @@ namespace ipa {
* \brief Construct a CameraSensorHelper instance
*
* CameraSensorHelper derived class instances shall never be constructed
- * manually but always through the CameraSensorHelperFactory::create() function.
+ * manually but always through the CameraSensorHelperFactoryBase::create()
+ * function.
*/
/**
@@ -217,27 +218,25 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*/
/**
- * \class CameraSensorHelperFactory
- * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \class CameraSensorHelperFactoryBase
+ * \brief Base class for camera sensor helper factories
*
- * To facilitate discovery and instantiation of CameraSensorHelper classes, the
- * CameraSensorHelperFactory class maintains a registry of camera sensor helper
- * sub-classes. Each CameraSensorHelper subclass shall register itself using the
- * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
- * instance of a CameraSensorHelperFactory subclass and register it with the
- * static list of factories.
+ * The CameraSensorHelperFactoryBase class is the base of all specializations of
+ * the CameraSensorHelperFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a camera sensor helper factory
+ * \brief Construct a camera sensor helper factory base
* \param[in] name Name of the camera sensor helper class
*
- * Creating an instance of the factory registers it with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
- * The factory \a name is used for debug purpose and shall be unique.
+ * The factory \a name is used to look up factories and shall be unique.
*/
-CameraSensorHelperFactory::CameraSensorHelperFactory(const std::string name)
+CameraSensorHelperFactoryBase::CameraSensorHelperFactoryBase(const std::string name)
: name_(name)
{
registerType(this);
@@ -252,17 +251,16 @@ CameraSensorHelperFactory::CameraSensorHelperFactory(const std::string name)
* corresponding to the named factory or a null pointer if no such factory
* exists
*/
-std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactory::create(const std::string &name)
+std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactoryBase::create(const std::string &name)
{
- std::vector<CameraSensorHelperFactory *> &factories =
- CameraSensorHelperFactory::factories();
+ const std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
- for (CameraSensorHelperFactory *factory : factories) {
+ for (const CameraSensorHelperFactoryBase *factory : factories) {
if (name != factory->name_)
continue;
- CameraSensorHelper *helper = factory->createInstance();
- return std::unique_ptr<CameraSensorHelper>(helper);
+ return factory->createInstance();
}
return nullptr;
@@ -275,10 +273,10 @@ std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactory::create(const std:
* The caller is responsible to guarantee the uniqueness of the camera sensor
* helper name.
*/
-void CameraSensorHelperFactory::registerType(CameraSensorHelperFactory *factory)
+void CameraSensorHelperFactoryBase::registerType(CameraSensorHelperFactoryBase *factory)
{
- std::vector<CameraSensorHelperFactory *> &factories =
- CameraSensorHelperFactory::factories();
+ std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
factories.push_back(factory);
}
@@ -287,33 +285,49 @@ void CameraSensorHelperFactory::registerType(CameraSensorHelperFactory *factory)
* \brief Retrieve the list of all camera sensor helper factories
* \return The list of camera sensor helper factories
*/
-std::vector<CameraSensorHelperFactory *> &CameraSensorHelperFactory::factories()
+std::vector<CameraSensorHelperFactoryBase *> &CameraSensorHelperFactoryBase::factories()
{
/*
* The static factories map is defined inside the function to ensure
* it gets initialized on first use, without any dependency on link
* order.
*/
- static std::vector<CameraSensorHelperFactory *> factories;
+ static std::vector<CameraSensorHelperFactoryBase *> factories;
return factories;
}
/**
- * \fn CameraSensorHelperFactory::createInstance()
- * \brief Create an instance of the CameraSensorHelper corresponding to the
- * factory
+ * \class CameraSensorHelperFactory
+ * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \tparam _Helper The camera sensor helper class type for this factory
*
- * This virtual function is implemented by the REGISTER_CAMERA_SENSOR_HELPER()
- * macro. It creates a camera sensor helper instance associated with the camera
- * sensor model.
+ * To facilitate discovery and instantiation of CameraSensorHelper classes, the
+ * CameraSensorHelperFactory class implements auto-registration of camera sensor
+ * helpers. Each CameraSensorHelper subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
+ * instance of a CameraSensorHelperFactory subclass and register it with the
+ * static list of factories.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::CameraSensorHelperFactory(const char *name)
+ * \brief Construct a camera sensor helper factory
+ * \param[in] name Name of the camera sensor helper class
*
- * \return A pointer to a newly constructed instance of the CameraSensorHelper
- * subclass corresponding to the factory
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorHelperFactoryBase::factories()
+ * function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
*/
/**
- * \var CameraSensorHelperFactory::name_
- * \brief The name of the factory
+ * \fn CameraSensorHelperFactory::createInstance() const
+ * \brief Create an instance of the CameraSensorHelper corresponding to the
+ * factory
+ *
+ * \return A unique pointer to a newly constructed instance of the
+ * CameraSensorHelper subclass corresponding to the factory
*/
/**
@@ -352,6 +366,35 @@ static constexpr double expGainDb(double step)
return log2_10 * step / 20;
}
+class CameraSensorHelperAr0521 : public CameraSensorHelper
+{
+public:
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+
+private:
+ static constexpr double kStep_ = 16;
+};
+
+uint32_t CameraSensorHelperAr0521::gainCode(double gain) const
+{
+ gain = std::clamp(gain, 1.0, 15.5);
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
+
+ return (coarse << 4) | (fine & 0xf);
+}
+
+double CameraSensorHelperAr0521::gain(uint32_t gainCode) const
+{
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+
+ return (1 << coarse) * (1 + fine / kStep_);
+}
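+
+/*
+ * Round-trip example (illustrative, not from the sensor manual): a requested
+ * gain of 4.5 yields coarse = 2 and fine = 2, hence gain code 0x22, and
+ * gain(0x22) returns 4 * (1 + 2 / 16) = 4.5.
+ */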
+
+REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+
class CameraSensorHelperImx219 : public CameraSensorHelper
{
public:
@@ -374,6 +417,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
+class CameraSensorHelperImx283 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx283()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 0, 2048, -1, 2048 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
+
class CameraSensorHelperImx290 : public CameraSensorHelper
{
public:
@@ -396,6 +450,33 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
+class CameraSensorHelperImx327 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327)
+
+class CameraSensorHelperImx335 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx335()
+ {
+ gainType_ = AnalogueGainExponential;
+ gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
+
+class CameraSensorHelperImx415 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx415()
+ {
+ gainType_ = AnalogueGainExponential;
+ gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+
class CameraSensorHelperImx477 : public CameraSensorHelper
{
public:
@@ -407,6 +488,21 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
+class CameraSensorHelperOv2685 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2685()
+ {
+ /*
+ * The Sensor Manual doesn't appear to document the gain model.
+ * This has been validated with some empirical testing only.
+ */
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
+
class CameraSensorHelperOv2740 : public CameraSensorHelper
{
public:
@@ -418,6 +514,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
+class CameraSensorHelperOv4689 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv4689()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
+
class CameraSensorHelperOv5640 : public CameraSensorHelper
{
public:
@@ -429,6 +536,17 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
+class CameraSensorHelperOv5647 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5647()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
+
class CameraSensorHelperOv5670 : public CameraSensorHelper
{
public:
@@ -462,6 +580,35 @@ public:
};
REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
+class CameraSensorHelperOv64a40 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv64a40()
+ {
+ gainType_ = AnalogueGainLinear;
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
+
+class CameraSensorHelperOv8858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8858()
+ {
+ gainType_ = AnalogueGainLinear;
+
+ /*
+ * \todo Validate the selected 1/128 step value as it differs
+ * from what the sensor manual describes.
+ *
+ * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
+ */
+ gainConstants_.linear = { 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
+
class CameraSensorHelperOv8865 : public CameraSensorHelper
{
public:
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 7351fc7c..0d99073b 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.h - Helper class that performs sensor-specific parameter computations
+ * Helper class that performs sensor-specific parameter computations
*/
#pragma once
@@ -58,39 +58,44 @@ private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
};
-class CameraSensorHelperFactory
+class CameraSensorHelperFactoryBase
{
public:
- CameraSensorHelperFactory(const std::string name);
- virtual ~CameraSensorHelperFactory() = default;
+ CameraSensorHelperFactoryBase(const std::string name);
+ virtual ~CameraSensorHelperFactoryBase() = default;
static std::unique_ptr<CameraSensorHelper> create(const std::string &name);
- static void registerType(CameraSensorHelperFactory *factory);
- static std::vector<CameraSensorHelperFactory *> &factories();
-
-protected:
- virtual CameraSensorHelper *createInstance() = 0;
+ static std::vector<CameraSensorHelperFactoryBase *> &factories();
private:
- LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactory)
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactoryBase)
+
+ static void registerType(CameraSensorHelperFactoryBase *factory);
+
+ virtual std::unique_ptr<CameraSensorHelper> createInstance() const = 0;
std::string name_;
};
-#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
-class helper##Factory final : public CameraSensorHelperFactory \
-{ \
-public: \
- helper##Factory() : CameraSensorHelperFactory(name) {} \
- \
-private: \
- CameraSensorHelper *createInstance() \
- { \
- return new helper(); \
- } \
-}; \
-static helper##Factory global_##helper##Factory;
+template<typename _Helper>
+class CameraSensorHelperFactory final : public CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactory(const char *name)
+ : CameraSensorHelperFactoryBase(name)
+ {
+ }
+
+private:
+ std::unique_ptr<CameraSensorHelper> createInstance() const override
+ {
+ return std::make_unique<_Helper>();
+ }
+};
+
+#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
+static CameraSensorHelperFactory<helper> global_##helper##Factory(name);
} /* namespace ipa */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
new file mode 100644
index 00000000..683a564a
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+#include "exposure_mode_helper.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file exposure_mode_helper.h
+ * \brief Helper class that performs computations relating to exposure
+ *
+ * AEGC algorithms need to split exposure between shutter time, analogue gain
+ * and digital gain. Multiple implementations do so based on paired stages of
+ * shutter time and gain limits; this helper avoids duplicating that code.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(ExposureModeHelper)
+
+namespace ipa {
+
+/**
+ * \class ExposureModeHelper
+ * \brief Class for splitting exposure into shutter time and total gain
+ *
+ * The ExposureModeHelper class provides a standard interface through which an
+ * AEGC algorithm can divide exposure between shutter time and gain. It is
+ * configured with a set of shutter time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing shutter time up to the shutter time value
+ * from the first pair in the set in an attempt to meet the required exposure
+ * value.
+ *
+ * If the required exposure is not achievable by the first shutter time value
+ * alone it ramps gain up to the value from the first pair in the set. If the
+ * required exposure is still not met it then allows shutter time to ramp up to
+ * the shutter time value from the second pair in the set, and continues in this
+ * vein until either the required exposure time is met, or else the hardware's
+ * shutter time or gain limits are reached.
+ *
+ * This method allows users to strike a balance between a well-exposed image and
+ * an acceptable frame-rate, as opposed to simply maximising shutter time
+ * followed by gain. The same helpers can be used to perform the latter
+ * operation if needed by passing an empty set of pairs to the initialisation
+ * function.
+ *
+ * The gain values may exceed a camera sensor's analogue gain limits if either
+ * it or the IPA is also capable of digital gain. The configure() function must
+ * be called with the hardware's limits to inform the helper of those
+ * constraints. Any gain that is needed will be applied as analogue gain first
+ * until the hardware's limit is reached, following which digital gain will be
+ * used.
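+ *
+ * As an illustration (stage and limit values are arbitrary), a helper could be
+ * constructed and used as follows:
+ *
+ * \code
+ * std::vector<std::pair<utils::Duration, double>> stages = {
+ *         { 10ms, 2.0 },
+ *         { 33ms, 4.0 },
+ * };
+ * ExposureModeHelper helper(stages);
+ *
+ * // Hardware limits, typically taken from the sensor at configure time.
+ * helper.setLimits(100us, 33ms, 1.0, 8.0);
+ *
+ * // With the values above this returns 20ms, 2.0 and 1.0 respectively.
+ * auto [shutter, analogueGain, digitalGain] = helper.splitExposure(40ms);
+ * \endcode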
+ */
+
+/**
+ * \brief Construct an ExposureModeHelper instance
+ * \param[in] stages The vector of paired shutter time and gain limits
+ *
+ * The input stages are shutter time and _total_ gain pairs; the gain
+ * encompasses both analogue and digital gain.
+ *
+ * The vector of stages may be empty. In that case, the helper will simply use
+ * the runtime limits set through setLimits() instead.
+ */
+ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
+{
+ minShutter_ = 0us;
+ maxShutter_ = 0us;
+ minGain_ = 0;
+ maxGain_ = 0;
+
+ for (const auto &[s, g] : stages) {
+ shutters_.push_back(s);
+ gains_.push_back(g);
+ }
+}
+
+/**
+ * \brief Set the shutter time and gain limits
+ * \param[in] minShutter The minimum shutter time supported
+ * \param[in] maxShutter The maximum shutter time supported
+ * \param[in] minGain The minimum analogue gain supported
+ * \param[in] maxGain The maximum analogue gain supported
+ *
+ * This function configures the shutter time and analogue gain limits that need
+ * to be adhered to as the helper divides up exposure. Note that this function
+ * *must* be called whenever those limits change and before splitExposure() is
+ * used.
+ *
+ * If the algorithm using the helpers needs to indicate that either shutter time
+ * or analogue gain or both should be fixed it can do so by setting both the
+ * minima and maxima to the same value.
+ */
+void ExposureModeHelper::setLimits(utils::Duration minShutter,
+ utils::Duration maxShutter,
+ double minGain, double maxGain)
+{
+ minShutter_ = minShutter;
+ maxShutter_ = maxShutter;
+ minGain_ = minGain;
+ maxGain_ = maxGain;
+}
+
+utils::Duration ExposureModeHelper::clampShutter(utils::Duration shutter) const
+{
+ return std::clamp(shutter, minShutter_, maxShutter_);
+}
+
+double ExposureModeHelper::clampGain(double gain) const
+{
+ return std::clamp(gain, minGain_, maxGain_);
+}
+
+/**
+ * \brief Split exposure time into shutter time and gain
+ * \param[in] exposure Exposure time
+ *
+ * This function divides a given exposure time into shutter time, analogue and
+ * digital gain by iterating through stages of shutter time and gain limits. At
+ * each stage the current stage's shutter time limit is multiplied by the
+ * previous stage's gain limit (or 1.0 initially) to see if the combination of
+ * the two can meet the required exposure time. If they cannot then the current
+ * stage's shutter time limit is multiplied by the same stage's gain limit to
+ * see if that combination can meet the required exposure time. If they cannot
+ * then the function moves to consider the next stage.
+ *
+ * When a combination of shutter time and gain _stage_ limits are found that are
+ * sufficient to meet the required exposure time, the function attempts to
+ * reduce shutter time as much as possible whilst fixing gain and still meeting
+ * the exposure time. If a _runtime_ limit prevents shutter time from being
+ * lowered enough to meet the exposure time with gain fixed at the stage limit,
+ * gain is also lowered to compensate.
+ *
+ * Once the shutter time and gain values are ascertained, gain is assigned as
+ * analogue gain as much as possible, with digital gain only in use if the
+ * maximum analogue gain runtime limit is unable to accommodate the exposure
+ * value.
+ *
+ * If no combination of shutter time and gain limits is found that meets the
+ * required exposure time, the helper falls back to simply maximising the
+ * shutter time first, followed by analogue gain, followed by digital gain.
+ *
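+ * For example (illustrative values): with stages of { 10ms, 2.0 } and
+ * { 33ms, 4.0 }, runtime limits of 100us to 33ms shutter time and 1.0 to 8.0
+ * analogue gain, and a required exposure of 40ms, the first stage cannot meet
+ * the target (10ms * 2.0 = 20ms), but the second stage's shutter time limit
+ * combined with the first stage's gain limit can (33ms * 2.0 = 66ms). Shutter
+ * time is then reduced to 40ms / 2.0 = 20ms, giving 20ms shutter time, 2.0
+ * analogue gain and 1.0 digital gain.
+ *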
+ * \return Tuple of shutter time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+ExposureModeHelper::splitExposure(utils::Duration exposure) const
+{
+ ASSERT(maxShutter_);
+ ASSERT(maxGain_);
+
+ bool gainFixed = minGain_ == maxGain_;
+ bool shutterFixed = minShutter_ == maxShutter_;
+
+ /*
+ * There's no point entering the loop if we cannot change either gain
+ * or shutter anyway.
+ */
+ if (shutterFixed && gainFixed)
+ return { minShutter_, minGain_, exposure / (minShutter_ * minGain_) };
+
+ utils::Duration shutter;
+ double stageGain;
+ double gain;
+
+ for (unsigned int stage = 0; stage < gains_.size(); stage++) {
+ double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
+ utils::Duration stageShutter = clampShutter(shutters_[stage]);
+ stageGain = clampGain(gains_[stage]);
+
+ /*
+ * We perform the clamping on both shutter and gain in case the
+ * helper has had limits set that prevent those values from being
+ * lowered beyond a certain minimum; this can happen at runtime for
+ * various reasons and so would not be known when the stage limits
+ * are initialised.
+ */
+
+ if (stageShutter * lastStageGain >= exposure) {
+ shutter = clampShutter(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+ }
+
+ if (stageShutter * stageGain >= exposure) {
+ shutter = clampShutter(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+ }
+ }
+
+ /*
+ * From here on all we can do is max out the shutter time, followed by
+ * the analogue gain. If we still haven't achieved the target we send
+ * the rest of the exposure time to digital gain. If we were given no
+ * stages to use then set stageGain to 1.0 so that shutter time is maxed
+ * before gain is touched at all.
+ */
+ if (gains_.empty())
+ stageGain = 1.0;
+
+ shutter = clampShutter(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / shutter);
+
+ return { shutter, gain, exposure / (shutter * gain) };
+}
+
+/**
+ * \fn ExposureModeHelper::minShutter()
+ * \brief Retrieve the configured minimum shutter time limit set through
+ * setLimits()
+ * \return The minShutter_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxShutter()
+ * \brief Retrieve the configured maximum shutter time set through
+ * setLimits()
+ * \return The maxShutter_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::minGain()
+ * \brief Retrieve the configured minimum gain set through
+ * setLimits()
+ * \return The minGain_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxGain()
+ * \brief Retrieve the configured maximum gain set through
+ * setLimits()
+ * \return The maxGain_ value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
new file mode 100644
index 00000000..85c665d7
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+
+#pragma once
+
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class ExposureModeHelper
+{
+public:
+ ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
+ ~ExposureModeHelper() = default;
+
+ void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ double minGain, double maxGain);
+
+ std::tuple<utils::Duration, double, double>
+ splitExposure(utils::Duration exposure) const;
+
+ utils::Duration minShutter() const { return minShutter_; }
+ utils::Duration maxShutter() const { return maxShutter_; }
+ double minGain() const { return minGain_; }
+ double maxGain() const { return maxGain_; }
+
+private:
+ utils::Duration clampShutter(utils::Duration shutter) const;
+ double clampGain(double gain) const;
+
+ std::vector<utils::Duration> shutters_;
+ std::vector<double> gains_;
+
+ utils::Duration minShutter_;
+ utils::Duration maxShutter_;
+ double minGain_;
+ double maxGain_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp
new file mode 100644
index 00000000..0365e919
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#include "fc_queue.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+/**
+ * \file fc_queue.h
+ * \brief Queue of per-frame contexts
+ */
+
+/**
+ * \struct FrameContext
+ * \brief Context for a frame
+ *
+ * The frame context stores data specific to a single frame processed by the
+ * IPA module. Each frame processed by the IPA module has a context associated
+ * with it, accessible through the Frame Context Queue.
+ *
+ * Fields in the frame context should reflect values and controls associated
+ * with the specific frame as requested by the application, and as configured by
+ * the hardware. Fields can be read by algorithms to determine if they should
+ * update any specific action for this frame, and finally to update the metadata
+ * control lists when the frame is fully completed.
+ *
+ * \var FrameContext::frame
+ * \brief The frame number
+ */
+
+/**
+ * \class FCQueue
+ * \brief A support class for managing FrameContext instances in IPA modules
+ * \tparam FrameContext The IPA module-specific FrameContext derived class type
+ *
+ * Along with the Module and Algorithm classes, the frame context queue is a
+ * core component of the libipa infrastructure. It stores per-frame contexts
+ * used by the Algorithm operations. By centralizing the lifetime management of
+ * the contexts and implementing safeguards against underflows and overflows, it
+ * simplifies IPA modules and improves their reliability.
+ *
+ * The queue references frame contexts by a monotonically increasing sequence
+ * number. The FCQueue design assumes that this number matches both the sequence
+ * number of the corresponding frame, as generated by the camera sensor, and the
+ * sequence number of the request. This allows IPA modules to obtain the frame
+ * context from any location where a request or a frame is available.
+ *
+ * A frame context normally begins its lifetime when the corresponding request
+ * is queued, way before the frame is captured by the camera sensor. IPA modules
+ * allocate the context from the queue at that point, calling alloc() using the
+ * request number. The queue initializes the context, and the IPA module then
+ * populates it with data from the request. The context can be later retrieved
+ * with a call to get(), typically when the IPA module is requested to provide
+ * sensor or ISP parameters or receives statistics for a frame. The frame number
+ * is used at that point to identify the context.
+ *
+ * If an application fails to queue requests to the camera fast enough, frames
+ * may be produced by the camera sensor and processed by the IPA module without
+ * a corresponding request having been queued to the IPA module. This creates an
+ * underrun condition, where the IPA module will try to get a frame context that
+ * hasn't been allocated. In this case, the get() function will allocate and
+ * initialize a context for the frame, and log a message. Algorithms will not
+ * apply the controls associated with the late request, but should otherwise
+ * behave correctly.
+ *
+ * \todo Mark the frame context with a per-frame control error flag in case of
+ * underrun, and research how algorithms should handle this.
+ *
+ * At its core, the queue uses a circular buffer to avoid dynamic memory
+ * allocation at runtime. The buffer is pre-allocated with a maximum number of
+ * entries when the FCQueue instance is constructed. Entries are initialized on
+ * first use by alloc() or, in underrun conditions, get(). The queue is not
+ * allowed to overflow, which must be ensured by pipeline handlers never
+ * queuing more in-flight requests to the IPA module than the queue size. If an
+ * overflow condition is detected, the queue will log a fatal error.
+ *
+ * IPA module-specific frame context implementations shall inherit from the
+ * FrameContext base class to support the minimum required features for a
+ * FrameContext.
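+ *
+ * A minimal usage sketch (the IPAFrameContext layout and the surrounding
+ * module code are illustrative only):
+ *
+ * \code
+ * struct IPAFrameContext : public FrameContext {
+ *         struct {
+ *                 uint32_t exposure;
+ *                 double gain;
+ *         } agc;
+ * };
+ *
+ * FCQueue<IPAFrameContext> frameContexts(4);
+ *
+ * // In queueRequest(), with frame being the request sequence number:
+ * IPAFrameContext &newContext = frameContexts.alloc(frame);
+ * newContext.agc.exposure = exposureFromControls;
+ *
+ * // Later, when ISP parameters or statistics for that frame are handled:
+ * IPAFrameContext &frameContext = frameContexts.get(frame);
+ * \endcode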
+ */
+
+/**
+ * \fn FCQueue::FCQueue(unsigned int size)
+ * \brief Construct a frame contexts queue of a specified size
+ * \param[in] size The number of contexts in the queue
+ */
+
+/**
+ * \fn FCQueue::clear()
+ * \brief Clear the contexts queue
+ *
+ * IPA modules must clear the frame context queue at the beginning of a new
+ * streaming session, in IPAModule::start().
+ *
+ * \todo Fix any issue this may cause with requests queued before the camera is
+ * started.
+ */
+
+/**
+ * \fn FCQueue::alloc(uint32_t frame)
+ * \brief Allocate and return a FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * The first call to obtain a FrameContext from the FCQueue should be handled
+ * through this function. The FrameContext will be initialised, if not
+ * initialised already, and returned to the caller.
+ *
+ * If the FrameContext was already initialised for this \a frame, a warning
+ * will be reported and the previously initialised FrameContext is returned.
+ *
+ * Frame contexts are expected to be initialised when a Request is first passed
+ * to the IPA module in IPAModule::queueRequest().
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+/**
+ * \fn FCQueue::get(uint32_t frame)
+ * \brief Obtain the FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * If the FrameContext is not correctly initialised for the \a frame, it will be
+ * initialised.
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
new file mode 100644
index 00000000..24d9e82b
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+template<typename FrameContext>
+class FCQueue;
+
+struct FrameContext {
+private:
+ template<typename T> friend class FCQueue;
+ uint32_t frame;
+};
+
+template<typename FrameContext>
+class FCQueue
+{
+public:
+ FCQueue(unsigned int size)
+ : contexts_(size)
+ {
+ }
+
+ void clear()
+ {
+ for (FrameContext &ctx : contexts_)
+ ctx.frame = 0;
+ }
+
+ FrameContext &alloc(const uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * Do not re-initialise the frame context if a get() call has
+ * already fetched it, in order to preserve its contents.
+ *
+ * \todo If the sequence number of the context to initialise is
+ * smaller than the sequence number of the queue slot to use, it
+ * means that we had a serious request underrun: more frames than
+ * the queue size have been produced since the application last
+ * queued a request. Does this deserve an error condition?
+ */
+ if (frame != 0 && frame <= frameContext.frame)
+ LOG(FCQueue, Warning)
+ << "Frame " << frame << " already initialised";
+ else
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ FrameContext &get(uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * If the IPA algorithms try to access a frame context slot that
+ * has already been overwritten by a newer context, the frame
+ * context queue has overflowed and the requested context is
+ * lost for good. The pipeline handler shall never queue more
+ * requests to the IPA than the frame context queue size.
+ */
+ if (frame < frameContext.frame)
+ LOG(FCQueue, Fatal) << "Frame context for " << frame
+ << " has been overwritten by "
+ << frameContext.frame;
+
+ if (frame == frameContext.frame)
+ return frameContext;
+
+ /*
+ * The frame context is being retrieved before it was
+ * initialised through a call to alloc(). This indicates that an
+ * algorithm attempted to access a frame context before the
+ * corresponding request was queued to the IPA. Controls applied
+ * for this request may be left unhandled.
+ *
+ * \todo Set an error flag for per-frame control errors.
+ */
+ LOG(FCQueue, Warning)
+ << "Obtained an uninitialised FrameContext for " << frame;
+
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+private:
+ void init(FrameContext &frameContext, const uint32_t frame)
+ {
+ frameContext = {};
+ frameContext.frame = frame;
+ }
+
+ std::vector<FrameContext> contexts_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
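The slot selection is simply frame % size, so the behaviour above holds only as long as the pipeline handler keeps the number of in-flight requests within the queue size. The walkthrough below, a sketch using a hypothetical Ctx context type and a four-entry queue, traces which path of alloc() and get() is taken for a few frame numbers.

#include <cassert>

#include "fc_queue.h"

using libcamera::ipa::FCQueue;
using libcamera::ipa::FrameContext;

/* Hypothetical per-frame context for the walkthrough. */
struct Ctx : public FrameContext {
	int value = 0;
};

void walkthrough()
{
	FCQueue<Ctx> queue(4);
	queue.clear();

	/* Frames map to slot frame % 4: frames 0 and 1 use slots 0 and 1. */
	queue.alloc(0).value = 10;
	queue.alloc(1).value = 11;

	/* Normal flow: get() returns the context filled in after alloc(). */
	assert(queue.get(1).value == 11);

	/*
	 * Underrun: frame 2 was never alloc()'d, so get() logs a warning,
	 * initialises the slot and returns a default context.
	 */
	assert(queue.get(2).value == 0);

	/* Frame 4 wraps back to slot 0; init() resets the previous contents. */
	queue.alloc(4).value = 14;
}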
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
index d8ad1c89..5fbfadf5 100644
--- a/src/ipa/libipa/histogram.cpp
+++ b/src/ipa/libipa/histogram.cpp
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.cpp - histogram calculations
+ * histogram calculations
*/
#include "histogram.h"
@@ -29,18 +29,34 @@ namespace ipa {
*/
/**
+ * \fn Histogram::Histogram()
+ * \brief Construct an empty Histogram
+ *
+ * This default constructor exists largely to allow a Histogram to be embedded
+ * in other classes which may be created before the contents of the histogram
+ * are known.
+ */
+
+/**
* \brief Create a cumulative histogram
- * \param[in] data A pre-sorted histogram to be passed
+ * \param[in] data A (non-cumulative) histogram
*/
Histogram::Histogram(Span<const uint32_t> data)
{
- cumulative_.reserve(data.size());
- cumulative_.push_back(0);
- for (const uint32_t &value : data)
- cumulative_.push_back(cumulative_.back() + value);
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + value;
}
/**
+ * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform)
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ * \param[in] transform The transformation function to apply to every bin
+ */
+
+/**
* \fn Histogram::bins()
* \brief Retrieve the number of bins currently used by the Histogram
* \return Number of bins
@@ -53,7 +69,7 @@ Histogram::Histogram(Span<const uint32_t> data)
*/
/**
- * \brief Cumulative frequency up to a (fractional) point in a bin.
+ * \brief Cumulative frequency up to a (fractional) point in a bin
* \param[in] bin The bin up to which to cumulate
*
* With F(p) the cumulative frequency of the histogram, the value is 0 at
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
index 164d4603..032adca0 100644
--- a/src/ipa/libipa/histogram.h
+++ b/src/ipa/libipa/histogram.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.h - histogram calculation interface
+ * histogram calculation interface
*/
#pragma once
@@ -10,10 +10,11 @@
#include <assert.h>
#include <limits.h>
#include <stdint.h>
-
+#include <type_traits>
#include <vector>
#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
namespace libcamera {
@@ -22,7 +23,19 @@ namespace ipa {
class Histogram
{
public:
+ Histogram() { cumulative_.push_back(0); }
Histogram(Span<const uint32_t> data);
+
+ template<typename Transform,
+ std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr>
+ Histogram(Span<const uint32_t> data, Transform transform)
+ {
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + transform(value);
+ }
+
size_t bins() const { return cumulative_.size() - 1; }
uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
uint64_t cumulativeFrequency(double bin) const;
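To illustrate the two ways of constructing a Histogram, the sketch below builds one cumulative histogram directly from four bins and another through the new Transform overload. The bin values and the halving transform are made up for the example; only the Histogram API declared above is assumed.

#include <stdint.h>

#include <libcamera/base/span.h>

#include "histogram.h"

using libcamera::Span;
using libcamera::ipa::Histogram;

void example()
{
	/* Four bins with counts 2, 3, 5 and 0. */
	static const uint32_t data[] = { 2, 3, 5, 0 };
	Histogram h(Span<const uint32_t>(data));

	/*
	 * The internal cumulative vector is { 0, 2, 5, 10, 10 }, so
	 * h.bins() == 4 and h.total() == 10, while h.cumulativeFrequency(2.0)
	 * counts the contents of bins 0 and 1, i.e. 5.
	 */

	/* Same data, with every bin value halved before accumulation. */
	Histogram half(Span<const uint32_t>(data),
		       [](uint32_t v) { return v / 2; });
	/* half.total() == 1 + 1 + 2 + 0 == 4 */
}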
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index fb894bc6..7ce885da 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -1,15 +1,21 @@
# SPDX-License-Identifier: CC0-1.0
libipa_headers = files([
+ 'agc_mean_luminance.h',
'algorithm.h',
'camera_sensor_helper.h',
+ 'exposure_mode_helper.h',
+ 'fc_queue.h',
'histogram.h',
'module.h',
])
libipa_sources = files([
+ 'agc_mean_luminance.cpp',
'algorithm.cpp',
'camera_sensor_helper.cpp',
+ 'exposure_mode_helper.cpp',
+ 'fc_queue.cpp',
'histogram.cpp',
'module.cpp',
])
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
index 77352104..64ca9141 100644
--- a/src/ipa/libipa/module.cpp
+++ b/src/ipa/libipa/module.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.cpp - IPA Module
+ * IPA Module
*/
#include "module.h"
@@ -17,7 +17,7 @@ namespace libcamera {
LOG_DEFINE_CATEGORY(IPAModuleAlgo)
/**
- * \brief The IPA namespace
+ * \brief The IPA (Image Processing Algorithm) namespace
*
* The IPA namespace groups all types specific to IPA modules. It serves as the
* top-level namespace for the IPA library libipa, and also contains
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
index 4149a353..0fb51916 100644
--- a/src/ipa/libipa/module.h
+++ b/src/ipa/libipa/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - IPA module
+ * IPA module
*/
#pragma once