path: root/src/ipa/libipa
Diffstat (limited to 'src/ipa/libipa')
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.cpp     578
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.h        98
-rw-r--r--  src/ipa/libipa/algorithm.cpp              181
-rw-r--r--  src/ipa/libipa/algorithm.h                106
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.cpp   752
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.h      94
-rw-r--r--  src/ipa/libipa/colours.cpp                 81
-rw-r--r--  src/ipa/libipa/colours.h                   23
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.cpp   240
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.h      53
-rw-r--r--  src/ipa/libipa/fc_queue.cpp               140
-rw-r--r--  src/ipa/libipa/fc_queue.h                 137
-rw-r--r--  src/ipa/libipa/fixedpoint.cpp              42
-rw-r--r--  src/ipa/libipa/fixedpoint.h                65
-rw-r--r--  src/ipa/libipa/histogram.cpp              175
-rw-r--r--  src/ipa/libipa/histogram.h                 51
-rw-r--r--  src/ipa/libipa/interpolator.cpp           157
-rw-r--r--  src/ipa/libipa/interpolator.h             131
-rw-r--r--  src/ipa/libipa/ipa_interface_wrapper.cpp  245
-rw-r--r--  src/ipa/libipa/ipa_interface_wrapper.h     57
-rw-r--r--  src/ipa/libipa/lsc_polynomial.cpp          81
-rw-r--r--  src/ipa/libipa/lsc_polynomial.h           105
-rw-r--r--  src/ipa/libipa/lux.cpp                    181
-rw-r--r--  src/ipa/libipa/lux.h                       42
-rw-r--r--  src/ipa/libipa/meson.build                 40
-rw-r--r--  src/ipa/libipa/module.cpp                 126
-rw-r--r--  src/ipa/libipa/module.h                   124
-rw-r--r--  src/ipa/libipa/pwl.cpp                    457
-rw-r--r--  src/ipa/libipa/pwl.h                       85
-rw-r--r--  src/ipa/libipa/vector.cpp                 351
-rw-r--r--  src/ipa/libipa/vector.h                   370
31 files changed, 5062 insertions, 306 deletions
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
new file mode 100644
index 00000000..02555a44
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#include "agc_mean_luminance.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "exposure_mode_helper.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file agc_mean_luminance.h
+ * \brief Base class implementing mean luminance AEGC
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(AgcMeanLuminance)
+
+namespace ipa {
+
+/*
+ * Number of frames for which to run the algorithm at full speed, before slowing
+ * down to prevent large and jarring changes in exposure from frame to frame.
+ */
+static constexpr uint32_t kNumStartupFrames = 10;
+
+/*
+ * Default relative luminance target
+ *
+ * This value should be chosen so that when the camera points at a grey target,
+ * the resulting image brightness looks "right". Custom values can be passed
+ * as the relativeLuminanceTarget value in sensor tuning files.
+ */
+static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
+
+/**
+ * \struct AgcMeanLuminance::AgcConstraint
+ * \brief The boundaries and target for an AeConstraintMode constraint
+ *
+ * This structure describes an AeConstraintMode constraint for the purposes of
+ * this algorithm. These constraints are expressed as a pair of quantile
+ * boundaries for a histogram, along with a luminance target and a bounds-type.
+ * The algorithm uses the constraints by ensuring that the defined portion of a
+ * luminance histogram (i.e. lying between the two quantiles) is above or below
+ * the given luminance value.
+ */
+
+/**
+ * \enum AgcMeanLuminance::AgcConstraint::Bound
+ * \brief Specify whether the constraint defines a lower or upper bound
+ * \var AgcMeanLuminance::AgcConstraint::Lower
+ * \brief The constraint defines a lower bound
+ * \var AgcMeanLuminance::AgcConstraint::Upper
+ * \brief The constraint defines an upper bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::bound
+ * \brief The type of constraint bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qLo
+ * \brief The lower quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qHi
+ * \brief The upper quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::yTarget
+ * \brief The luminance target for the constraint
+ */
+
+/**
+ * \class AgcMeanLuminance
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates an exposure time, analogue and digital gain such
+ * that the normalised mean luminance value of an image is driven towards a
+ * target, which itself is discovered from tuning data. The algorithm is a
+ * two-stage process.
+ *
+ * In the first stage, an initial gain value is derived by iteratively comparing
+ * the gain-adjusted mean luminance across the entire image against a target,
+ * and selecting a value which pushes it as closely as possible towards the
+ * target.
+ *
+ * In the second stage we calculate the gain required to drive the average of a
+ * section of a histogram to a target value, where the target and the boundaries
+ * of the section of the histogram used in the calculation are taken from the
+ * values defined for the currently configured AeConstraintMode within the
+ * tuning data. This class provides a helper function to parse those tuning data
+ * to discover the constraints, and so requires a specific format for those
+ * data which is described in \ref parseTuningData(). The gain from the first
+ * stage is then clamped to the gain from this stage.
+ *
+ * The final gain is used to adjust the effective exposure value of the image,
+ * and that new exposure value is divided into exposure time, analogue gain and
+ * digital gain according to the selected AeExposureMode. This class uses the
+ * \ref ExposureModeHelper class to assist in that division, and expects the
+ * data needed to initialise that class to be present in tuning data in a
+ * format described in \ref parseTuningData().
+ *
+ * In order to be able to use this algorithm an IPA module needs to be able to
+ * do the following:
+ *
+ * 1. Provide a luminance estimation across an entire image.
+ * 2. Provide a luminance Histogram for the image to use in calculating
+ * constraint compliance. The precision of the Histogram that is available
+ * will determine the supportable precision of the constraints.
+ *
+ * IPA modules that want to use this class to implement their AEGC algorithm
+ * should derive it and provide an overriding estimateLuminance() function for
+ * this class to use. They must call parseTuningData() in init(), and must also
+ * call setLimits() and resetFrameCount() in configure(). They may then use
+ * calculateNewEv() in process(). If the limits passed to setLimits() change for
+ * any reason (for example, in response to a FrameDurationLimit control being
+ * passed in queueRequest()) then setLimits() must be called again with the new
+ * values.
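+ *
+ * A minimal sketch of the expected call sequence (the derived class and the
+ * signatures of its entry points are hypothetical, simplified from what a
+ * real IPA module would use):
+ *
+ * \code{.cpp}
+ * class MyAgc : public AgcMeanLuminance
+ * {
+ * public:
+ *     int init(const YamlObject &tuningData)
+ *     {
+ *         return parseTuningData(tuningData);
+ *     }
+ *
+ *     int configure(utils::Duration minExpTime, utils::Duration maxExpTime,
+ *                   double minGain, double maxGain)
+ *     {
+ *         setLimits(minExpTime, maxExpTime, minGain, maxGain);
+ *         resetFrameCount();
+ *         return 0;
+ *     }
+ *
+ *     void process(uint32_t constraintMode, uint32_t exposureMode,
+ *                  const Histogram &yHist, utils::Duration effectiveEv)
+ *     {
+ *         auto [expTime, aGain, dGain] =
+ *             calculateNewEv(constraintMode, exposureMode, yHist, effectiveEv);
+ *         // Apply expTime, aGain and dGain to the sensor and ISP.
+ *     }
+ *
+ * private:
+ *     double estimateLuminance(double gain) const override;
+ * };
+ * \endcode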
+ */
+
+AgcMeanLuminance::AgcMeanLuminance()
+ : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0)
+{
+}
+
+AgcMeanLuminance::~AgcMeanLuminance() = default;
+
+void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData)
+{
+ relativeLuminanceTarget_ =
+ tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget);
+}
+
+void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id)
+{
+ for (const auto &[boundName, content] : modeDict.asDict()) {
+ if (boundName != "upper" && boundName != "lower") {
+ LOG(AgcMeanLuminance, Warning)
+ << "Ignoring unknown constraint bound '" << boundName << "'";
+ continue;
+ }
+
+ unsigned int idx = static_cast<unsigned int>(boundName == "upper");
+ AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx);
+ double qLo = content["qLo"].get<double>().value_or(0.98);
+ double qHi = content["qHi"].get<double>().value_or(1.0);
+ double yTarget =
+ content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0);
+
+ AgcConstraint constraint = { bound, qLo, qHi, yTarget };
+
+ if (!constraintModes_.count(id))
+ constraintModes_[id] = {};
+
+ if (idx)
+ constraintModes_[id].push_back(constraint);
+ else
+ constraintModes_[id].insert(constraintModes_[id].begin(), constraint);
+ }
+}
+
+int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableConstraintModes;
+
+ const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()];
+ if (yamlConstraintModes.isDictionary()) {
+ for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) {
+ if (AeConstraintModeNameValueMap.find(modeName) ==
+ AeConstraintModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown constraint mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid constraint mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ parseConstraint(modeDict,
+ AeConstraintModeNameValueMap.at(modeName));
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If the tuning data file contains no constraints then we use the
+ * default constraint that the IPU3/RkISP1 Agc algorithms were adhering
+ * to anyway before centralisation; this constraint forces the top 2% of
+ * the histogram to be at least 0.5.
+ */
+ if (constraintModes_.empty()) {
+ AgcConstraint constraint = {
+ AgcConstraint::Bound::Lower,
+ 0.98,
+ 1.0,
+ 0.5
+ };
+
+ constraintModes_[controls::ConstraintNormal].insert(
+ constraintModes_[controls::ConstraintNormal].begin(),
+ constraint);
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at("ConstraintNormal"));
+ }
+
+ controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes);
+
+ return 0;
+}
+
+int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableExposureModes;
+
+ const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()];
+ if (yamlExposureModes.isDictionary()) {
+ for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) {
+ if (AeExposureModeNameValueMap.find(modeName) ==
+ AeExposureModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown exposure mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeValues.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid exposure mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> exposureTimes =
+ modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> gains =
+ modeValues["gain"].getList<double>().value_or(std::vector<double>{});
+
+ if (exposureTimes.size() != gains.size()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain array sizes unequal";
+ return -EINVAL;
+ }
+
+ if (exposureTimes.empty()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain arrays are empty";
+ return -EINVAL;
+ }
+
+ std::vector<std::pair<utils::Duration, double>> stages;
+ for (unsigned int i = 0; i < exposureTimes.size(); i++) {
+ stages.push_back({
+ std::chrono::microseconds(exposureTimes[i]),
+ gains[i]
+ });
+ }
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper;
+ availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If we don't have any exposure modes in the tuning data we create an
+ * ExposureModeHelper using an empty vector of stages. This will result
+ * in the ExposureModeHelper simply driving the exposure time as high as
+ * possible before touching gain.
+ */
+ if (availableExposureModes.empty()) {
+ int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal");
+ std::vector<std::pair<utils::Duration, double>> stages = { };
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[exposureModeId] = helper;
+ availableExposureModes.push_back(exposureModeId);
+ }
+
+ controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes);
+
+ return 0;
+}
+
+/**
+ * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls
+ * \param[in] tuningData the YamlObject representing the tuning data
+ *
+ * This function parses tuning data to build the list of allowed values for the
+ * AeConstraintMode and AeExposureMode controls. Those tuning data must provide
+ * the data in a specific format; the Agc algorithm's tuning data should contain
+ * a dictionary called AeConstraintMode containing per-mode setting dictionaries
+ * with the key being a value from \ref controls::AeConstraintModeNameValueMap.
+ * Each mode dict may contain either a "lower" or "upper" key or both, for
+ * example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeConstraintMode:
+ * ConstraintNormal:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * ConstraintHighlight:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * upper:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.8
+ *
+ * \endcode
+ *
+ * For the AeExposureMode control the data should contain a dictionary called
+ * AeExposureMode containing per-mode setting dictionaries with the key being a
+ * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should
+ * contain an array of exposure times with the key "exposureTime" and an array
+ * of gain values with the key "gain", in this format:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeExposureMode:
+ * ExposureNormal:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ * ExposureShort:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ *
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
+{
+ int ret;
+
+ parseRelativeLuminanceTarget(tuningData);
+
+ ret = parseConstraintModes(tuningData);
+ if (ret)
+ return ret;
+
+ return parseExposureModes(tuningData);
+}
+
+/**
+ * \brief Set the ExposureModeHelper limits for this class
+ * \param[in] minExposureTime Minimum exposure time to allow
+ * \param[in] maxExposureTime Maximum exposure time to allow
+ * \param[in] minGain Minimum gain to allow
+ * \param[in] maxGain Maximum gain to allow
+ *
+ * This function calls \ref ExposureModeHelper::setLimits() for each
+ * ExposureModeHelper that has been created for this class.
+ */
+void AgcMeanLuminance::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ for (auto &[id, helper] : exposureModeHelpers_)
+ helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain);
+}
+
+/**
+ * \fn AgcMeanLuminance::constraintModes()
+ * \brief Get the constraint modes that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::exposureModeHelpers()
+ * \brief Get the ExposureModeHelpers that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::controls()
+ * \brief Get the controls that have been generated after parsing tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::estimateLuminance(const double gain)
+ * \brief Estimate the luminance of an image, adjusted by a given gain
+ * \param[in] gain The gain with which to adjust the luminance estimate
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied. It is a
+ * pure virtual function because estimation of luminance is a hardware-specific
+ * operation, which depends wholly on the format of the stats that are delivered
+ * to libcamera from the ISP. Derived classes must override this function with
+ * one that calculates the normalised mean luminance value across the entire
+ * image.
+ *
+ * \return The normalised relative luminance of the image
+ */
+
+/**
+ * \brief Estimate the initial gain needed to achieve a relative luminance
+ * target
+ * \return The calculated initial gain
+ */
+double AgcMeanLuminance::estimateInitialGain() const
+{
+ double yTarget = relativeLuminanceTarget_;
+ double yGain = 1.0;
+
+ /*
+ * To account for non-linearity caused by saturation, the value needs to
+ * be estimated in an iterative process, as multiplying by a gain will
+ * not increase the relative luminance by the same factor if some image
+ * regions are saturated.
+ */
+ for (unsigned int i = 0; i < 8; i++) {
+ double yValue = estimateLuminance(yGain);
+ double extra_gain = std::min(10.0, yTarget / (yValue + .001));
+
+ yGain *= extra_gain;
+ LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue
+ << ", Y target: " << yTarget
+ << ", gives gain " << yGain;
+
+ if (utils::abs_diff(extra_gain, 1.0) < 0.01)
+ break;
+ }
+
+ return yGain;
+}
+
+/**
+ * \brief Clamp gain within the bounds of a defined constraint
+ * \param[in] constraintModeIndex The index of the constraint to adhere to
+ * \param[in] hist A histogram over which to calculate inter-quantile means
+ * \param[in] gain The gain to clamp
+ *
+ * \return The gain clamped within the constraint bounds
+ */
+double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain)
+{
+ std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex];
+ for (const AgcConstraint &constraint : constraints) {
+ double newGain = constraint.yTarget * hist.bins() /
+ hist.interQuantileMean(constraint.qLo, constraint.qHi);
+
+ if (constraint.bound == AgcConstraint::Bound::Lower &&
+ newGain > gain)
+ gain = newGain;
+
+ if (constraint.bound == AgcConstraint::Bound::Upper &&
+ newGain < gain)
+ gain = newGain;
+ }
+
+ return gain;
+}
+
+/**
+ * \brief Apply a filter on the exposure value to limit the speed of changes
+ * \param[in] exposureValue The target exposure from the AGC algorithm
+ *
+ * The speed of the filter is adaptive, and will produce the target quicker
+ * during startup, or when the target exposure is within 20% of the most recent
+ * filter output.
+ *
+ * \return The filtered exposure
+ */
+utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
+{
+ double speed = 0.2;
+
+ /* Adapt instantly if we are in startup phase. */
+ if (frameCount_ < kNumStartupFrames)
+ speed = 1.0;
+
+ /*
+ * If we are close to the desired result, go faster to avoid making
+ * multiple micro-adjustments.
+ * \todo Make this customisable?
+ */
+ if (filteredExposure_ < 1.2 * exposureValue &&
+ filteredExposure_ > 0.8 * exposureValue)
+ speed = sqrt(speed);
+
+ filteredExposure_ = speed * exposureValue +
+ filteredExposure_ * (1.0 - speed);
+
+ return filteredExposure_;
+}
+
+/**
+ * \brief Calculate the new exposure value and split it between exposure time
+ * and gain
+ * \param[in] constraintModeIndex The index of the current constraint mode
+ * \param[in] exposureModeIndex The index of the current exposure mode
+ * \param[in] yHist A Histogram from the ISP statistics to use in constraining
+ * the calculated gain
+ * \param[in] effectiveExposureValue The EV applied to the frame from which the
+ * statistics in use derive
+ *
+ * Calculate a new exposure value to try to obtain the target. The calculated
+ * exposure value is filtered to prevent rapid changes from frame to frame, and
+ * divided into exposure time, analogue and digital gain.
+ *
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
+ uint32_t exposureModeIndex,
+ const Histogram &yHist,
+ utils::Duration effectiveExposureValue)
+{
+ /*
+ * The pipeline handler should validate that we have received an allowed
+ * value for AeExposureMode.
+ */
+ std::shared_ptr<ExposureModeHelper> exposureModeHelper =
+ exposureModeHelpers_.at(exposureModeIndex);
+
+ double gain = estimateInitialGain();
+ gain = constraintClampGain(constraintModeIndex, yHist, gain);
+
+ /*
+ * We don't check whether we're already close to the target, because
+ * even if the effective exposure value is the same as the last frame's
+ * we could have switched to an exposure mode that would require a new
+ * pass through the splitExposure() function.
+ */
+
+ utils::Duration newExposureValue = effectiveExposureValue * gain;
+
+ /*
+ * We filter the exposure value to make sure changes are not too jarring
+ * from frame to frame.
+ */
+ newExposureValue = filterExposure(newExposureValue);
+
+ frameCount_++;
+ return exposureModeHelper->splitExposure(newExposureValue);
+}
+
+/**
+ * \fn AgcMeanLuminance::resetFrameCount()
+ * \brief Reset the frame counter
+ *
+ * This function resets the internal frame counter, which exists to help the
+ * algorithm decide whether it should respond instantly or not. The expectation
+ * is for derived classes to call this function before each camera start call in
+ * their configure() function.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
new file mode 100644
index 00000000..c41391cb
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "exposure_mode_helper.h"
+#include "histogram.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AgcMeanLuminance
+{
+public:
+ AgcMeanLuminance();
+ virtual ~AgcMeanLuminance();
+
+ struct AgcConstraint {
+ enum class Bound {
+ Lower = 0,
+ Upper = 1
+ };
+ Bound bound;
+ double qLo;
+ double qHi;
+ double yTarget;
+ };
+
+ int parseTuningData(const YamlObject &tuningData);
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
+ {
+ return constraintModes_;
+ }
+
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers()
+ {
+ return exposureModeHelpers_;
+ }
+
+ ControlInfoMap::Map controls()
+ {
+ return controls_;
+ }
+
+ std::tuple<utils::Duration, double, double>
+ calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex,
+ const Histogram &yHist, utils::Duration effectiveExposureValue);
+
+ void resetFrameCount()
+ {
+ frameCount_ = 0;
+ }
+
+private:
+ virtual double estimateLuminance(const double gain) const = 0;
+
+ void parseRelativeLuminanceTarget(const YamlObject &tuningData);
+ void parseConstraint(const YamlObject &modeDict, int32_t id);
+ int parseConstraintModes(const YamlObject &tuningData);
+ int parseExposureModes(const YamlObject &tuningData);
+ double estimateInitialGain() const;
+ double constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain);
+ utils::Duration filterExposure(utils::Duration exposureValue);
+
+ uint64_t frameCount_;
+ utils::Duration filteredExposure_;
+ double relativeLuminanceTarget_;
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes_;
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_;
+ ControlInfoMap::Map controls_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
new file mode 100644
index 00000000..201efdfd
--- /dev/null
+++ b/src/ipa/libipa/algorithm.cpp
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * IPA control algorithm interface
+ */
+
+#include "algorithm.h"
+
+/**
+ * \file algorithm.h
+ * \brief Algorithm common interface
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Algorithm
+ * \brief The base class for all IPA algorithms
+ * \tparam Module The IPA module type for this class of algorithms
+ *
+ * The Algorithm class defines a standard interface for IPA algorithms
+ * compatible with the \a Module. By abstracting algorithms, it makes possible
+ * the implementation of generic code to manage algorithms regardless of their
+ * specific type.
+ *
+ * To specialize the Algorithm class template, an IPA module shall specialize
+ * the Module class template with module-specific context and configuration
+ * types, and pass the specialized Module class as the \a Module template
+ * argument.
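+ *
+ * As an illustration only, with IPAMyModule standing in for a module-specific
+ * specialization of the Module class template (the names are hypothetical):
+ *
+ * \code{.cpp}
+ * class Awb : public Algorithm<IPAMyModule>
+ * {
+ * public:
+ *     int init(IPAMyModule::Context &context,
+ *              const YamlObject &tuningData) override;
+ *     void process(IPAMyModule::Context &context, const uint32_t frame,
+ *                  IPAMyModule::FrameContext &frameContext,
+ *                  const IPAMyModule::Stats *stats,
+ *                  ControlList &metadata) override;
+ * };
+ * \endcode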
+ */
+
+/**
+ * \typedef Algorithm::Module
+ * \brief The IPA module type for this class of algorithms
+ */
+
+/**
+ * \fn Algorithm::init()
+ * \brief Initialize the Algorithm with tuning data
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The tuning data for the algorithm
+ *
+ * This function is called once, when the IPA module is initialized, to
+ * initialize the algorithm. The \a tuningData YamlObject contains the tuning
+ * data for the algorithm.
+ *
+ * \return 0 if successful, an error code otherwise
+ */
+
+/**
+ * \fn Algorithm::configure()
+ * \brief Configure the Algorithm given an IPAConfigInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ *
+ * Algorithms may implement a configure operation to pre-calculate
+ * parameters prior to commencing streaming.
+ *
+ * Configuration state may be stored in the IPASessionConfiguration structure of
+ * the IPAContext.
+ *
+ * \return 0 if successful, an error code otherwise
+ */
+
+/**
+ * \fn Algorithm::queueRequest()
+ * \brief Provide control values to the algorithm
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame number to apply the control values
+ * \param[in] frameContext The current frame's context
+ * \param[in] controls The list of user controls
+ *
+ * This function is called for each request queued to the camera. It provides
+ * the controls stored in the request to the algorithm. The \a frame number
+ * is the Request sequence number and identifies the desired corresponding
+ * frame to target for the controls to take effect.
+ *
+ * Algorithms shall read the applicable controls and store their value for later
+ * use during frame processing.
+ */
+
+/**
+ * \fn Algorithm::prepare()
+ * \brief Fill the \a params buffer with ISP processing parameters for a frame
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The ISP specific parameters
+ *
+ * This function is called for every frame when the camera is running before it
+ * is processed by the ISP to prepare the ISP processing parameters for that
+ * frame.
+ *
+ * Algorithms shall fill in the parameter structure fields appropriately to
+ * configure the ISP processing blocks that they are responsible for. This
+ * includes setting fields and flags that enable those processing blocks.
+ */
+
+/**
+ * \fn Algorithm::process()
+ * \brief Process ISP statistics, and run algorithm operations
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The current frame's context
+ * \param[in] stats The IPA statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * This function is called while the camera is running, for every frame
+ * processed by the ISP, to process statistics generated from that frame by the
+ * ISP.
+ * Algorithms shall use this data to run calculations, update their state
+ * accordingly, and fill the frame metadata.
+ *
+ * Processing shall not take an undue amount of time, and any extended or
+ * computationally expensive calculations or operations must be handled
+ * asynchronously in a separate thread.
+ *
+ * Algorithms can store state in their respective IPAFrameContext structures,
+ * and reference state from the IPAFrameContext of other algorithms.
+ *
+ * \todo Historical data may be required as part of the processing.
+ * Either the previous frame, or the IPAFrameContext state of the frame
+ * that generated the statistics for this operation may be required for
+ * some advanced algorithms to prevent oscillations or support control
+ * loops correctly. Only a single IPAFrameContext is available currently,
+ * and so any data stored may represent the results of the previously
+ * completed operations.
+ *
+ * Care shall be taken to ensure the ordering of access to the information
+ * such that the algorithms use up to date state as required.
+ */
+
+/**
+ * \class AlgorithmFactory
+ * \brief Registration of Algorithm classes and creation of instances
+ * \tparam _Algorithm The algorithm class type for this factory
+ *
+ * To facilitate instantiation of Algorithm classes, the AlgorithmFactory class
+ * implements auto-registration of algorithms with the IPA Module class. Each
+ * Algorithm subclass shall register itself using the REGISTER_IPA_ALGORITHM()
+ * macro, which will create a corresponding instance of an AlgorithmFactory and
+ * register it with the IPA Module.
+ */
+
+/**
+ * \fn AlgorithmFactory::AlgorithmFactory()
+ * \brief Construct an algorithm factory
+ * \param[in] name Name of the algorithm class
+ *
+ * Creating an instance of the factory automatically registers it with the IPA
+ * Module class, enabling creation of algorithm instances through
+ * Module::createAlgorithm().
+ *
+ * The factory \a name identifies the algorithm and shall be unique.
+ */
+
+/**
+ * \fn AlgorithmFactory::create()
+ * \brief Create an instance of the Algorithm corresponding to the factory
+ * \return A pointer to a newly constructed instance of the Algorithm subclass
+ * corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_IPA_ALGORITHM
+ * \brief Register an algorithm with the IPA module
+ * \param[in] algorithm Class name of Algorithm derived class to register
+ * \param[in] name Name of the algorithm
+ *
+ * Register an Algorithm subclass with the IPA module to make it available for
+ * instantiation through Module::createAlgorithm(). The \a name identifies the
+ * algorithm and must be unique across all algorithms registered for the IPA
+ * module.
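+ *
+ * As an illustration, an algorithm class named Agc would typically be
+ * registered from its implementation file with:
+ *
+ * \code{.cpp}
+ * REGISTER_IPA_ALGORITHM(Agc, "Agc")
+ * \endcode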
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
new file mode 100644
index 00000000..9a19dbd6
--- /dev/null
+++ b/src/ipa/libipa/algorithm.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * ISP control algorithm interface
+ */
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <string>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+template<typename _Module>
+class Algorithm
+{
+public:
+ using Module = _Module;
+
+ virtual ~Algorithm() {}
+
+ virtual int init([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+ {
+ return 0;
+ }
+
+ virtual int configure([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const typename Module::Config &configInfo)
+ {
+ return 0;
+ }
+
+ virtual void queueRequest([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const ControlList &controls)
+ {
+ }
+
+ virtual void prepare([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] typename Module::Params *params)
+ {
+ }
+
+ virtual void process([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const typename Module::Stats *stats,
+ [[maybe_unused]] ControlList &metadata)
+ {
+ }
+};
+
+template<typename _Module>
+class AlgorithmFactoryBase
+{
+public:
+ AlgorithmFactoryBase(const char *name)
+ : name_(name)
+ {
+ _Module::registerAlgorithm(this);
+ }
+
+ virtual ~AlgorithmFactoryBase() = default;
+
+ const std::string &name() const { return name_; }
+
+ virtual std::unique_ptr<Algorithm<_Module>> create() const = 0;
+
+private:
+ std::string name_;
+};
+
+template<typename _Algorithm>
+class AlgorithmFactory : public AlgorithmFactoryBase<typename _Algorithm::Module>
+{
+public:
+ AlgorithmFactory(const char *name)
+ : AlgorithmFactoryBase<typename _Algorithm::Module>(name)
+ {
+ }
+
+ ~AlgorithmFactory() = default;
+
+ std::unique_ptr<Algorithm<typename _Algorithm::Module>> create() const override
+ {
+ return std::make_unique<_Algorithm>();
+ }
+};
+
+#define REGISTER_IPA_ALGORITHM(algorithm, name) \
+static AlgorithmFactory<algorithm> global_##algorithm##Factory(name);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
new file mode 100644
index 00000000..7c66cd57
--- /dev/null
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Helper class that performs sensor-specific
+ * parameter computations
+ */
+#include "camera_sensor_helper.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file camera_sensor_helper.h
+ * \brief Helper class that performs sensor-specific parameter computations
+ *
+ * Computation of sensor configuration parameters is a sensor specific
+ * operation. Each CameraHelper derived class computes the value of
+ * configuration parameters, for example the analogue gain value, using
+ * sensor-specific functions and constants.
+ *
+ * Every subclass of CameraSensorHelper shall be registered with libipa using
+ * the REGISTER_CAMERA_SENSOR_HELPER() macro.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensorHelper)
+
+namespace ipa {
+
+/**
+ * \class CameraSensorHelper
+ * \brief Base class for computing sensor tuning parameters using
+ * sensor-specific constants
+ *
+ * Instances derived from CameraSensorHelper class are sensor-specific.
+ * Each supported sensor will have an associated base class defined.
+ */
+
+/**
+ * \fn CameraSensorHelper::CameraSensorHelper()
+ * \brief Construct a CameraSensorHelper instance
+ *
+ * CameraSensorHelper derived class instances shall never be constructed
+ * manually but always through the CameraSensorHelperFactoryBase::create()
+ * function.
+ */
+
+/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor scaled to a 16bit pixel
+ * width. If it is unknown an empty optional is returned.
+ *
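+ * For example, a sensor whose datasheet specifies a black level of 64 in
+ * 10-bit precision (such as the imx219 handled later in this file) reports
+ * a value of 64 * 2^(16 - 10) = 4096 from this function.
+ *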
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - Pedestal added by the sensor to pixel values. They are typically fixed,
+ * sometimes programmable and should be reported in datasheets (but
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depend on the
+ * sensor gains.
+ *
+ * The pedestal is usually the value with the biggest contribution to the
+ * overall black level. In most cases it is known in advance or, in rare cases
+ * (there is not a single driver with such a control in the Linux kernel), can
+ * be queried from the sensor. This function provides that fixed, known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
+ * \brief Compute gain code from the analogue gain absolute value
+ * \param[in] gain The real gain to pass
+ *
+ * This function aims to abstract the calculation of the gain letting the IPA
+ * use the real gain for its estimations.
+ *
+ * \return The gain code to pass to V4L2
+ */
+uint32_t CameraSensorHelper::gainCode(double gain) const
+{
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
+
+ return (l->c0 - l->c1 * gain) /
+ (l->m1 * gain - l->m0);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
+
+ return std::log2(gain / e->a) / e->m;
+ } else {
+ ASSERT(false);
+ return 0;
+ }
+}
+
+/**
+ * \brief Compute the real gain from the V4L2 subdev control gain code
+ * \param[in] gainCode The V4L2 subdev control gain
+ *
+ * This function aims to abstract the calculation of the gain letting the IPA
+ * use the real gain for its estimations. It is the counterpart of the function
+ * CameraSensorHelper::gainCode.
+ *
+ * \return The real gain
+ */
+double CameraSensorHelper::gain(uint32_t gainCode) const
+{
+ double gain = static_cast<double>(gainCode);
+
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
+
+ return (l->m0 * gain + l->c0) /
+ (l->m1 * gain + l->c1);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
+
+ return e->a * std::exp2(e->m * gain);
+ } else {
+ ASSERT(false);
+ return 0.0;
+ }
+}
+
+/**
+ * \struct CameraSensorHelper::AnalogueGainLinear
+ * \brief Analogue gain constants for the linear gain model
+ *
+ * The relationship between the integer gain parameter and the resulting gain
+ * multiplier is given by the following equation:
+ *
+ * \f$gain=\frac{m0x+c0}{m1x+c1}\f$
+ *
+ * Where 'x' is the gain control parameter, and m0, m1, c0 and c1 are
+ * image-sensor-specific constants.
+ * These constants are static parameters, and for any given image sensor either
+ * m0 or m1 shall be zero.
+ *
+ * The full Gain equation therefore reduces to either:
+ *
+ * \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$
+ *
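+ * For example, with the constants used for the imx219 later in this file
+ * (m0 = 0, c0 = 256, m1 = -1, c1 = 256) the model reduces to
+ * \f$gain=\frac{256}{256-x}\f$, so a gain code of 128 corresponds to an
+ * analogue gain of 2.0.
+ *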
+ * \var CameraSensorHelper::AnalogueGainLinear::m0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m1
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c1
+ * \brief Constant used in the linear gain coding/decoding
+ */
+
+/**
+ * \struct CameraSensorHelper::AnalogueGainExp
+ * \brief Analogue gain constants for the exponential gain model
+ *
+ * The relationship between the integer gain parameter and the resulting gain
+ * multiplier is given by the following equation:
+ *
+ * \f$gain = a \cdot 2^{m \cdot x}\f$
+ *
+ * Where 'x' is the gain control parameter, and 'a' and 'm' are image
+ * sensor-specific constants.
+ *
+ * This is a subset of the MIPI CCS exponential gain model with the linear
+ * factor 'a' being a constant, but with the exponent being configurable
+ * through the 'm' coefficient.
+ *
+ * When the gain is expressed in dB, 'a' is equal to 1 and 'm' to
+ * \f$log_{2}{10^{\frac{1}{20}}}\f$.
+ *
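+ * For example, the imx290 handled later in this file encodes its gain in
+ * 0.3 dB steps, giving \f$m = log_{2}{10^{\frac{0.3}{20}}} \approx 0.0498\f$;
+ * a gain code of 100 then corresponds to 30 dB, i.e. an analogue gain of
+ * roughly 31.6.
+ *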
+ * \var CameraSensorHelper::AnalogueGainExp::a
+ * \brief Constant used in the exponential gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainExp::m
+ * \brief Constant used in the exponential gain coding/decoding
+ */
+
+/**
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ * \sa CameraSensorHelper::blackLevel()
+ */
+
+/**
+ * \var CameraSensorHelper::gain_
+ * \brief The analogue gain parameters used for calculation
+ *
+ * The analogue gain is calculated through a formula, and its parameters are
+ * sensor specific. Use this variable to store the values at init time.
+ */
+
+/**
+ * \class CameraSensorHelperFactoryBase
+ * \brief Base class for camera sensor helper factories
+ *
+ * The CameraSensorHelperFactoryBase class is the base of all specializations of
+ * the CameraSensorHelperFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
+
+/**
+ * \brief Construct a camera sensor helper factory base
+ * \param[in] name Name of the camera sensor helper class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
+ */
+CameraSensorHelperFactoryBase::CameraSensorHelperFactoryBase(const std::string name)
+ : name_(name)
+{
+ registerType(this);
+}
+
+/**
+ * \brief Create an instance of the CameraSensorHelper corresponding to
+ * a named factory
+ * \param[in] name Name of the factory
+ *
+ * \return A unique pointer to a new instance of the CameraSensorHelper subclass
+ * corresponding to the named factory or a null pointer if no such factory
+ * exists
+ */
+std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactoryBase::create(const std::string &name)
+{
+ const std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
+
+ for (const CameraSensorHelperFactoryBase *factory : factories) {
+ if (name != factory->name_)
+ continue;
+
+ return factory->createInstance();
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a camera sensor helper class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor helper
+ *
+ * The caller is responsible to guarantee the uniqueness of the camera sensor
+ * helper name.
+ */
+void CameraSensorHelperFactoryBase::registerType(CameraSensorHelperFactoryBase *factory)
+{
+ std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all camera sensor helper factories
+ * \return The list of camera sensor helper factories
+ */
+std::vector<CameraSensorHelperFactoryBase *> &CameraSensorHelperFactoryBase::factories()
+{
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorHelperFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \class CameraSensorHelperFactory
+ * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \tparam _Helper The camera sensor helper class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensorHelper classes, the
+ * CameraSensorHelperFactory class implements auto-registration of camera sensor
+ * helpers. Each CameraSensorHelper subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
+ * instance of a CameraSensorHelperFactory subclass and register it with the
+ * static list of factories.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::CameraSensorHelperFactory(const char *name)
+ * \brief Construct a camera sensor helper factory
+ * \param[in] name Name of the camera sensor helper class
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorHelperFactoryBase::factories()
+ * function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::createInstance() const
+ * \brief Create an instance of the CameraSensorHelper corresponding to the
+ * factory
+ *
+ * \return A unique pointer to a newly constructed instance of the
+ * CameraSensorHelper subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR_HELPER
+ * \brief Register a camera sensor helper with the camera sensor helper factory
+ * \param[in] name Sensor model name used to register the class
+ * \param[in] helper Class name of CameraSensorHelper derived class to register
+ *
+ * Register a CameraSensorHelper subclass with the factory and make it available
+ * to try and match sensors.
+ */
+
+/* -----------------------------------------------------------------------------
+ * Sensor-specific subclasses
+ */
+
+#ifndef __DOXYGEN__
+
+/*
+ * Helper function to compute the m parameter of the exponential gain model
+ * when the gain code is expressed in dB.
+ */
+static constexpr double expGainDb(double step)
+{
+ constexpr double log2_10 = 3.321928094887362;
+
+ /*
+ * The gain code is expressed in step * dB (e.g. in 0.1 dB steps):
+ *
+ * G_code = G_dB/step = 20/step*log10(G_linear)
+ *
+ * Inverting the formula, we get
+ *
+ * G_linear = 10^(step/20*G_code) = 2^(log2(10)*step/20*G_code)
+ */
+ return log2_10 * step / 20;
+}
+
+class CameraSensorHelperAr0144 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperAr0144()
+ {
+ /* Power-on default value: 168 at 12bits. */
+ blackLevel_ = 2688;
+ }
+
+ uint32_t gainCode(double gain) const override
+ {
+ /* The recommended minimum gain is 1.6842 to avoid artifacts. */
+ gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45);
+
+ /*
+ * The analogue gain is made of a coarse exponential gain in
+ * the range [2^0, 2^4] and a fine inversely linear gain in the
+ * range [1.0, 2.0[. There is an additional fixed 1.153125
+ * multiplier when the coarse gain reaches 2^2.
+ */
+
+ if (gain > 4.0)
+ gain /= 1.153125;
+
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (1 - (1 << coarse) / gain) * 32;
+
+ /* The fine gain rounding depends on the coarse gain. */
+ if (coarse == 1 || coarse == 3)
+ fine &= ~1;
+ else if (coarse == 4)
+ fine &= ~3;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+ unsigned int d1;
+ double d2, m;
+
+ switch (coarse) {
+ default:
+ case 0:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.0;
+ break;
+ case 1:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.0;
+ break;
+ case 2:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.153125;
+ break;
+ case 3:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.153125;
+ break;
+ case 4:
+ d1 = 4;
+ d2 = 8.0;
+ m = 1.153125;
+ break;
+ }
+
+ /*
+ * With infinite precision, the calculated gain would be exact,
+ * and the reverse conversion with gainCode() would produce the
+ * same gain code. In the real world, rounding errors may cause
+ * the calculated gain to be lower by an amount negligible for
+ * all purposes, except for the reverse conversion. Converting
+ * the gain to a gain code could then return the quantized value
+ * just lower than the original gain code. To avoid this, tests
+ * showed that adding the machine epsilon to the multiplier m is
+ * sufficient.
+ */
+ m += std::numeric_limits<decltype(m)>::epsilon();
+
+ return m * (1 << coarse) / (1.0 - (fine / d1) / d2);
+ }
+
+private:
+ static constexpr double kStep_ = 16;
+};
+REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144)
+
+class CameraSensorHelperAr0521 : public CameraSensorHelper
+{
+public:
+ uint32_t gainCode(double gain) const override
+ {
+ gain = std::clamp(gain, 1.0, 15.5);
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+
+ return (1 << coarse) * (1 + fine / kStep_);
+ }
+
+private:
+ static constexpr double kStep_ = 16;
+};
+REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+
+class CameraSensorHelperGc05a2 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc05a2()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2)
+
+class CameraSensorHelperGc08a3 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc08a3()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3)
+
+class CameraSensorHelperImx214 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx214()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214)
+
+class CameraSensorHelperImx219 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx219()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 256, -1, 256 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219)
+
+class CameraSensorHelperImx258 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx258()
+ {
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
+
+class CameraSensorHelperImx283 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx283()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
+
+class CameraSensorHelperImx290 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx290()
+ {
+ /* From datasheet: 0xf0 at 12bits. */
+ blackLevel_ = 3840;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290)
+
+class CameraSensorHelperImx296 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx296()
+ {
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
+
+class CameraSensorHelperImx327 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327)
+
+class CameraSensorHelperImx335 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx335()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
+
+class CameraSensorHelperImx415 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx415()
+ {
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+
+class CameraSensorHelperImx462 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462)
+
+class CameraSensorHelperImx477 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx477()
+ {
+ gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
+
+class CameraSensorHelperOv2685 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2685()
+ {
+ /*
+ * The Sensor Manual doesn't appear to document the gain model.
+ * This has been validated with some empirical testing only.
+ */
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
+
+class CameraSensorHelperOv2740 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2740()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
+
+class CameraSensorHelperOv4689 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv4689()
+ {
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
+
+class CameraSensorHelperOv5640 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5640()
+ {
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
+
+class CameraSensorHelperOv5647 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5647()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
+
+class CameraSensorHelperOv5670 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5670()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670)
+
+class CameraSensorHelperOv5675 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5675()
+ {
+ /* From Linux kernel driver: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675)
+
+class CameraSensorHelperOv5693 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5693()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
+
+class CameraSensorHelperOv64a40 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv64a40()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
+
+class CameraSensorHelperOv8858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8858()
+ {
+ /*
+ * \todo Validate the selected 1/128 step value as it differs
+ * from what the sensor manual describes.
+ *
+ * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
+ */
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
+
+class CameraSensorHelperOv8865 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8865()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865)
+
+class CameraSensorHelperOv13858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv13858()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858)
+
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
new file mode 100644
index 00000000..a9300a64
--- /dev/null
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Helper class that performs sensor-specific parameter computations
+ */
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <stdint.h>
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/base/class.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class CameraSensorHelper
+{
+public:
+ CameraSensorHelper() = default;
+ virtual ~CameraSensorHelper() = default;
+
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
+ virtual uint32_t gainCode(double gain) const;
+ virtual double gain(uint32_t gainCode) const;
+
+protected:
+ struct AnalogueGainLinear {
+ int16_t m0;
+ int16_t c0;
+ int16_t m1;
+ int16_t c1;
+ };
+
+ struct AnalogueGainExp {
+ double a;
+ double m;
+ };
+
+ std::optional<int16_t> blackLevel_;
+ std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_;
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
+};
+
+class CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactoryBase(const std::string name);
+ virtual ~CameraSensorHelperFactoryBase() = default;
+
+ static std::unique_ptr<CameraSensorHelper> create(const std::string &name);
+
+ static std::vector<CameraSensorHelperFactoryBase *> &factories();
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactoryBase)
+
+ static void registerType(CameraSensorHelperFactoryBase *factory);
+
+ virtual std::unique_ptr<CameraSensorHelper> createInstance() const = 0;
+
+ std::string name_;
+};
+
+template<typename _Helper>
+class CameraSensorHelperFactory final : public CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactory(const char *name)
+ : CameraSensorHelperFactoryBase(name)
+ {
+ }
+
+private:
+ std::unique_ptr<CameraSensorHelper> createInstance() const override
+ {
+ return std::make_unique<_Helper>();
+ }
+};
+
+#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
+static CameraSensorHelperFactory<helper> global_##helper##Factory(name);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp
new file mode 100644
index 00000000..97124cf4
--- /dev/null
+++ b/src/ipa/libipa/colours.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#include "colours.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \file colours.h
+ * \brief Functions to reduce code duplication between IPA modules
+ */
+
+/**
+ * \brief Estimate luminance from RGB values following ITU-R BT.601
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates a luminance value from a triplet of Red, Green and
+ * Blue values, following the formula defined by ITU-R Recommendation BT.601-7
+ * which can be found at https://www.itu.int/rec/R-REC-BT.601
+ *
+ * \return The estimated luminance value
+ */
+double rec601LuminanceFromRGB(const RGB<double> &rgb)
+{
+ static const Vector<double, 3> rgb2y{{
+ 0.299, 0.587, 0.114
+ }};
+
+ return rgb.dot(rgb2y);
+}
+
+/**
+ * \brief Estimate correlated colour temperature from RGB color space input
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates the correlated color temperature from RGB color space
+ * input. In physics and color science, the Planckian locus or black body locus
+ * is the path or locus that the color of an incandescent black body would take
+ * in a particular chromaticity space as the black body temperature changes.
+ *
+ * If a narrow range of color temperatures is considered (those encapsulating
+ * daylight being the most practical case) one can approximate the Planckian
+ * locus in order to calculate the CCT in terms of chromaticity coordinates.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Color_temperature#Approximation
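+ *
+ * A brief usage sketch; the RGB triplet below is illustrative only, and its
+ * construction assumes the std::array-based Vector constructor:
+ *
+ * \code{.cpp}
+ * RGB<double> avg{{ 0.45, 0.50, 0.42 }};
+ * double y = rec601LuminanceFromRGB(avg);
+ * uint32_t cct = estimateCCT(avg);
+ * \endcode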
+ *
+ * \return The estimated color temperature
+ */
+uint32_t estimateCCT(const RGB<double> &rgb)
+{
+ /*
+ * Convert the RGB values to CIE tristimulus values (XYZ) and divide by
+ * the sum of X, Y and Z to calculate the CIE xy chromaticity.
+ */
+ static const Matrix<double, 3, 3> rgb2xyz({
+ -0.14282, 1.54924, -0.95641,
+ -0.32466, 1.57837, -0.73191,
+ -0.68202, 0.77073, 0.56332
+ });
+
+ Vector<double, 3> xyz = rgb2xyz * rgb;
+ xyz /= xyz.sum();
+
+ /* Calculate CCT */
+ double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y());
+ return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h
new file mode 100644
index 00000000..fa6a8b57
--- /dev/null
+++ b/src/ipa/libipa/colours.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+double rec601LuminanceFromRGB(const RGB<double> &rgb);
+uint32_t estimateCCT(const RGB<double> &rgb);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
new file mode 100644
index 00000000..f235316d
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+#include "exposure_mode_helper.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file exposure_mode_helper.h
+ * \brief Helper class that performs computations relating to exposure
+ *
+ * AEGC algorithms need to split exposure between exposure time, analogue gain
+ * and digital gain. Multiple implementations do so based on paired stages of
+ * exposure time and gain limits; this helper avoids duplicating that code.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(ExposureModeHelper)
+
+namespace ipa {
+
+/**
+ * \class ExposureModeHelper
+ * \brief Class for splitting exposure into exposure time and total gain
+ *
+ * The ExposureModeHelper class provides a standard interface through which an
+ * AEGC algorithm can divide exposure between exposure time and gain. It is
+ * configured with a set of exposure time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing exposure time up to the exposure time value
+ * from the first pair in the set in an attempt to meet the required exposure
+ * value.
+ *
+ * If the required exposure is not achievable by the first exposure time value
+ * alone it ramps gain up to the value from the first pair in the set. If the
+ * required exposure is still not met it then allows exposure time to ramp up to
+ * the exposure time value from the second pair in the set, and continues in this
+ * vein until either the required exposure is met, or else the hardware's
+ * exposure time or gain limits are reached.
+ *
+ * This method allows users to strike a balance between a well-exposed image and
+ * an acceptable frame-rate, as opposed to simply maximising exposure time
+ * followed by gain. The same helpers can be used to perform the latter
+ * operation if needed by passing an empty set of pairs to the initialisation
+ * function.
+ *
+ * The gain values may exceed a camera sensor's analogue gain limits if either
+ * it or the IPA is also capable of digital gain. The configure() function must
+ * be called with the hardware's limits to inform the helper of those
+ * constraints. Any gain that is needed will be applied as analogue gain first
+ * until the hardware's limit is reached, following which digital gain will be
+ * used.
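+ *
+ * A minimal usage sketch, with hypothetical stage and limit values:
+ *
+ * \code{.cpp}
+ * std::vector<std::pair<utils::Duration, double>> stages = {
+ *         { 10ms, 2.0 },
+ *         { 33ms, 4.0 },
+ * };
+ *
+ * ExposureModeHelper helper(stages);
+ * helper.setLimits(100us, 33ms, 1.0, 8.0);
+ *
+ * // With the values above, an exposure of 60ms splits into a 30ms exposure
+ * // time, 2.0 analogue gain and 1.0 digital gain.
+ * auto [exposureTime, aGain, dGain] = helper.splitExposure(60ms);
+ * \endcode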
+ */
+
+/**
+ * \brief Construct an ExposureModeHelper instance
+ * \param[in] stages The vector of paired exposure time and gain limits
+ *
+ * The input stages are exposure time and _total_ gain pairs; the gain
+ * encompasses both analogue and digital gain.
+ *
+ * The vector of stages may be empty. In that case, the helper will simply use
+ * the runtime limits set through setLimits() instead.
+ */
+ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
+{
+ minExposureTime_ = 0us;
+ maxExposureTime_ = 0us;
+ minGain_ = 0;
+ maxGain_ = 0;
+
+ for (const auto &[s, g] : stages) {
+ exposureTimes_.push_back(s);
+ gains_.push_back(g);
+ }
+}
+
+/**
+ * \brief Set the exposure time and gain limits
+ * \param[in] minExposureTime The minimum exposure time supported
+ * \param[in] maxExposureTime The maximum exposure time supported
+ * \param[in] minGain The minimum analogue gain supported
+ * \param[in] maxGain The maximum analogue gain supported
+ *
+ * This function configures the exposure time and analogue gain limits that need
+ * to be adhered to as the helper divides up exposure. Note that this function
+ * *must* be called whenever those limits change and before splitExposure() is
+ * used.
+ *
+ * If the algorithm using the helper needs to fix either exposure time,
+ * analogue gain, or both, it can do so by setting the respective minimum and
+ * maximum to the same value.
+ */
+void ExposureModeHelper::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ minExposureTime_ = minExposureTime;
+ maxExposureTime_ = maxExposureTime;
+ minGain_ = minGain;
+ maxGain_ = maxGain;
+}
+
+utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const
+{
+ return std::clamp(exposureTime, minExposureTime_, maxExposureTime_);
+}
+
+double ExposureModeHelper::clampGain(double gain) const
+{
+ return std::clamp(gain, minGain_, maxGain_);
+}
+
+/**
+ * \brief Split exposure into exposure time and gain
+ * \param[in] exposure Exposure value
+ *
+ * This function divides a given exposure into exposure time, analogue and
+ * digital gain by iterating through stages of exposure time and gain limits.
+ * At each stage the current stage's exposure time limit is multiplied by the
+ * previous stage's gain limit (or 1.0 initially) to see if the combination of
+ * the two can meet the required exposure. If they cannot then the current
+ * stage's exposure time limit is multiplied by the same stage's gain limit to
+ * see if that combination can meet the required exposure. If they cannot
+ * then the function moves to consider the next stage.
+ *
+ * When a combination of exposure time and gain _stage_ limits are found that
+ * are sufficient to meet the required exposure, the function attempts to reduce
+ * exposure time as much as possible whilst fixing gain and still meeting the
+ * exposure. If a _runtime_ limit prevents exposure time from being lowered
+ * enough to meet the exposure with gain fixed at the stage limit, gain is also
+ * lowered to compensate.
+ *
+ * Once the exposure time and gain values are ascertained, gain is assigned as
+ * analogue gain as much as possible, with digital gain only in use if the
+ * maximum analogue gain runtime limit is unable to accommodate the exposure
+ * value.
+ *
+ * If no combination of exposure time and gain limits is found that meets the
+ * required exposure, the helper falls-back to simply maximising the exposure
+ * time first, followed by analogue gain, followed by digital gain.
+ *
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+ExposureModeHelper::splitExposure(utils::Duration exposure) const
+{
+ ASSERT(maxExposureTime_);
+ ASSERT(maxGain_);
+
+ bool gainFixed = minGain_ == maxGain_;
+ bool exposureTimeFixed = minExposureTime_ == maxExposureTime_;
+
+ /*
+ * There's no point entering the loop if we can change neither gain
+ * nor exposure time anyway.
+ */
+ if (exposureTimeFixed && gainFixed)
+ return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) };
+
+ utils::Duration exposureTime;
+ double stageGain = 1.0;
+ double gain;
+
+ for (unsigned int stage = 0; stage < gains_.size(); stage++) {
+ double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
+ utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]);
+ stageGain = clampGain(gains_[stage]);
+
+ /*
+ * We perform the clamping on both exposure time and gain in
+ * case the helper has had limits set that prevent those values
+ * being lowered beyond a certain minimum; this can happen at
+ * runtime for various reasons and so would not be known when
+ * the stage limits are initialised.
+ */
+
+ if (stageExposureTime * lastStageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+
+ if (stageExposureTime * stageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+ }
+
+ /*
+ * From here on all we can do is max out the exposure time, followed by
+ * the analogue gain. If we still haven't achieved the target we send
+ * the rest of the exposure to digital gain. If we were given no
+ * stages to use then the default stageGain of 1.0 is used so that
+ * exposure time is maxed before gain is touched at all.
+ */
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+}
+
+/**
+ * \fn ExposureModeHelper::minExposureTime()
+ * \brief Retrieve the configured minimum exposure time limit set through
+ * setLimits()
+ * \return The minExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxExposureTime()
+ * \brief Retrieve the configured maximum exposure time set through setLimits()
+ * \return The maxExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::minGain()
+ * \brief Retrieve the configured minimum gain set through setLimits()
+ * \return The minGain_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxGain()
+ * \brief Retrieve the configured maximum gain set through setLimits()
+ * \return The maxGain_ value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
new file mode 100644
index 00000000..c5be1b67
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+
+#pragma once
+
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class ExposureModeHelper
+{
+public:
+ ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
+ ~ExposureModeHelper() = default;
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::tuple<utils::Duration, double, double>
+ splitExposure(utils::Duration exposure) const;
+
+ utils::Duration minExposureTime() const { return minExposureTime_; }
+ utils::Duration maxExposureTime() const { return maxExposureTime_; }
+ double minGain() const { return minGain_; }
+ double maxGain() const { return maxGain_; }
+
+private:
+ utils::Duration clampExposureTime(utils::Duration exposureTime) const;
+ double clampGain(double gain) const;
+
+ std::vector<utils::Duration> exposureTimes_;
+ std::vector<double> gains_;
+
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
+ double minGain_;
+ double maxGain_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp
new file mode 100644
index 00000000..0365e919
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#include "fc_queue.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+/**
+ * \file fc_queue.h
+ * \brief Queue of per-frame contexts
+ */
+
+/**
+ * \struct FrameContext
+ * \brief Context for a frame
+ *
+ * The frame context stores data specific to a single frame processed by the
+ * IPA module. Each frame processed by the IPA module has a context associated
+ * with it, accessible through the Frame Context Queue.
+ *
+ * Fields in the frame context should reflect values and controls associated
+ * with the specific frame as requested by the application, and as configured by
+ * the hardware. Fields can be read by algorithms to determine if they should
+ * update any specific action for this frame, and finally to update the metadata
+ * control lists when the frame is fully completed.
+ *
+ * \var FrameContext::frame
+ * \brief The frame number
+ */
+
+/**
+ * \class FCQueue
+ * \brief A support class for managing FrameContext instances in IPA modules
+ * \tparam FrameContext The IPA module-specific FrameContext derived class type
+ *
+ * Along with the Module and Algorithm classes, the frame context queue is a
+ * core component of the libipa infrastructure. It stores per-frame contexts
+ * used by the Algorithm operations. By centralizing the lifetime management of
+ * the contexts and implementing safeguards against underflows and overflows, it
+ * simplifies IPA modules and improves their reliability.
+ *
+ * The queue references frame contexts by a monotonically increasing sequence
+ * number. The FCQueue design assumes that this number matches both the sequence
+ * number of the corresponding frame, as generated by the camera sensor, and the
+ * sequence number of the request. This allows IPA modules to obtain the frame
+ * context from any location where a request or a frame is available.
+ *
+ * A frame context normally begins its lifetime when the corresponding request
+ * is queued, way before the frame is captured by the camera sensor. IPA modules
+ * allocate the context from the queue at that point, calling alloc() using the
+ * request number. The queue initializes the context, and the IPA module then
+ * populates it with data from the request. The context can be later retrieved
+ * with a call to get(), typically when the IPA module is requested to provide
+ * sensor or ISP parameters or receives statistics for a frame. The frame number
+ * is used at that point to identify the context.
+ *
+ * If an application fails to queue requests to the camera fast enough, frames
+ * may be produced by the camera sensor and processed by the IPA module without
+ * a corresponding request having been queued to the IPA module. This creates an
+ * underrun condition, where the IPA module will try to get a frame context that
+ * hasn't been allocated. In this case, the get() function will allocate and
+ * initialize a context for the frame, and log a message. Algorithms will not
+ * apply the controls associated with the late request, but should otherwise
+ * behave correctly.
+ *
+ * \todo Mark the frame context with a per-frame control error flag in case of
+ * underrun, and research how algorithms should handle this.
+ *
+ * At its core, the queue uses a circular buffer to avoid dynamic memory
+ * allocation at runtime. The buffer is pre-allocated with a maximum number of
+ * entries when the FCQueue instance is constructed. Entries are initialized on
+ * first use by alloc() or, in underrun conditions, get(). The queue is not
+ * allowed to overflow, which must be ensured by pipeline handlers never
+ * queuing more in-flight requests to the IPA module than the queue size. If an
+ * overflow condition is detected, the queue will log a fatal error.
+ *
+ * IPA module-specific frame context implementations shall inherit from the
+ * FrameContext base class to support the minimum required features for a
+ * FrameContext.
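+ *
+ * A minimal usage sketch; the context type, queue depth and field are
+ * hypothetical:
+ *
+ * \code{.cpp}
+ * struct MyFrameContext : public FrameContext {
+ *         uint32_t exposure;
+ * };
+ *
+ * FCQueue<MyFrameContext> queue(16);
+ *
+ * // When the request for frame is queued to the IPA module:
+ * MyFrameContext &ctx = queue.alloc(frame);
+ * ctx.exposure = requestedExposure;
+ *
+ * // Later, when parameters or statistics for frame are processed:
+ * MyFrameContext &frameCtx = queue.get(frame);
+ * \endcode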
+ */
+
+/**
+ * \fn FCQueue::FCQueue(unsigned int size)
+ * \brief Construct a frame contexts queue of a specified size
+ * \param[in] size The number of contexts in the queue
+ */
+
+/**
+ * \fn FCQueue::clear()
+ * \brief Clear the contexts queue
+ *
+ * IPA modules must clear the frame context queue at the beginning of a new
+ * streaming session, in IPAModule::start().
+ *
+ * \todo Fix any issue this may cause with requests queued before the camera is
+ * started.
+ */
+
+/**
+ * \fn FCQueue::alloc(uint32_t frame)
+ * \brief Allocate and return a FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * The first call to obtain a FrameContext from the FCQueue should be handled
+ * through this function. The FrameContext will be initialised, if not
+ * initialised already, and returned to the caller.
+ *
+ * If the FrameContext was already initialised for this \a frame, a warning will
+ * be reported and the previously initialised FrameContext is returned.
+ *
+ * Frame contexts are expected to be initialised when a Request is first passed
+ * to the IPA module in IPAModule::queueRequest().
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+/**
+ * \fn FCQueue::get(uint32_t frame)
+ * \brief Obtain the FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * If the FrameContext is not correctly initialised for the \a frame, it will be
+ * initialised.
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
new file mode 100644
index 00000000..a1d13652
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+template<typename FrameContext>
+class FCQueue;
+
+struct FrameContext {
+private:
+ template<typename T> friend class FCQueue;
+ uint32_t frame;
+ bool initialised = false;
+};
+
+template<typename FrameContext>
+class FCQueue
+{
+public:
+ FCQueue(unsigned int size)
+ : contexts_(size)
+ {
+ }
+
+ void clear()
+ {
+ for (FrameContext &ctx : contexts_) {
+ ctx.initialised = false;
+ ctx.frame = 0;
+ }
+ }
+
+ FrameContext &alloc(const uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * Do not re-initialise if a get() call has already fetched this
+ * frame context, to preserve the context.
+ *
+ * \todo If the sequence number of the context to initialise
+ * is smaller than the sequence number of the queue slot to use,
+ * it means that we had a serious request underrun and more
+ * frames than the queue size have been produced since the last
+ * time the application queued a request. Does this deserve
+ * an error condition?
+ */
+ if (frame != 0 && frame <= frameContext.frame)
+ LOG(FCQueue, Warning)
+ << "Frame " << frame << " already initialised";
+ else
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ FrameContext &get(uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * If the IPA algorithms try to access a frame context slot which
+ * has been already overwritten by a newer context, it means the
+ * frame context queue has overflowed and the desired context
+ * has been forever lost. The pipeline handler shall avoid
+ * queueing more requests to the IPA than the frame context
+ * queue size.
+ */
+ if (frame < frameContext.frame)
+ LOG(FCQueue, Fatal) << "Frame context for " << frame
+ << " has been overwritten by "
+ << frameContext.frame;
+
+ if (frame == 0 && !frameContext.initialised) {
+ /*
+ * If the IPA calls get() at start() time it will get an
+ * uninitialised FrameContext, as the "frame ==
+ * frameContext.frame" check below succeeds because
+ * FrameContexts are zeroed at creation time.
+ *
+ * Make sure the FrameContext gets initialised if get()
+ * is called before alloc() by the IPA for frame#0.
+ */
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ if (frame == frameContext.frame)
+ return frameContext;
+
+ /*
+ * The frame context has been retrieved before it was
+ * initialised through a call to alloc(). This indicates an
+ * algorithm attempted to access a frame context before it was
+ * queued to the IPA. Controls applied for this request may be
+ * left unhandled.
+ *
+ * \todo Set an error flag for per-frame control errors.
+ */
+ LOG(FCQueue, Warning)
+ << "Obtained an uninitialised FrameContext for " << frame;
+
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+private:
+ void init(FrameContext &frameContext, const uint32_t frame)
+ {
+ frameContext = {};
+ frameContext.frame = frame;
+ frameContext.initialised = true;
+ }
+
+ std::vector<FrameContext> contexts_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fixedpoint.cpp b/src/ipa/libipa/fixedpoint.cpp
new file mode 100644
index 00000000..6b698fc5
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.cpp
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#include "fixedpoint.h"
+
+/**
+ * \file fixedpoint.h
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \fn R floatingToFixedPoint(T number)
+ * \brief Convert a floating point number to a fixed-point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the fixed-point representation
+ * \tparam T Input type of the floating point representation
+ * \param number The floating point number to convert to fixed point
+ * \return The converted value
+ */
+
+/**
+ * \fn R fixedToFloatingPoint(T number)
+ * \brief Convert a fixed-point number to a floating point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the floating point representation
+ * \tparam T Input type of the fixed-point representation
+ * \param number The fixed point number to convert to floating point
+ * \return The converted value
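+ *
+ * As a round-trip sketch, assuming an illustrative Q4.7 register layout:
+ *
+ * \code{.cpp}
+ * uint16_t reg = floatingToFixedPoint<4, 7, uint16_t, double>(2.5);
+ * double val = fixedToFloatingPoint<4, 7, double, uint16_t>(reg);
+ * // reg == 320, val == 2.5
+ * \endcode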
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fixedpoint.h b/src/ipa/libipa/fixedpoint.h
new file mode 100644
index 00000000..709cf50f
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#pragma once
+
+#include <cmath>
+#include <type_traits>
+
+namespace libcamera {
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_integral_v<R> &&
+ std::is_floating_point_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R floatingToFixedPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(R));
+ static_assert(I + F <= sizeof(R) * 8);
+
+ /*
+ * The intermediate cast to int is needed on arm platforms to properly
+ * cast negative values. See
+ * https://embeddeduse.com/2013/08/25/casting-a-negative-float-to-an-unsigned-int/
+ */
+ R mask = (1 << (F + I)) - 1;
+ R frac = static_cast<R>(static_cast<int>(std::round(number * (1 << F)))) & mask;
+
+ return frac;
+}
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_floating_point_v<R> &&
+ std::is_integral_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R fixedToFloatingPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(T));
+ static_assert(I + F <= sizeof(T) * 8);
+
+ /*
+ * Recreate the upper bits in case of a negative number by shifting the sign
+ * bit from the fixed-point position to the most significant bit of the
+ * unsigned value, then right-shifting by the same amount, which keeps the
+ * sign bit in place. This can be optimized by the compiler quite well.
+ */
+ int remaining_bits = sizeof(int) * 8 - (I + F);
+ int t = static_cast<int>(static_cast<unsigned>(number) << remaining_bits) >> remaining_bits;
+ return static_cast<R>(t) / static_cast<R>(1 << F);
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
new file mode 100644
index 00000000..10e44b54
--- /dev/null
+++ b/src/ipa/libipa/histogram.cpp
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculations
+ */
+#include "histogram.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file histogram.h
+ * \brief Class to represent Histograms and manipulate them
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Histogram
+ * \brief The base class for creating histograms
+ *
+ * This class stores a cumulative frequency histogram, which is a mapping that
+ * counts the cumulative number of observations in all of the bins up to the
+ * specified bin. It can be used to find quantiles and averages between quantiles.
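+ *
+ * A brief usage sketch, with arbitrary bin values:
+ *
+ * \code{.cpp}
+ * std::array<uint32_t, 4> bins{ 4, 8, 2, 2 };
+ * Histogram hist(Span<const uint32_t>(bins));
+ * double median = hist.quantile(0.5);
+ * double midMean = hist.interQuantileMean(0.25, 0.75);
+ * \endcode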
+ */
+
+/**
+ * \fn Histogram::Histogram()
+ * \brief Construct an empty Histogram
+ *
+ * This empty constructor exists largely to allow Histograms to be embedded in
+ * other classes which may be created before the contents of the Histogram are
+ * known.
+ */
+
+/**
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ */
+Histogram::Histogram(Span<const uint32_t> data)
+{
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + value;
+}
+
+/**
+ * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform)
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ * \param[in] transform The transformation function to apply to every bin
+ */
+
+/**
+ * \fn Histogram::bins()
+ * \brief Retrieve the number of bins currently used by the Histogram
+ * \return Number of bins
+ */
+
+/**
+ * \fn Histogram::data()
+ * \brief Retrieve the internal data
+ * \return The data
+ */
+
+/**
+ * \fn Histogram::total()
+ * \brief Retrieve the total number of values in the data set
+ * \return Number of values
+ */
+
+/**
+ * \brief Cumulative frequency up to a (fractional) point in a bin
+ * \param[in] bin The bin up to which to cumulate
+ *
+ * With F(p) the cumulative frequency of the histogram, F(p) is 0 at the
+ * bottom of the histogram and reaches total() at p equal to the number of
+ * bins. The pixels are spread evenly throughout the “bin” in which they lie,
+ * so that F(p) is a continuous (monotonically increasing) function.
+ *
+ * \return The cumulative frequency from 0 up to the specified bin
+ */
+uint64_t Histogram::cumulativeFrequency(double bin) const
+{
+ if (bin <= 0)
+ return 0;
+ else if (bin >= bins())
+ return total();
+ int b = static_cast<int32_t>(bin);
+ return cumulative_[b] +
+ (bin - b) * (cumulative_[b + 1] - cumulative_[b]);
+}
+
+/**
+ * \brief Return the (fractional) bin of the point \a q through the histogram
+ * \param[in] q The desired quantile (0 <= q <= 1)
+ * \param[in] first The low search limit in bins (default is 0)
+ * \param[in] last The high search limit in bins (default is UINT_MAX)
+ *
+ * A quantile gives us the point p = Q(q) in the range such that a proportion
+ * q of the pixels lie below p. A familiar quantile is Q(0.5) which is the median
+ * of a distribution.
+ *
+ * \return The fractional bin of the point
+ */
+double Histogram::quantile(double q, uint32_t first, uint32_t last) const
+{
+ if (last == UINT_MAX)
+ last = cumulative_.size() - 2;
+ ASSERT(first <= last);
+
+ uint64_t item = q * total();
+ /* Binary search to find the right bin */
+ while (first < last) {
+ int middle = (first + last) / 2;
+ /* Is it between first and middle ? */
+ if (cumulative_[middle + 1] > item)
+ last = middle;
+ else
+ first = middle + 1;
+ }
+ ASSERT(item >= cumulative_[first] && item <= cumulative_[last + 1]);
+
+ double frac;
+ if (cumulative_[first + 1] == cumulative_[first])
+ frac = 0;
+ else
+ frac = (item - cumulative_[first]) / (cumulative_[first + 1] - cumulative_[first]);
+ return first + frac;
+}
+
+/**
+ * \brief Calculate the mean between two quantiles
+ * \param[in] lowQuantile The low quantile
+ * \param[in] highQuantile The high quantile
+ *
+ * Quantiles alone are not ideal for metering as they suffer from several
+ * limitations, so a different concept is used here: the inter-quantile mean.
+ * It returns the mean of all pixels between \a lowQuantile and \a highQuantile.
+ *
+ * \return The mean histogram bin value between the two quantiles
+ */
+double Histogram::interQuantileMean(double lowQuantile, double highQuantile) const
+{
+ ASSERT(highQuantile > lowQuantile);
+ /* Proportion of pixels which lies below lowQuantile */
+ double lowPoint = quantile(lowQuantile);
+ /* Proportion of pixels which lies below highQuantile */
+ double highPoint = quantile(highQuantile, static_cast<uint32_t>(lowPoint));
+ double sumBinFreq = 0, cumulFreq = 0;
+
+ for (double p_next = floor(lowPoint) + 1.0;
+ p_next <= ceil(highPoint);
+ lowPoint = p_next, p_next += 1.0) {
+ int bin = floor(lowPoint);
+ double freq = (cumulative_[bin + 1] - cumulative_[bin])
+ * (std::min(p_next, highPoint) - lowPoint);
+
+ /* Accumulate weighted bin */
+ sumBinFreq += bin * freq;
+ /* Accumulate weights */
+ cumulFreq += freq;
+ }
+ /* add 0.5 to give an average for bin mid-points */
+ return sumBinFreq / cumulFreq + 0.5;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
new file mode 100644
index 00000000..a926002c
--- /dev/null
+++ b/src/ipa/libipa/histogram.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculation interface
+ */
+
+#pragma once
+
+#include <limits.h>
+#include <stdint.h>
+#include <type_traits>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class Histogram
+{
+public:
+ Histogram() { cumulative_.push_back(0); }
+ Histogram(Span<const uint32_t> data);
+
+ template<typename Transform,
+ std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr>
+ Histogram(Span<const uint32_t> data, Transform transform)
+ {
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + transform(value);
+ }
+
+ size_t bins() const { return cumulative_.size() - 1; }
+ const Span<const uint64_t> data() const { return cumulative_; }
+ uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
+ uint64_t cumulativeFrequency(double bin) const;
+ double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const;
+ double interQuantileMean(double lowQuantile, double hiQuantile) const;
+
+private:
+ std::vector<uint64_t> cumulative_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp
new file mode 100644
index 00000000..73e8d3b7
--- /dev/null
+++ b/src/ipa/libipa/interpolator.cpp
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating objects
+ */
+#include "interpolator.h"
+
+#include <algorithm>
+#include <string>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "interpolator.h"
+
+/**
+ * \file interpolator.h
+ * \brief Helper class for linear interpolating a set of objects
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+/**
+ * \class Interpolator
+ * \brief Class for storing, retrieving, and interpolating objects
+ * \tparam T Type of objects stored in the interpolator
+ *
+ * The main use case is to pass a map from color temperatures to corresponding
+ * objects (e.g. matrices for color correction), and then to request an
+ * interpolated object for a specific color temperature. This class abstracts
+ * away the interpolation portion.
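+ *
+ * A minimal usage sketch with scalar values; the color temperatures and gains
+ * are arbitrary:
+ *
+ * \code{.cpp}
+ * Interpolator<double> gains({ { 2800, 1.5 }, { 6500, 2.0 } });
+ * double gain = gains.getInterpolated(4650); // linearly interpolated to 1.75
+ * \endcode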
+ */
+
+/**
+ * \fn Interpolator::Interpolator()
+ * \brief Construct an empty interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ * \brief Initialize an Interpolator instance from yaml
+ * \tparam T Type of data stored in the interpolator
+ * \param[in] yaml The yaml object that contains the map of unsigned integers to
+ * objects
+ * \param[in] key_name The name of the key in the yaml object
+ * \param[in] value_name The name of the value in the yaml object
+ *
+ * The yaml object is expected to be a list of maps. Each map has two or more
+ * pairs: one of \a key_name to the key value (usually color temperature), and
+ * one or more of \a value_name to the object. This is a bit difficult to
+ * explain, so here is an example (in Python, as it is easier to parse than
+ * YAML):
+ * [
+ * {
+ * 'ct': 2860,
+ * 'ccm': [ 2.12089, -0.52461, -0.59629,
+ * -0.85342, 2.80445, -0.95103,
+ * -0.26897, -1.14788, 2.41685 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 2960,
+ * 'ccm': [ 2.26962, -0.54174, -0.72789,
+ * -0.77008, 2.60271, -0.83262,
+ * -0.26036, -1.51254, 2.77289 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 3603,
+ * 'ccm': [ 2.18644, -0.66148, -0.52496,
+ * -0.77828, 2.69474, -0.91645,
+ * -0.25239, -0.83059, 2.08298 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ * ]
+ *
+ * In this case, \a key_name would be 'ct', and \a value_name can be either
+ * 'ccm' or 'offsets'. This way multiple interpolators can be defined in
+ * one set of color temperature ranges in the tuning file, and they can be
+ * retrieved separately with the \a value_name parameter.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \fn void Interpolator<T>::setQuantization(const unsigned int q)
+ * \brief Set the quantization value
+ * \param[in] q The quantization value
+ *
+ * Sets the quantization value. When set, the key is rounded to the nearest
+ * multiple of \a q before the interpolation. This can help in reducing the
+ * number of updates pushed to the hardware.
+ *
+ * Note that normally a threshold needs to be combined with quantization.
+ * Otherwise a value that swings around the edge of the quantization step will
+ * lead to constant updates.
+ */
+
+/**
+ * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data)
+ * \brief Set the internal map
+ *
+ * Overwrites the internal map using move semantics.
+ */
+
+/**
+ * \fn const T& Interpolator<T>::getInterpolated()
+ * \brief Retrieve an interpolated value for the given key
+ * \param[in] key The unsigned integer key of the object to retrieve
+ * \param[out] quantizedKey If provided, the key value after quantization
+ * \return The object corresponding to the key. The object is cached internally,
+ * so on successive calls with the same key (after quantization) interpolation
+ * is not recalculated.
+ */
+
+/**
+ * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double
+ * lambda)
+ * \brief Interpolate between two instances of T
+ * \param a The first value to interpolate
+ * \param b The second value to interpolate
+ * \param dest The destination for the interpolated value
+ * \param lambda The interpolation factor (0..1)
+ *
+ * Interpolates between \a a and \a b according to \a lambda. It calculates
+ * dest = a * (1.0 - lambda) + b * lambda;
+ *
+ * If T supports multiplication with double and addition, this function can be
+ * used as is. For other types this function can be specialised using partial
+ * template specialization.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h
new file mode 100644
index 00000000..fffce214
--- /dev/null
+++ b/src/ipa/libipa/interpolator.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating maps of objects
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <string>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+template<typename T>
+class Interpolator
+{
+public:
+ Interpolator() = default;
+ Interpolator(const std::map<unsigned int, T> &data)
+ : data_(data)
+ {
+ }
+ Interpolator(std::map<unsigned int, T> &&data)
+ : data_(std::move(data))
+ {
+ }
+
+ ~Interpolator() = default;
+
+ int readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ {
+ data_.clear();
+ lastInterpolatedKey_.reset();
+
+ if (!yaml.isList()) {
+ LOG(Interpolator, Error) << "yaml object must be a list";
+ return -EINVAL;
+ }
+
+ for (const auto &value : yaml.asList()) {
+ unsigned int ct = std::stoul(value[key_name].get<std::string>(""));
+ std::optional<T> data =
+ value[value_name].get<T>();
+ if (!data) {
+ return -EINVAL;
+ }
+
+ data_[ct] = *data;
+ }
+
+ if (data_.size() < 1) {
+ LOG(Interpolator, Error) << "Need at least one element";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ void setQuantization(const unsigned int q)
+ {
+ quantization_ = q;
+ }
+
+ void setData(std::map<unsigned int, T> &&data)
+ {
+ data_ = std::move(data);
+ lastInterpolatedKey_.reset();
+ }
+
+ const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr)
+ {
+ ASSERT(data_.size() > 0);
+
+ if (quantization_ > 0)
+ key = std::lround(key / static_cast<double>(quantization_)) * quantization_;
+
+ if (quantizedKey)
+ *quantizedKey = key;
+
+ if (lastInterpolatedKey_.has_value() &&
+ *lastInterpolatedKey_ == key)
+ return lastInterpolatedValue_;
+
+ auto it = data_.lower_bound(key);
+
+ if (it == data_.begin())
+ return it->second;
+
+ if (it == data_.end())
+ return std::prev(it)->second;
+
+ if (it->first == key)
+ return it->second;
+
+ auto it2 = std::prev(it);
+ double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first);
+ interpolate(it2->second, it->second, lastInterpolatedValue_, lambda);
+ lastInterpolatedKey_ = key;
+
+ return lastInterpolatedValue_;
+ }
+
+ void interpolate(const T &a, const T &b, T &dest, double lambda)
+ {
+ dest = a * (1.0 - lambda) + b * lambda;
+ }
+
+private:
+ std::map<unsigned int, T> data_;
+ T lastInterpolatedValue_;
+ std::optional<unsigned int> lastInterpolatedKey_;
+ unsigned int quantization_ = 0;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/ipa_interface_wrapper.cpp b/src/ipa/libipa/ipa_interface_wrapper.cpp
deleted file mode 100644
index b93c1c1f..00000000
--- a/src/ipa/libipa/ipa_interface_wrapper.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_interface_wrapper.cpp - Image Processing Algorithm interface wrapper
- */
-
-#include "ipa_interface_wrapper.h"
-
-#include <map>
-#include <string.h>
-#include <unistd.h>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-
-#include "byte_stream_buffer.h"
-
-/**
- * \file ipa_interface_wrapper.h
- * \brief Image Processing Algorithm interface wrapper
- */
-
-namespace libcamera {
-
-/**
- * \class IPAInterfaceWrapper
- * \brief Wrap an IPAInterface and expose it as an ipa_context
- *
- * This class implements the ipa_context API based on a provided IPAInterface.
- * It helps IPAs that implement the IPAInterface API to provide the external
- * ipa_context API.
- *
- * To use the wrapper, an IPA module simple creates a new instance of its
- * IPAInterface implementation, and passes it to the constructor of the
- * IPAInterfaceWrapper. As IPAInterfaceWrapper inherits from ipa_context, the
- * constructed wrapper can then be directly returned from the IPA module's
- * ipaCreate() function.
- *
- * \code{.cpp}
- * class MyIPA : public IPAInterface
- * {
- * ...
- * };
- *
- * struct ipa_context *ipaCreate()
- * {
- * return new IPAInterfaceWrapper(std::make_unique<MyIPA>());
- * }
- * \endcode
- *
- * The wrapper takes ownership of the IPAInterface and will automatically
- * delete it when the wrapper is destroyed.
- */
-
-/**
- * \brief Construct an IPAInterfaceWrapper wrapping \a interface
- * \param[in] interface The interface to wrap
- */
-IPAInterfaceWrapper::IPAInterfaceWrapper(std::unique_ptr<IPAInterface> interface)
- : ipa_(std::move(interface)), callbacks_(nullptr), cb_ctx_(nullptr)
-{
- ops = &operations_;
-
- ipa_->queueFrameAction.connect(this, &IPAInterfaceWrapper::queueFrameAction);
-}
-
-void IPAInterfaceWrapper::destroy(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- delete ctx;
-}
-
-void *IPAInterfaceWrapper::get_interface(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- return ctx->ipa_.get();
-}
-
-void IPAInterfaceWrapper::init(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->ipa_->init();
-}
-
-void IPAInterfaceWrapper::register_callbacks(struct ipa_context *_ctx,
- const struct ipa_callback_ops *callbacks,
- void *cb_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->callbacks_ = callbacks;
- ctx->cb_ctx_ = cb_ctx;
-}
-
-void IPAInterfaceWrapper::configure(struct ipa_context *_ctx,
- const struct ipa_stream *streams,
- unsigned int num_streams,
- const struct ipa_control_info_map *maps,
- unsigned int num_maps)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->serializer_.reset();
-
- /* Translate the IPA stream configurations map. */
- std::map<unsigned int, IPAStream> ipaStreams;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- const struct ipa_stream &stream = streams[i];
-
- ipaStreams[stream.id] = {
- stream.pixel_format,
- Size(stream.width, stream.height),
- };
- }
-
- /* Translate the IPA entity controls map. */
- std::map<unsigned int, const ControlInfoMap &> entityControls;
- std::map<unsigned int, ControlInfoMap> infoMaps;
-
- for (unsigned int i = 0; i < num_maps; ++i) {
- const struct ipa_control_info_map &ipa_map = maps[i];
- ByteStreamBuffer byteStream(ipa_map.data, ipa_map.size);
- unsigned int id = ipa_map.id;
-
- infoMaps[id] = ctx->serializer_.deserialize<ControlInfoMap>(byteStream);
- entityControls.emplace(id, infoMaps[id]);
- }
-
- ctx->ipa_->configure(ipaStreams, entityControls);
-}
-
-void IPAInterfaceWrapper::map_buffers(struct ipa_context *_ctx,
- const struct ipa_buffer *_buffers,
- size_t num_buffers)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- std::vector<IPABuffer> buffers(num_buffers);
-
- for (unsigned int i = 0; i < num_buffers; ++i) {
- const struct ipa_buffer &_buffer = _buffers[i];
- IPABuffer &buffer = buffers[i];
- std::vector<FrameBuffer::Plane> &planes = buffer.planes;
-
- buffer.id = _buffer.id;
-
- planes.resize(_buffer.num_planes);
- for (unsigned int j = 0; j < _buffer.num_planes; ++j) {
- planes[j].fd = FileDescriptor(_buffer.planes[j].dmabuf);
- planes[j].length = _buffer.planes[j].length;
- }
- }
-
- ctx->ipa_->mapBuffers(buffers);
-}
-
-void IPAInterfaceWrapper::unmap_buffers(struct ipa_context *_ctx,
- const unsigned int *_ids,
- size_t num_buffers)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- std::vector<unsigned int> ids(_ids, _ids + num_buffers);
- ctx->ipa_->unmapBuffers(ids);
-}
-
-void IPAInterfaceWrapper::process_event(struct ipa_context *_ctx,
- const struct ipa_operation_data *data)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- IPAOperationData opData;
-
- opData.operation = data->operation;
-
- opData.data.resize(data->num_data);
- memcpy(opData.data.data(), data->data,
- data->num_data * sizeof(*data->data));
-
- opData.controls.resize(data->num_lists);
- for (unsigned int i = 0; i < data->num_lists; ++i) {
- const struct ipa_control_list *c_list = &data->lists[i];
- ByteStreamBuffer byteStream(c_list->data, c_list->size);
- opData.controls[i] = ctx->serializer_.deserialize<ControlList>(byteStream);
- }
-
- ctx->ipa_->processEvent(opData);
-}
-
-void IPAInterfaceWrapper::queueFrameAction(unsigned int frame,
- const IPAOperationData &data)
-{
- if (!callbacks_)
- return;
-
- struct ipa_operation_data c_data;
- c_data.operation = data.operation;
- c_data.data = data.data.data();
- c_data.num_data = data.data.size();
-
- struct ipa_control_list control_lists[data.controls.size()];
- c_data.lists = control_lists;
- c_data.num_lists = data.controls.size();
-
- std::size_t listsSize = 0;
- for (const auto &list : data.controls)
- listsSize += serializer_.binarySize(list);
-
- std::vector<uint8_t> binaryData(listsSize);
- ByteStreamBuffer byteStreamBuffer(binaryData.data(), listsSize);
-
- unsigned int i = 0;
- for (const auto &list : data.controls) {
- struct ipa_control_list &c_list = control_lists[i];
- c_list.size = serializer_.binarySize(list);
-
- ByteStreamBuffer b = byteStreamBuffer.carveOut(c_list.size);
- serializer_.serialize(list, b);
-
- c_list.data = b.base();
- }
-
- callbacks_->queue_frame_action(cb_ctx_, frame, c_data);
-}
-
-#ifndef __DOXYGEN__
-/*
- * This construct confuses Doygen and makes it believe that all members of the
- * operations is a member of IPAInterfaceWrapper. It must thus be hidden.
- */
-const struct ipa_context_ops IPAInterfaceWrapper::operations_ = {
- .destroy = &IPAInterfaceWrapper::destroy,
- .get_interface = &IPAInterfaceWrapper::get_interface,
- .init = &IPAInterfaceWrapper::init,
- .register_callbacks = &IPAInterfaceWrapper::register_callbacks,
- .configure = &IPAInterfaceWrapper::configure,
- .map_buffers = &IPAInterfaceWrapper::map_buffers,
- .unmap_buffers = &IPAInterfaceWrapper::unmap_buffers,
- .process_event = &IPAInterfaceWrapper::process_event,
-};
-#endif
-
-} /* namespace libcamera */
diff --git a/src/ipa/libipa/ipa_interface_wrapper.h b/src/ipa/libipa/ipa_interface_wrapper.h
deleted file mode 100644
index 3fb7b447..00000000
--- a/src/ipa/libipa/ipa_interface_wrapper.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_interface_wrapper.h - Image Processing Algorithm interface wrapper
- */
-#ifndef __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__
-#define __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__
-
-#include <memory>
-
-#include <ipa/ipa_interface.h>
-
-#include "control_serializer.h"
-
-namespace libcamera {
-
-class IPAInterfaceWrapper : public ipa_context
-{
-public:
- IPAInterfaceWrapper(std::unique_ptr<IPAInterface> interface);
-
-private:
- static void destroy(struct ipa_context *ctx);
- static void *get_interface(struct ipa_context *ctx);
- static void init(struct ipa_context *ctx);
- static void register_callbacks(struct ipa_context *ctx,
- const struct ipa_callback_ops *callbacks,
- void *cb_ctx);
- static void configure(struct ipa_context *ctx,
- const struct ipa_stream *streams,
- unsigned int num_streams,
- const struct ipa_control_info_map *maps,
- unsigned int num_maps);
- static void map_buffers(struct ipa_context *ctx,
- const struct ipa_buffer *c_buffers,
- size_t num_buffers);
- static void unmap_buffers(struct ipa_context *ctx,
- const unsigned int *ids,
- size_t num_buffers);
- static void process_event(struct ipa_context *ctx,
- const struct ipa_operation_data *data);
-
- static const struct ipa_context_ops operations_;
-
- void queueFrameAction(unsigned int frame, const IPAOperationData &data);
-
- std::unique_ptr<IPAInterface> ipa_;
- const struct ipa_callback_ops *callbacks_;
- void *cb_ctx_;
-
- ControlSerializer serializer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__ */
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp
new file mode 100644
index 00000000..f607d86c
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Polynomial class to represent lens shading correction
+ */
+
+#include "lsc_polynomial.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file lsc_polynomial.h
+ * \brief LscPolynomial class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+/**
+ * \class LscPolynomial
+ * \brief Class for handling even polynomials used in lens shading correction
+ *
+ * Shading artifacts of camera lenses can be modeled using even radial
+ * polynomials. This class implements a polynomial with 5 coefficients which
+ * follows the definition of the FixVignetteRadial opcode in the Adobe DNG
+ * specification.
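+ *
+ * A short usage sketch; the centre, coefficients and image size below are
+ * arbitrary:
+ *
+ * \code{.cpp}
+ * LscPolynomial poly(0.5, 0.5, 0.1, 0.01, 0.0, 0.0, 0.0);
+ * poly.setReferenceImageSize(Size(4056, 3040));
+ * double gain = poly.sampleAtNormalizedPixelPos(0.2, 0.3);
+ * \endcode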
+ */
+
+/**
+ * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ * \brief Construct a polynomial using the given coefficients
+ * \param cx Center-x relative to the image in normalized coordinates (0..1)
+ * \param cy Center-y relative to the image in normalized coordinates (0..1)
+ * \param k0 Coefficient of the polynomial
+ * \param k1 Coefficient of the polynomial
+ * \param k2 Coefficient of the polynomial
+ * \param k3 Coefficient of the polynomial
+ * \param k4 Coefficient of the polynomial
+ */
+
+/**
+ * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y)
+ * \brief Sample the polynomial at the given normalized pixel position
+ *
+ * This function samples the polynomial at the given pixel position divided by
+ * the value returned by getM().
+ *
+ * \param x x position in normalized coordinates
+ * \param y y position in normalized coordinates
+ * \return The sampled value
+ */
+
+/**
+ * \fn LscPolynomial::getM()
+ * \brief Get the value m as described in the DNG specification
+ *
+ * Returns m according to the DNG specification. m represents the Euclidean
+ * distance (in pixels) from the optical center to the farthest pixel in the
+ * image.
+ *
+ * \return The value of m
+ */
+
+/**
+ * \fn LscPolynomial::setReferenceImageSize(const Size &size)
+ * \brief Set the reference image size
+ *
+ * Set the reference image size that is used for subsequent calls to getM() and
+ * sampleAtNormalizedPixelPos()
+ *
+ * \param size The size of the reference image
+ */
+
+} // namespace ipa
+} // namespace libcamera
diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h
new file mode 100644
index 00000000..c898faeb
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Helper for radial polynomial used in lens shading correction.
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <assert.h>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+class LscPolynomial
+{
+public:
+ LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ : cx_(cx), cy_(cy), cnx_(0), cny_(0),
+ coefficients_({ k0, k1, k2, k3, k4 })
+ {
+ }
+
+ double sampleAtNormalizedPixelPos(double x, double y) const
+ {
+ double dx = x - cnx_;
+ double dy = y - cny_;
+ double r = sqrt(dx * dx + dy * dy);
+ double res = 1.0;
+ for (unsigned int i = 0; i < coefficients_.size(); i++) {
+ res += coefficients_[i] * std::pow(r, (i + 1) * 2);
+ }
+ return res;
+ }
+
+ double getM() const
+ {
+ double cpx = imageSize_.width * cx_;
+ double cpy = imageSize_.height * cy_;
+ double mx = std::max(cpx, std::fabs(imageSize_.width - cpx));
+ double my = std::max(cpy, std::fabs(imageSize_.height - cpy));
+
+ return sqrt(mx * mx + my * my);
+ }
+
+ void setReferenceImageSize(const Size &size)
+ {
+ assert(!size.isNull());
+ imageSize_ = size;
+
+ /* Calculate normalized centers */
+ double m = getM();
+ cnx_ = (size.width * cx_) / m;
+ cny_ = (size.height * cy_) / m;
+ }
+
+private:
+ double cx_;
+ double cy_;
+ double cnx_;
+ double cny_;
+ std::array<double, 5> coefficients_;
+
+ Size imageSize_;
+};
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+
+template<>
+struct YamlObject::Getter<ipa::LscPolynomial> {
+ std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const
+ {
+ std::optional<double> cx = obj["cx"].get<double>();
+ std::optional<double> cy = obj["cy"].get<double>();
+ std::optional<double> k0 = obj["k0"].get<double>();
+ std::optional<double> k1 = obj["k1"].get<double>();
+ std::optional<double> k2 = obj["k2"].get<double>();
+ std::optional<double> k3 = obj["k3"].get<double>();
+ std::optional<double> k4 = obj["k4"].get<double>();
+
+ if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) {
+ LOG(LscPolynomial, Error)
+ << "Polynomial is missing a parameter";
+ return std::nullopt;
+ }
+
+ return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4);
+ }
+};
+
+#endif
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lux.cpp b/src/ipa/libipa/lux.cpp
new file mode 100644
index 00000000..bae8198f
--- /dev/null
+++ b/src/ipa/libipa/lux.cpp
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+#include "lux.h"
+
+#include <algorithm>
+#include <chrono>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "histogram.h"
+
+/**
+ * \file lux.h
+ * \brief Helper class that implements lux estimation
+ *
+ * Estimating the lux level of an image is a common operation that can for
+ * instance be used to adjust the target Y value in AGC or for Bayesian AWB
+ * estimation.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(Lux)
+
+namespace ipa {
+
+/**
+ * \class Lux
+ * \brief Class that implements lux estimation
+ *
+ * IPAs that wish to use lux estimation should create a Lux algorithm module
+ * that lightly wraps this module by providing the platform-specific luminance
+ * histogram. The Lux entry in the tuning file must then precede the algorithms
+ * that depend on the estimated lux value.
+ */
+
+/**
+ * \var Lux::binSize_
+ * \brief The maximum count of each bin
+ */
+
+/**
+ * \var Lux::referenceExposureTime_
+ * \brief The exposure time of the reference image, in microseconds
+ */
+
+/**
+ * \var Lux::referenceAnalogueGain_
+ * \brief The analogue gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceDigitalGain_
+ * \brief The digital gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceY_
+ * \brief The measured luminance of the reference image, out of the bin size
+ *
+ * \sa binSize_
+ */
+
+/**
+ * \var Lux::referenceLux_
+ * \brief The estimated lux level of the reference image
+ */
+
+/**
+ * \brief Construct the Lux helper module
+ * \param[in] binSize The maximum count of each bin
+ */
+Lux::Lux(unsigned int binSize)
+ : binSize_(binSize)
+{
+}
+
+/**
+ * \brief Parse tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses YAML tuning data for the common Lux module. It requires
+ * the reference exposure time, analogue gain, digital gain, luminance, and lux
+ * values.
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Lux:
+ * referenceExposureTime: 10000
+ * referenceAnalogueGain: 4.0
+ * referenceDigitalGain: 1.0
+ * referenceY: 12000
+ * referenceLux: 1000
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int Lux::parseTuningData(const YamlObject &tuningData)
+{
+ auto value = tuningData["referenceExposureTime"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceExposureTime'";
+ return -EINVAL;
+ }
+ referenceExposureTime_ = *value * 1.0us;
+
+ value = tuningData["referenceAnalogueGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceAnalogueGain'";
+ return -EINVAL;
+ }
+ referenceAnalogueGain_ = *value;
+
+ value = tuningData["referenceDigitalGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceDigitalGain'";
+ return -EINVAL;
+ }
+ referenceDigitalGain_ = *value;
+
+ value = tuningData["referenceY"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceY'";
+ return -EINVAL;
+ }
+ referenceY_ = *value;
+
+ value = tuningData["referenceLux"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceLux'";
+ return -EINVAL;
+ }
+ referenceLux_ = *value;
+
+ return 0;
+}
+
+/**
+ * \brief Estimate lux given runtime values
+ * \param[in] exposureTime Exposure time applied to the frame
+ * \param[in] aGain Analogue gain applied to the frame
+ * \param[in] dGain Digital gain applied to the frame
+ * \param[in] yHist Histogram from the ISP statistics
+ *
+ * Estimate the lux given the exposure time, gain, and histogram.
+ *
+ * \return Estimated lux value
+ */
+double Lux::estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const
+{
+ double currentY = yHist.interQuantileMean(0, 1);
+ double exposureTimeRatio = referenceExposureTime_ / exposureTime;
+ double aGainRatio = referenceAnalogueGain_ / aGain;
+ double dGainRatio = referenceDigitalGain_ / dGain;
+ double yRatio = currentY * (binSize_ / yHist.bins()) / referenceY_;
+
+ double estimatedLux = exposureTimeRatio * aGainRatio * dGainRatio *
+ yRatio * referenceLux_;
+
+ LOG(Lux, Debug) << "Estimated lux " << estimatedLux;
+ return estimatedLux;
+}
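As a worked example of this ratio computation, using the reference values from the tuning snippet above and made-up per-frame values:

#include <iostream>

int main()
{
	/* Reference frame, as in the tuning example above. */
	const double refExposure = 10000.0;	/* us */
	const double refAGain = 4.0;
	const double refDGain = 1.0;
	const double refY = 12000.0;
	const double refLux = 1000.0;

	/* Hypothetical current frame. */
	const double exposure = 5000.0;		/* us */
	const double aGain = 2.0;
	const double dGain = 1.0;
	const double currentY = 6000.0;		/* mean Y, already scaled to the bin size */

	/*
	 * Half the exposure and half the analogue gain for half the reference
	 * brightness: (2 * 2 * 1 * 0.5) * 1000 = 2000 lux.
	 */
	double lux = (refExposure / exposure) * (refAGain / aGain) *
		     (refDGain / dGain) * (currentY / refY) * refLux;
	std::cout << "Estimated lux: " << lux << "\n";

	return 0;
}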
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lux.h b/src/ipa/libipa/lux.h
new file mode 100644
index 00000000..93ca6479
--- /dev/null
+++ b/src/ipa/libipa/lux.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+class Histogram;
+
+class Lux
+{
+public:
+ Lux(unsigned int binSize);
+
+ int parseTuningData(const YamlObject &tuningData);
+ double estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const;
+
+private:
+ unsigned int binSize_;
+ utils::Duration referenceExposureTime_;
+ double referenceAnalogueGain_;
+ double referenceDigitalGain_;
+ double referenceY_;
+ double referenceLux_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index 6f3cd486..f2b2f4be 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -1,13 +1,45 @@
+# SPDX-License-Identifier: CC0-1.0
+
libipa_headers = files([
- 'ipa_interface_wrapper.h',
+ 'agc_mean_luminance.h',
+ 'algorithm.h',
+ 'camera_sensor_helper.h',
+ 'colours.h',
+ 'exposure_mode_helper.h',
+ 'fc_queue.h',
+ 'fixedpoint.h',
+ 'histogram.h',
+ 'interpolator.h',
+ 'lsc_polynomial.h',
+ 'lux.h',
+ 'module.h',
+ 'pwl.h',
+ 'vector.h',
])
libipa_sources = files([
- 'ipa_interface_wrapper.cpp',
+ 'agc_mean_luminance.cpp',
+ 'algorithm.cpp',
+ 'camera_sensor_helper.cpp',
+ 'colours.cpp',
+ 'exposure_mode_helper.cpp',
+ 'fc_queue.cpp',
+ 'fixedpoint.cpp',
+ 'histogram.cpp',
+ 'interpolator.cpp',
+ 'lsc_polynomial.cpp',
+ 'lux.cpp',
+ 'module.cpp',
+ 'pwl.cpp',
+ 'vector.cpp',
])
libipa_includes = include_directories('..')
-libipa = static_library('ipa', libipa_sources,
+libipa = static_library('ipa', [libipa_sources, libipa_headers],
include_directories : ipa_includes,
- dependencies : libcamera_dep)
+ dependencies : libcamera_private)
+
+libipa_dep = declare_dependency(sources : libipa_headers,
+ include_directories : libipa_includes,
+ link_with : libipa)
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
new file mode 100644
index 00000000..64ca9141
--- /dev/null
+++ b/src/ipa/libipa/module.cpp
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * IPA Module
+ */
+
+#include "module.h"
+
+/**
+ * \file module.h
+ * \brief IPA Module common interface
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAModuleAlgo)
+
+/**
+ * \brief The IPA (Image Processing Algorithm) namespace
+ *
+ * The IPA namespace groups all types specific to IPA modules. It serves as the
+ * top-level namespace for the IPA library libipa, and also contains
+ * module-specific namespaces for IPA modules.
+ */
+namespace ipa {
+
+/**
+ * \class Module
+ * \brief The base class for all IPA modules
+ * \tparam Context The type of the shared IPA context
+ * \tparam FrameContext The type of the frame context
+ * \tparam Config The type of the IPA configuration data
+ * \tparam Params The type of the ISP specific parameters
+ * \tparam Stats The type of the IPA statistics and ISP results
+ *
+ * The Module class template defines a standard internal interface between IPA
+ * modules and libipa.
+ *
+ * While IPA modules are platform-specific, many of their internal functions
+ * are conceptually similar, even if they take different types of
+ * platform-specific parameters. For instance, IPA modules could share code
+ * that instantiates, initializes and runs algorithms, were it not for the fact
+ * that the format of ISP parameters and statistics passed to the related
+ * functions is device-dependent.
+ *
+ * To enable a shared implementation of those common tasks in libipa, the Module
+ * class template defines a standard internal interface between IPA modules and
+ * libipa. The template parameters specify the types of module-dependent data.
+ * IPA modules shall create a specialization of the Module class template in
+ * their namespace, and use it to specialize other classes of libipa, such as
+ * the Algorithm class.
+ */
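To illustrate, a hypothetical platform IPA named "foo" might specialize the template as follows; all type names below are made up, only the Module and Algorithm templates come from libipa:

#include "module.h"

namespace libcamera {

namespace ipa::foo {

/* Hypothetical platform-specific types. */
struct IPAContext;
struct IPAFrameContext;
struct IPAConfigInfo;
struct foo_params;
struct foo_stats;

using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
			   foo_params, foo_stats>;

/*
 * Platform algorithms then derive from the Algorithm class specialized with
 * this Module.
 */
using Algorithm = ipa::Algorithm<Module>;

} /* namespace ipa::foo */

} /* namespace libcamera */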
+
+/**
+ * \typedef Module::Context
+ * \brief The type of the shared IPA context
+ */
+
+/**
+ * \typedef Module::FrameContext
+ * \brief The type of the frame context
+ */
+
+/**
+ * \typedef Module::Config
+ * \brief The type of the IPA configuration data
+ */
+
+/**
+ * \typedef Module::Params
+ * \brief The type of the ISP specific parameters
+ */
+
+/**
+ * \typedef Module::Stats
+ * \brief The type of the IPA statistics and ISP results
+ */
+
+/**
+ * \fn Module::algorithms()
+ * \brief Retrieve the list of instantiated algorithms
+ * \return The list of instantiated algorithms
+ */
+
+/**
+ * \fn Module::createAlgorithms()
+ * \brief Create algorithms from YAML configuration data
+ * \param[in] context The IPA context
+ * \param[in] algorithms Algorithms configuration data as a parsed YamlObject
+ *
+ * This function iterates over the list of \a algorithms parsed from the YAML
+ * configuration file, and instantiates and initializes the corresponding
+ * algorithms. The configuration data is expected to be correct; any error
+ * causes the function to fail and return immediately.
+ *
+ * \return 0 on success, or a negative error code on failure
+ */
+
+/**
+ * \fn Module::registerAlgorithm()
+ * \brief Add an algorithm factory class to the list of available algorithms
+ * \param[in] factory Factory to use to construct the algorithm
+ *
+ * This function registers an algorithm factory. It is meant to be called by the
+ * AlgorithmFactory constructor only.
+ */
+
+/**
+ * \fn Module::createAlgorithm(const std::string &name)
+ * \brief Create an instance of an Algorithm by name
+ * \param[in] name The algorithm name
+ *
+ * This function is the entry point to algorithm instantiation for the IPA
+ * module. It creates and returns an instance of an algorithm identified by its
+ * \a name. If no such algorithm exists, the function returns nullptr.
+ *
+ * To make an algorithm available to the IPA module, it shall be registered with
+ * the REGISTER_IPA_ALGORITHM() macro.
+ *
+ * \return A new instance of the Algorithm subclass corresponding to the \a name
+ */
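Continuing the hypothetical "foo" module sketched above, an algorithm derives from its Algorithm specialization and registers itself by name. The exact Algorithm interface lives in algorithm.h, so the init() signature below is a sketch:

namespace libcamera::ipa::foo::algorithms {

class Agc : public Algorithm
{
public:
	int init([[maybe_unused]] IPAContext &context,
		 [[maybe_unused]] const YamlObject &tuningData) override
	{
		/* Parse the algorithm-specific tuning data here. */
		return 0;
	}
};

REGISTER_IPA_ALGORITHM(Agc, "Agc")

} /* namespace libcamera::ipa::foo::algorithms */

createAlgorithm("Agc") can then locate the factory registered by the macro and instantiate the algorithm.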
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
new file mode 100644
index 00000000..0fb51916
--- /dev/null
+++ b/src/ipa/libipa/module.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * IPA module
+ */
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPAModuleAlgo)
+
+namespace ipa {
+
+template<typename _Context, typename _FrameContext, typename _Config,
+ typename _Params, typename _Stats>
+class Module : public Loggable
+{
+public:
+ using Context = _Context;
+ using FrameContext = _FrameContext;
+ using Config = _Config;
+ using Params = _Params;
+ using Stats = _Stats;
+
+ virtual ~Module() {}
+
+ const std::list<std::unique_ptr<Algorithm<Module>>> &algorithms() const
+ {
+ return algorithms_;
+ }
+
+ int createAlgorithms(Context &context, const YamlObject &algorithms)
+ {
+ const auto &list = algorithms.asList();
+
+ for (const auto &[i, algo] : utils::enumerate(list)) {
+ if (!algo.isDictionary()) {
+ LOG(IPAModuleAlgo, Error)
+ << "Invalid YAML syntax for algorithm " << i;
+ algorithms_.clear();
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithm(context, algo);
+ if (ret) {
+ algorithms_.clear();
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ static void registerAlgorithm(AlgorithmFactoryBase<Module> *factory)
+ {
+ factories().push_back(factory);
+ }
+
+private:
+ int createAlgorithm(Context &context, const YamlObject &data)
+ {
+ const auto &[name, algoData] = *data.asDict().begin();
+ std::unique_ptr<Algorithm<Module>> algo = createAlgorithm(name);
+ if (!algo) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' not found";
+ return -EINVAL;
+ }
+
+ int ret = algo->init(context, algoData);
+ if (ret) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' failed to initialize";
+ return ret;
+ }
+
+ LOG(IPAModuleAlgo, Debug)
+ << "Instantiated algorithm '" << name << "'";
+
+ algorithms_.push_back(std::move(algo));
+ return 0;
+ }
+
+ static std::unique_ptr<Algorithm<Module>> createAlgorithm(const std::string &name)
+ {
+ for (const AlgorithmFactoryBase<Module> *factory : factories()) {
+ if (factory->name() == name)
+ return factory->create();
+ }
+
+ return nullptr;
+ }
+
+ static std::vector<AlgorithmFactoryBase<Module> *> &factories()
+ {
+ /*
+		 * The static factories vector is defined inside the function
+		 * to ensure it gets initialized on first use, without any
+		 * dependency on link order.
+ */
+ static std::vector<AlgorithmFactoryBase<Module> *> factories;
+ return factories;
+ }
+
+ std::list<std::unique_ptr<Algorithm<Module>>> algorithms_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp
new file mode 100644
index 00000000..88fe2022
--- /dev/null
+++ b/src/ipa/libipa/pwl.cpp
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Piecewise linear functions
+ */
+
+#include "pwl.h"
+
+#include <cmath>
+#include <sstream>
+
+/**
+ * \file pwl.h
+ * \brief Piecewise linear functions
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Pwl
+ * \brief Describe a univariate piecewise linear function in two-dimensional
+ * real space
+ *
+ * A piecewise linear function is a univariate function that maps reals to
+ * reals, and it is composed of multiple straight-line segments.
+ *
+ * While a mathematical piecewise linear function would usually be defined by
+ * a list of linear functions along with the values of the domain over which
+ * each applies, this Pwl class is instead defined by a list of points at which
+ * these line segments intersect. These intersecting points are known as knots.
+ *
+ * https://en.wikipedia.org/wiki/Piecewise_linear_function
+ *
+ * A consequence of the Pwl class being defined by knots instead of linear
+ * functions is that the values of the piecewise linear function past the ends
+ * of the function are constants as opposed to linear functions. In a
+ * mathematical piecewise linear function that is defined by multiple linear
+ * functions, the ends of the function are also linear functions and hence grow
+ * to infinity (or negative infinity). However, since this Pwl class is defined
+ * by knots, the y-value of the leftmost and rightmost knots will hold for all
+ * x values to negative infinity and positive infinity, respectively.
+ */
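A small sketch of how a Pwl behaves in practice; the knot values are arbitrary and the function is evaluated inside its domain only:

#include <iostream>

#include "pwl.h"

using libcamera::ipa::Pwl;

int main()
{
	/* Three knots: (0, 1), (1, 3) and (2, 2), i.e. two line segments. */
	Pwl f({ Pwl::Point({ 0.0, 1.0 }),
		Pwl::Point({ 1.0, 3.0 }),
		Pwl::Point({ 2.0, 2.0 }) });

	std::cout << f.eval(0.5) << "\n";		/* 2.0 */
	std::cout << f.domain().start << ".."
		  << f.domain().end << "\n";		/* 0..2 */

	/* f is not monotonic, so its inverse is only a best effort. */
	auto [inverse, proper] = f.inverse();
	std::cout << (proper ? "proper" : "best-effort") << "\n";

	return 0;
}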
+
+/**
+ * \typedef Pwl::Point
+ * \brief Describe a point in two-dimensional real space
+ */
+
+/**
+ * \class Pwl::Interval
+ * \brief Describe an interval in one-dimensional real space
+ */
+
+/**
+ * \fn Pwl::Interval::Interval(double _start, double _end)
+ * \brief Construct an interval
+ * \param[in] _start Start of the interval
+ * \param[in] _end End of the interval
+ */
+
+/**
+ * \fn Pwl::Interval::contains
+ * \brief Check if a given value falls within the interval
+ * \param[in] value Value to check
+ * \return True if the value falls within the interval, including its bounds,
+ * or false otherwise
+ */
+
+/**
+ * \fn Pwl::Interval::clamp
+ * \brief Clamp a value such that it is within the interval
+ * \param[in] value Value to clamp
+ * \return The clamped value
+ */
+
+/**
+ * \fn Pwl::Interval::length
+ * \brief Compute the length of the interval
+ * \return The length of the interval
+ */
+
+/**
+ * \var Pwl::Interval::start
+ * \brief Start of the interval
+ */
+
+/**
+ * \var Pwl::Interval::end
+ * \brief End of the interval
+ */
+
+/**
+ * \brief Construct an empty piecewise linear function
+ */
+Pwl::Pwl()
+{
+}
+
+/**
+ * \brief Construct a piecewise linear function from a list of 2D points
+ * \param[in] points Vector of points from which to construct the piecewise
+ * linear function
+ *
+ * \a points must be in ascending order of x-value.
+ */
+Pwl::Pwl(const std::vector<Point> &points)
+ : points_(points)
+{
+}
+
+/**
+ * \copydoc Pwl::Pwl(const std::vector<Point> &points)
+ *
+ * The contents of the \a points vector are moved to the newly constructed Pwl
+ * instance.
+ */
+Pwl::Pwl(std::vector<Point> &&points)
+ : points_(std::move(points))
+{
+}
+
+/**
+ * \brief Append a point to the end of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be greater than the x-coordinate of the last
+ * (= greatest) point already in the piecewise linear function.
+ */
+void Pwl::append(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.back().x() + eps < x)
+ points_.push_back(Point({ x, y }));
+}
+
+/**
+ * \brief Prepend a point to the beginning of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be less than the x-coordinate of the first
+ * (= smallest) point already in the piecewise linear function.
+ */
+void Pwl::prepend(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.front().x() - eps > x)
+ points_.insert(points_.begin(), Point({ x, y }));
+}
+
+/**
+ * \fn Pwl::empty() const
+ * \brief Check if the piecewise linear function is empty
+ * \return True if there are no points in the function, false otherwise
+ */
+
+/**
+ * \fn Pwl::size() const
+ * \brief Retrieve the number of points in the piecewise linear function
+ * \return The number of points in the piecewise linear function
+ */
+
+/**
+ * \brief Get the domain of the piecewise linear function
+ * \return An interval representing the domain
+ */
+Pwl::Interval Pwl::domain() const
+{
+ return Interval(points_[0].x(), points_[points_.size() - 1].x());
+}
+
+/**
+ * \brief Get the range of the piecewise linear function
+ * \return An interval representing the range
+ */
+Pwl::Interval Pwl::range() const
+{
+ double lo = points_[0].y(), hi = lo;
+ for (auto &p : points_)
+ lo = std::min(lo, p.y()), hi = std::max(hi, p.y());
+ return Interval(lo, hi);
+}
+
+/**
+ * \brief Evaluate the piecewise linear function
+ * \param[in] x The x value to input into the function
+ * \param[in,out] span Initial guess for the span
+ * \param[in] updateSpan Set to true to update \a span
+ *
+ * Evaluate the piecewise linear function, optionally supplying an initial
+ * guess for the "span". The "span" may optionally be updated. If you want to
+ * know the "span" value but don't have an initial guess, set it to -1.
+ *
+ * \return The result of evaluating the piecewise linear function at position \a x
+ */
+double Pwl::eval(double x, int *span, bool updateSpan) const
+{
+ int index = findSpan(x, span && *span != -1
+ ? *span
+ : points_.size() / 2 - 1);
+ if (span && updateSpan)
+ *span = index;
+ return points_[index].y() +
+ (x - points_[index].x()) * (points_[index + 1].y() - points_[index].y()) /
+ (points_[index + 1].x() - points_[index].x());
+}
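Callers that evaluate a Pwl at monotonically increasing x values can keep the span across calls so that each lookup starts from the previous hit, as in this sketch of a crude numerical integration:

#include "pwl.h"

using libcamera::ipa::Pwl;

double integrate(const Pwl &f, double x0, double x1, double dx)
{
	/* -1 means "no initial guess"; eval() updates the span on each call. */
	int span = -1;
	double sum = 0.0;

	for (double x = x0; x < x1; x += dx)
		sum += f.eval(x, &span) * dx;

	return sum;
}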
+
+int Pwl::findSpan(double x, int span) const
+{
+ /*
+	 * Pwls are generally small, so linear search may well be faster than
+	 * binary, though this could be revisited if large Pwls start turning up.
+ */
+ int lastSpan = points_.size() - 2;
+ /*
+ * some algorithms may call us with span pointing directly at the last
+ * control point
+ */
+ span = std::max(0, std::min(lastSpan, span));
+ while (span < lastSpan && x >= points_[span + 1].x())
+ span++;
+ while (span && x < points_[span].x())
+ span--;
+ return span;
+}
+
+/**
+ * \brief Compute the inverse function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The output includes whether the resulting inverse function is a proper
+ * (true) inverse, or only a best effort (e.g. input was non-monotonic).
+ *
+ * \return A pair of the inverse piecewise linear function, and whether or not
+ * the result is a proper/true inverse
+ */
+std::pair<Pwl, bool> Pwl::inverse(const double eps) const
+{
+ bool appended = false, prepended = false, neither = false;
+ Pwl inverse;
+
+ for (Point const &p : points_) {
+ if (inverse.empty()) {
+ inverse.append(p.y(), p.x(), eps);
+ } else if (std::abs(inverse.points_.back().x() - p.y()) <= eps ||
+ std::abs(inverse.points_.front().x() - p.y()) <= eps) {
+ /* do nothing */;
+ } else if (p.y() > inverse.points_.back().x()) {
+ inverse.append(p.y(), p.x(), eps);
+ appended = true;
+ } else if (p.y() < inverse.points_.front().x()) {
+ inverse.prepend(p.y(), p.x(), eps);
+ prepended = true;
+ } else {
+ neither = true;
+ }
+ }
+
+ /*
+ * This is not a proper inverse if we found ourselves putting points
+ * onto both ends of the inverse, or if there were points that couldn't
+ * go on either.
+ */
+ bool trueInverse = !(neither || (appended && prepended));
+
+ return { inverse, trueInverse };
+}
+
+/**
+ * \brief Compose two piecewise linear functions together
+ * \param[in] other The "other" piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The "this" function is done first, and "other" after.
+ *
+ * \return The composed piecewise linear function
+ */
+Pwl Pwl::compose(Pwl const &other, const double eps) const
+{
+ double thisX = points_[0].x(), thisY = points_[0].y();
+ int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
+ Pwl result({ Point({ thisX, other.eval(thisY, &otherSpan, false) }) });
+
+ while (thisSpan != (int)points_.size() - 1) {
+ double dx = points_[thisSpan + 1].x() - points_[thisSpan].x(),
+ dy = points_[thisSpan + 1].y() - points_[thisSpan].y();
+ if (std::abs(dy) > eps &&
+ otherSpan + 1 < (int)other.points_.size() &&
+ points_[thisSpan + 1].y() >= other.points_[otherSpan + 1].x() + eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the next span in other
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan + 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[++otherSpan].x();
+ } else if (std::abs(dy) > eps && otherSpan > 0 &&
+ points_[thisSpan + 1].y() <=
+ other.points_[otherSpan - 1].x() - eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the previous span in other
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan + 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[--otherSpan].x();
+ } else {
+ /* we stay in the same span in other */
+ thisSpan++;
+ thisX = points_[thisSpan].x(),
+ thisY = points_[thisSpan].y();
+ }
+ result.append(thisX, other.eval(thisY, &otherSpan, false),
+ eps);
+ }
+ return result;
+}
+
+/**
+ * \brief Apply function to (x, y) values at every control point
+ * \param[in] f Function to be applied
+ */
+void Pwl::map(std::function<void(double x, double y)> f) const
+{
+ for (auto &pt : points_)
+ f(pt.x(), pt.y());
+}
+
+/**
+ * \brief Apply function to (x, y0, y1) values wherever either Pwl has a
+ * control point.
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ *
+ * This applies the function \a f to every parameter (x, y0, y1), where x is
+ * the combined list of x-values from \a pwl0 and \a pwl1, y0 is the y-value
+ * for the given x in \a pwl0, and y1 is the y-value for the same x in \a pwl1.
+ */
+void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f)
+{
+ int span0 = 0, span1 = 0;
+ double x = std::min(pwl0.points_[0].x(), pwl1.points_[0].x());
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+
+ while (span0 < (int)pwl0.points_.size() - 1 ||
+ span1 < (int)pwl1.points_.size() - 1) {
+ if (span0 == (int)pwl0.points_.size() - 1)
+ x = pwl1.points_[++span1].x();
+ else if (span1 == (int)pwl1.points_.size() - 1)
+ x = pwl0.points_[++span0].x();
+ else if (pwl0.points_[span0 + 1].x() > pwl1.points_[span1 + 1].x())
+ x = pwl1.points_[++span1].x();
+ else
+ x = pwl0.points_[++span0].x();
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+ }
+}
+
+/**
+ * \brief Combine two Pwls
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * Create a new Pwl where the y values are given by running \a f wherever
+ * either Pwl has a knot.
+ *
+ * \return The combined Pwl
+ */
+Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps)
+{
+ Pwl result;
+ map2(pwl0, pwl1, [&](double x, double y0, double y1) {
+ result.append(x, f(x, y0, y1), eps);
+ });
+ return result;
+}
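For instance, combine() can build the point-wise minimum of two curves; the resulting Pwl has a knot wherever either input has one:

#include <algorithm>

#include "pwl.h"

using libcamera::ipa::Pwl;

Pwl pointwiseMin(const Pwl &a, const Pwl &b)
{
	return Pwl::combine(a, b, [](double /* x */, double y0, double y1) {
		return std::min(y0, y1);
	});
}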
+
+/**
+ * \brief Multiply the piecewise linear function
+ * \param[in] d Scalar multiplier to multiply the function by
+ * \return This function, after it has been multiplied by \a d
+ */
+Pwl &Pwl::operator*=(double d)
+{
+ for (auto &pt : points_)
+ pt[1] *= d;
+ return *this;
+}
+
+/**
+ * \brief Assemble and return a string describing the piecewise linear function
+ * \return A string describing the piecewise linear function
+ */
+std::string Pwl::toString() const
+{
+ std::stringstream ss;
+ ss << "Pwl { ";
+ for (auto &p : points_)
+ ss << "(" << p.x() << ", " << p.y() << ") ";
+ ss << "}";
+ return ss.str();
+}
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values with an even number of
+ * elements. They are parsed in pairs into x and y points in the piecewise
+ * linear function, and added in order. x must be monotonically increasing.
+ */
+template<>
+std::optional<ipa::Pwl>
+YamlObject::Getter<ipa::Pwl>::get(const YamlObject &obj) const
+{
+ if (!obj.size() || obj.size() % 2)
+ return std::nullopt;
+
+ ipa::Pwl pwl;
+
+ const auto &list = obj.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto x = it->get<double>();
+ if (!x)
+ return std::nullopt;
+ auto y = (++it)->get<double>();
+ if (!y)
+ return std::nullopt;
+
+ pwl.append(*x, *y);
+ }
+
+ if (pwl.size() != obj.size() / 2)
+ return std::nullopt;
+
+ return pwl;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h
new file mode 100644
index 00000000..d4ec9f4f
--- /dev/null
+++ b/src/ipa/libipa/pwl.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Piecewise linear functions interface
+ */
+#pragma once
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class Pwl
+{
+public:
+ using Point = Vector<double, 2>;
+
+ struct Interval {
+ Interval(double _start, double _end)
+ : start(_start), end(_end) {}
+
+ bool contains(double value)
+ {
+ return value >= start && value <= end;
+ }
+
+ double clamp(double value)
+ {
+ return std::clamp(value, start, end);
+ }
+
+ double length() const { return end - start; }
+
+ double start, end;
+ };
+
+ Pwl();
+ Pwl(const std::vector<Point> &points);
+ Pwl(std::vector<Point> &&points);
+
+ void append(double x, double y, double eps = 1e-6);
+
+ bool empty() const { return points_.empty(); }
+ size_t size() const { return points_.size(); }
+
+ Interval domain() const;
+ Interval range() const;
+
+ double eval(double x, int *span = nullptr,
+ bool updateSpan = true) const;
+
+ std::pair<Pwl, bool> inverse(double eps = 1e-6) const;
+ Pwl compose(const Pwl &other, double eps = 1e-6) const;
+
+ void map(std::function<void(double x, double y)> f) const;
+
+ static Pwl
+ combine(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ double eps = 1e-6);
+
+ Pwl &operator*=(double d);
+
+ std::string toString() const;
+
+private:
+ static void map2(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<void(double x, double y0, double y1)> f);
+ void prepend(double x, double y, double eps = 1e-6);
+ int findSpan(double x, int span) const;
+
+ std::vector<Point> points_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/vector.cpp b/src/ipa/libipa/vector.cpp
new file mode 100644
index 00000000..8019f8cf
--- /dev/null
+++ b/src/ipa/libipa/vector.cpp
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+
+#include "vector.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file vector.h
+ * \brief Vector class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Vector)
+
+namespace ipa {
+
+/**
+ * \class Vector
+ * \brief Vector class
+ * \tparam T Type of numerical values to be stored in the vector
+ * \tparam Rows Number of dimensions of the vector (= number of elements)
+ */
+
+/**
+ * \fn Vector::Vector()
+ * \brief Construct an uninitialized vector
+ */
+
+/**
+ * \fn Vector::Vector(T scalar)
+ * \brief Construct a vector filled with a \a scalar value
+ * \param[in] scalar The scalar value
+ */
+
+/**
+ * \fn Vector::Vector(const std::array<T, Rows> &data)
+ * \brief Construct vector from supplied data
+ * \param data Data from which to construct a vector
+ *
+ * The size of \a data must be equal to the dimension size Rows of the vector.
+ */
+
+/**
+ * \fn T Vector::operator[](size_t i) const
+ * \brief Index to an element in the vector
+ * \param i Index of element to retrieve
+ * \return Element at index \a i from the vector
+ */
+
+/**
+ * \fn T &Vector::operator[](size_t i)
+ * \copydoc Vector::operator[](size_t i) const
+ */
+
+/**
+ * \fn Vector::operator-() const
+ * \brief Negate a Vector by negating all of its coordinates
+ * \return The negated vector
+ */
+
+/**
+ * \fn Vector::operator+(Vector const &other) const
+ * \brief Calculate the sum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise sum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator+(T scalar) const
+ * \brief Calculate the sum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise sum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator-(Vector const &other) const
+ * \brief Calculate the difference of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise subtraction of \a other from this vector
+ */
+
+/**
+ * \fn Vector::operator-(T scalar) const
+ * \brief Calculate the difference of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise subtraction of \a scalar from this vector
+ */
+
+/**
+ * \fn Vector::operator*(const Vector &other) const
+ * \brief Calculate the product of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise product of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator*(T scalar) const
+ * \brief Calculate the product of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise product of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator/(const Vector &other) const
+ * \brief Calculate the quotient of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise division of this vector by \a other
+ */
+
+/**
+ * \fn Vector::operator/(T scalar) const
+ * \brief Calculate the quotient of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise division of this vector by \a scalar
+ */
+
+/**
+ * \fn Vector::operator+=(Vector const &other)
+ * \brief Add \a other element-wise to this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator+=(T scalar)
+ * \brief Add \a scalar element-wise to this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(Vector const &other)
+ * \brief Subtract \a other element-wise from this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(T scalar)
+ * \brief Subtract \a scalar element-wise from this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(const Vector &other)
+ * \brief Multiply this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(T scalar)
+ * \brief Multiply this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(const Vector &other)
+ * \brief Divide this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(T scalar)
+ * \brief Divide this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::min(const Vector &other) const
+ * \brief Calculate the minimum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise minimum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::min(T scalar) const
+ * \brief Calculate the minimum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise minimum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::max(const Vector &other) const
+ * \brief Calculate the maximum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise maximum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::max(T scalar) const
+ * \brief Calculate the maximum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise maximum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::dot(const Vector<T, Rows> &other) const
+ * \brief Compute the dot product
+ * \param[in] other The other vector
+ * \return The dot product of the two vectors
+ */
+
+/**
+ * \fn constexpr T &Vector::x()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::y()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::z()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::x() const
+ * \copydoc Vector::x()
+ */
+
+/**
+ * \fn constexpr const T &Vector::y() const
+ * \copydoc Vector::y()
+ */
+
+/**
+ * \fn constexpr const T &Vector::z() const
+ * \copydoc Vector::z()
+ */
+
+/**
+ * \fn constexpr T &Vector::r()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::g()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::b()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::r() const
+ * \copydoc Vector::r()
+ */
+
+/**
+ * \fn constexpr const T &Vector::g() const
+ * \copydoc Vector::g()
+ */
+
+/**
+ * \fn constexpr const T &Vector::b() const
+ * \copydoc Vector::b()
+ */
+
+/**
+ * \fn Vector::length2()
+ * \brief Get the squared length of the vector
+ * \return The squared length of the vector
+ */
+
+/**
+ * \fn Vector::length()
+ * \brief Get the length of the vector
+ * \return The length of the vector
+ */
+
+/**
+ * \fn Vector::sum() const
+ * \brief Calculate the sum of all the vector elements
+ * \tparam R The type of the sum
+ *
+ * The type R of the sum defaults to the type T of the elements, but can be set
+ * explicitly to use a different type in case the type T would risk
+ * overflowing.
+ *
+ * \return The sum of all the vector elements
+ */
+
+/**
+ * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+ * \brief Multiply a matrix by a vector
+ * \tparam T Numerical type of the contents of the matrix and vector
+ * \tparam Rows The number of rows in the matrix
+ * \tparam Cols The number of columns in the matrix (= rows in the vector)
+ * \param m The matrix
+ * \param v The vector
+ * \return Product of matrix \a m and vector \a v
+ */
+
+/**
+ * \typedef RGB
+ * \brief A Vector of 3 elements representing an RGB pixel value
+ */
+
+/**
+ * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for equality
+ * \return True if the two vectors are equal, false otherwise
+ */
+
+/**
+ * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for inequality
+ * \return True if the two vectors are not equal, false otherwise
+ */
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Vector, Error)
+ << "Wrong number of values in YAML vector: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
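A short sketch of the Vector operations defined above, with made-up pixel and gain values:

#include <iostream>

#include "vector.h"

using libcamera::ipa::Vector;

int main()
{
	Vector<double, 3> rgb({ 0.2, 0.5, 0.3 });
	Vector<double, 3> gains({ 2.0, 1.0, 1.5 });

	/* Element-wise product, e.g. applying per-channel gains. */
	Vector<double, 3> balanced = rgb * gains;

	std::cout << balanced.r() << " " << balanced.g() << " "
		  << balanced.b() << "\n";	/* 0.4 0.5 0.45 */

	/* The dot product equals the sum of the element-wise product. */
	std::cout << rgb.dot(gains) << "\n";	/* 1.35 */
	std::cout << balanced.sum() << "\n";	/* 1.35 */

	return 0;
}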
diff --git a/src/ipa/libipa/vector.h b/src/ipa/libipa/vector.h
new file mode 100644
index 00000000..fe33c9d6
--- /dev/null
+++ b/src/ipa/libipa/vector.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <functional>
+#include <numeric>
+#include <optional>
+#include <ostream>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/matrix.h"
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Vector)
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, unsigned int Rows>
+#endif /* __DOXYGEN__ */
+class Vector
+{
+public:
+ constexpr Vector() = default;
+
+ constexpr explicit Vector(T scalar)
+ {
+ data_.fill(scalar);
+ }
+
+ constexpr Vector(const std::array<T, Rows> &data)
+ {
+ for (unsigned int i = 0; i < Rows; i++)
+ data_[i] = data[i];
+ }
+
+ const T &operator[](size_t i) const
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ T &operator[](size_t i)
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ constexpr Vector<T, Rows> operator-() const
+ {
+ Vector<T, Rows> ret;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret[i] = -data_[i];
+ return ret;
+ }
+
+ constexpr Vector operator+(const Vector &other) const
+ {
+ return apply(*this, other, std::plus<>{});
+ }
+
+ constexpr Vector operator+(T scalar) const
+ {
+ return apply(*this, scalar, std::plus<>{});
+ }
+
+ constexpr Vector operator-(const Vector &other) const
+ {
+ return apply(*this, other, std::minus<>{});
+ }
+
+ constexpr Vector operator-(T scalar) const
+ {
+ return apply(*this, scalar, std::minus<>{});
+ }
+
+ constexpr Vector operator*(const Vector &other) const
+ {
+ return apply(*this, other, std::multiplies<>{});
+ }
+
+ constexpr Vector operator*(T scalar) const
+ {
+ return apply(*this, scalar, std::multiplies<>{});
+ }
+
+ constexpr Vector operator/(const Vector &other) const
+ {
+ return apply(*this, other, std::divides<>{});
+ }
+
+ constexpr Vector operator/(T scalar) const
+ {
+ return apply(*this, scalar, std::divides<>{});
+ }
+
+ Vector &operator+=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator+=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator-=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator-=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator*=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator*=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator/=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a / b; });
+ }
+
+ Vector &operator/=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a / b; });
+ }
+
+ constexpr Vector min(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector min(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector max(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::max(a, b); });
+ }
+
+ constexpr Vector max(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) -> T { return std::max(a, b); });
+ }
+
+ constexpr T dot(const Vector<T, Rows> &other) const
+ {
+ T ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * other[i];
+ return ret;
+ }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &x() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &y() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &z() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &x() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &y() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &z() { return data_[2]; }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &r() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &g() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &b() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &r() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &g() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &b() { return data_[2]; }
+
+ constexpr double length2() const
+ {
+ double ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * data_[i];
+ return ret;
+ }
+
+ constexpr double length() const
+ {
+ return std::sqrt(length2());
+ }
+
+ template<typename R = T>
+ constexpr R sum() const
+ {
+ return std::accumulate(data_.begin(), data_.end(), R{});
+ }
+
+private:
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, const Vector &rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ rhs.data_.begin(), result.data_.begin(),
+ op);
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, T rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ result.data_.begin(),
+ [&op, rhs](T v) { return op(v, rhs); });
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(const Vector &other, BinaryOp op)
+ {
+ auto itOther = other.data_.begin();
+ std::for_each(data_.begin(), data_.end(),
+ [&op, &itOther](T &v) { v = op(v, *itOther++); });
+
+ return *this;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(T scalar, BinaryOp op)
+ {
+ std::for_each(data_.begin(), data_.end(),
+ [&op, scalar](T &v) { v = op(v, scalar); });
+
+ return *this;
+ }
+
+ std::array<T, Rows> data_;
+};
+
+template<typename T>
+using RGB = Vector<T, 3>;
+
+template<typename T, unsigned int Rows, unsigned int Cols>
+Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+{
+ Vector<T, Rows> result;
+
+ for (unsigned int i = 0; i < Rows; i++) {
+ T sum = 0;
+ for (unsigned int j = 0; j < Cols; j++)
+ sum += m[i][j] * v[j];
+ result[i] = sum;
+ }
+
+ return result;
+}
+
+template<typename T, unsigned int Rows>
+bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ for (unsigned int i = 0; i < Rows; i++) {
+ if (lhs[i] != rhs[i])
+ return false;
+ }
+
+ return true;
+}
+
+template<typename T, unsigned int Rows>
+bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ return !(lhs == rhs);
+}
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows>
+std::ostream &operator<<(std::ostream &out, const ipa::Vector<T, Rows> &v)
+{
+ out << "Vector { ";
+ for (unsigned int i = 0; i < Rows; i++) {
+ out << v[i];
+ out << ((i + 1 < Rows) ? ", " : " ");
+ }
+ out << " }";
+
+ return out;
+}
+
+template<typename T, unsigned int Rows>
+struct YamlObject::Getter<ipa::Vector<T, Rows>> {
+ std::optional<ipa::Vector<T, Rows>> get(const YamlObject &obj) const
+ {
+ if (!ipa::vectorValidateYaml(obj, Rows))
+ return std::nullopt;
+
+ ipa::Vector<T, Rows> vector;
+
+ unsigned int i = 0;
+ for (const YamlObject &entry : obj.asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ vector[i++] = *value;
+ }
+
+ return vector;
+ }
+};
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */