Diffstat (limited to 'src/ipa/libipa')
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.cpp  53
-rw-r--r--  src/ipa/libipa/agc_mean_luminance.h  2
-rw-r--r--  src/ipa/libipa/awb.cpp  265
-rw-r--r--  src/ipa/libipa/awb.h  63
-rw-r--r--  src/ipa/libipa/awb_bayes.cpp  499
-rw-r--r--  src/ipa/libipa/awb_bayes.h  67
-rw-r--r--  src/ipa/libipa/awb_grey.cpp  114
-rw-r--r--  src/ipa/libipa/awb_grey.h  35
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.cpp  364
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.h  24
-rw-r--r--  src/ipa/libipa/colours.cpp  81
-rw-r--r--  src/ipa/libipa/colours.h  23
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.cpp  170
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.h  14
-rw-r--r--  src/ipa/libipa/fc_queue.h  21
-rw-r--r--  src/ipa/libipa/fixedpoint.cpp  42
-rw-r--r--  src/ipa/libipa/fixedpoint.h  65
-rw-r--r--  src/ipa/libipa/histogram.cpp  6
-rw-r--r--  src/ipa/libipa/histogram.h  2
-rw-r--r--  src/ipa/libipa/interpolator.cpp  163
-rw-r--r--  src/ipa/libipa/interpolator.h  136
-rw-r--r--  src/ipa/libipa/lsc_polynomial.cpp  81
-rw-r--r--  src/ipa/libipa/lsc_polynomial.h  105
-rw-r--r--  src/ipa/libipa/lux.cpp  173
-rw-r--r--  src/ipa/libipa/lux.h  41
-rw-r--r--  src/ipa/libipa/matrix.cpp  149
-rw-r--r--  src/ipa/libipa/matrix.h  204
-rw-r--r--  src/ipa/libipa/matrix_interpolator.cpp  110
-rw-r--r--  src/ipa/libipa/matrix_interpolator.h  122
-rw-r--r--  src/ipa/libipa/meson.build  22
-rw-r--r--  src/ipa/libipa/pwl.cpp  7
-rw-r--r--  src/ipa/libipa/pwl.h  6
-rw-r--r--  src/ipa/libipa/vector.cpp  168
-rw-r--r--  src/ipa/libipa/vector.h  219
34 files changed, 2369 insertions, 1247 deletions
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
index f97ef117..02555a44 100644
--- a/src/ipa/libipa/agc_mean_luminance.cpp
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -89,10 +89,10 @@ static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
* \class AgcMeanLuminance
* \brief A mean-based auto-exposure algorithm
*
- * This algorithm calculates a shutter time, analogue and digital gain such that
- * the normalised mean luminance value of an image is driven towards a target,
- * which itself is discovered from tuning data. The algorithm is a two-stage
- * process.
+ * This algorithm calculates an exposure time, analogue and digital gain such
+ * that the normalised mean luminance value of an image is driven towards a
+ * target, which itself is discovered from tuning data. The algorithm is a
+ * two-stage process.
*
* In the first stage, an initial gain value is derived by iteratively comparing
* the gain-adjusted mean luminance across the entire image against a target,
@@ -109,7 +109,7 @@ static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
* stage is then clamped to the gain from this stage.
*
* The final gain is used to adjust the effective exposure value of the image,
- * and that new exposure value is divided into shutter time, analogue gain and
+ * and that new exposure value is divided into exposure time, analogue gain and
* digital gain according to the selected AeExposureMode. This class uses the
* \ref ExposureModeHelper class to assist in that division, and expects the
* data needed to initialise that class to be present in tuning data in a
@@ -247,27 +247,27 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
return -EINVAL;
}
- std::vector<uint32_t> shutters =
- modeValues["shutter"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<uint32_t> exposureTimes =
+ modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
std::vector<double> gains =
modeValues["gain"].getList<double>().value_or(std::vector<double>{});
- if (shutters.size() != gains.size()) {
+ if (exposureTimes.size() != gains.size()) {
LOG(AgcMeanLuminance, Error)
- << "Shutter and gain array sizes unequal";
+ << "Exposure time and gain array sizes unequal";
return -EINVAL;
}
- if (shutters.empty()) {
+ if (exposureTimes.empty()) {
LOG(AgcMeanLuminance, Error)
- << "Shutter and gain arrays are empty";
+ << "Exposure time and gain arrays are empty";
return -EINVAL;
}
std::vector<std::pair<utils::Duration, double>> stages;
- for (unsigned int i = 0; i < shutters.size(); i++) {
+ for (unsigned int i = 0; i < exposureTimes.size(); i++) {
stages.push_back({
- std::chrono::microseconds(shutters[i]),
+ std::chrono::microseconds(exposureTimes[i]),
gains[i]
});
}
@@ -283,7 +283,7 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
/*
* If we don't have any exposure modes in the tuning data we create an
* ExposureModeHelper using an empty vector of stages. This will result
- * in the ExposureModeHelper simply driving the shutter as high as
+ * in the ExposureModeHelper simply driving the exposure time as high as
* possible before touching gain.
*/
if (availableExposureModes.empty()) {
@@ -338,18 +338,18 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
* For the AeExposureMode control the data should contain a dictionary called
* AeExposureMode containing per-mode setting dictionaries with the key being a
* value from \ref controls::AeExposureModeNameValueMap. Each mode dict should
- * contain an array of shutter times with the key "shutter" and an array of gain
- * values with the key "gain", in this format:
+ * contain an array of exposure times with the key "exposureTime" and an array
+ * of gain values with the key "gain", in this format:
*
* \code{.unparsed}
* algorithms:
* - Agc:
* AeExposureMode:
* ExposureNormal:
- * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
* gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
* ExposureShort:
- * shutter: [ 100, 10000, 30000, 60000, 120000 ]
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
* gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
*
* \endcode
@@ -371,20 +371,20 @@ int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
/**
* \brief Set the ExposureModeHelper limits for this class
- * \param[in] minShutter Minimum shutter time to allow
- * \param[in] maxShutter Maximum shutter time to allow
+ * \param[in] minExposureTime Minimum exposure time to allow
+ * \param[in] maxExposureTime Maximum exposure time to allow
* \param[in] minGain Minimum gain to allow
* \param[in] maxGain Maximum gain to allow
*
* This function calls \ref ExposureModeHelper::setLimits() for each
* ExposureModeHelper that has been created for this class.
*/
-void AgcMeanLuminance::setLimits(utils::Duration minShutter,
- utils::Duration maxShutter,
+void AgcMeanLuminance::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
double minGain, double maxGain)
{
for (auto &[id, helper] : exposureModeHelpers_)
- helper->setLimits(minShutter, maxShutter, minGain, maxGain);
+ helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain);
}
/**
@@ -513,7 +513,8 @@ utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
}
/**
- * \brief Calculate the new exposure value and splut it between shutter time and gain
+ * \brief Calculate the new exposure value and split it between exposure time
+ * and gain
* \param[in] constraintModeIndex The index of the current constraint mode
* \param[in] exposureModeIndex The index of the current exposure mode
* \param[in] yHist A Histogram from the ISP statistics to use in constraining
@@ -523,9 +524,9 @@ utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
*
* Calculate a new exposure value to try to obtain the target. The calculated
* exposure value is filtered to prevent rapid changes from frame to frame, and
- * divided into shutter time, analogue and digital gain.
+ * divided into exposure time, analogue and digital gain.
*
- * \return Tuple of shutter time, analogue gain, and digital gain
+ * \return Tuple of exposure time, analogue gain, and digital gain
*/
std::tuple<utils::Duration, double, double>
AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
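
A minimal usage sketch for the renamed interface above (not part of the patch;
the limit values, the calling context and the variable names are illustrative
assumptions only):

    /* Clamp the exposure mode helpers to what the sensor can actually do. */
    agc.setLimits(minExposureTime, maxExposureTime, minAnalogueGain, maxAnalogueGain);

    /* Split the filtered exposure value according to the active AeExposureMode. */
    auto [exposureTime, analogueGain, digitalGain] =
        agc.calculateNewEv(constraintModeIndex, exposureModeIndex,
                           yHist, effectiveExposureValue);
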
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
index 576d28be..c41391cb 100644
--- a/src/ipa/libipa/agc_mean_luminance.h
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -44,7 +44,7 @@ public:
int parseTuningData(const YamlObject &tuningData);
- void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
double minGain, double maxGain);
std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
diff --git a/src/ipa/libipa/awb.cpp b/src/ipa/libipa/awb.cpp
new file mode 100644
index 00000000..6157bd43
--- /dev/null
+++ b/src/ipa/libipa/awb.cpp
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Generic AWB algorithms
+ */
+
+#include "awb.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file awb.h
+ * \brief Base classes for AWB algorithms
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Awb)
+
+namespace ipa {
+
+/**
+ * \class AwbResult
+ * \brief The result of an awb calculation
+ *
+ * This class holds the result of an auto white balance calculation.
+ */
+
+/**
+ * \var AwbResult::gains
+ * \brief The calculated white balance gains
+ */
+
+/**
+ * \var AwbResult::colourTemperature
+ * \brief The calculated colour temperature in Kelvin
+ */
+
+/**
+ * \class AwbStats
+ * \brief An abstraction class wrapping hardware-specific AWB statistics
+ *
+ * Pipeline handlers using an AWB algorithm based on the AwbAlgorithm class need
+ * to implement this class to give the algorithm access to the hardware-specific
+ * statistics data.
+ */
+
+/**
+ * \fn AwbStats::computeColourError
+ * \brief Compute an error value for when the given gains would be applied
+ * \param[in] gains The gains to apply
+ *
+ * Compute an error value (non-greyness) assuming the given \a gains would be
+ * applied. To keep the actual implementations computationally inexpensive,
+ * the squared colour error shall be returned.
+ *
+ * If the awb statistics provide multiple zones, the average of the individual
+ * squared errors shall be returned. Averaging/normalizing is necessary so that
+ * the numeric dimensions are the same on all hardware platforms.
+ *
+ * \return The computed error value
+ */
+
+/**
+ * \fn AwbStats::getRGBMeans
+ * \brief Get RGB means of the statistics
+ *
+ * Fetch the RGB means from the statistics. The values of each channel are
+ * dimensionless and only the ratios are used for further calculations. This is
+ * used by the simple grey world model to calculate the gains to apply.
+ *
+ * \return The RGB means
+ */
+
+/**
+ * \class AwbAlgorithm
+ * \brief A base class for auto white balance algorithms
+ *
+ * This class is a base class for auto white balance algorithms. It defines an
+ * interface for the algorithms to implement, and is used by the IPAs to
+ * interact with the concrete implementation.
+ */
+
+/**
+ * \fn AwbAlgorithm::init
+ * \brief Initialize the algorithm with the given tuning data
+ * \param[in] tuningData The tuning data to use for the algorithm
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+
+/**
+ * \fn AwbAlgorithm::calculateAwb
+ * \brief Calculate awb data from the given statistics
+ * \param[in] stats The statistics to use for the calculation
+ * \param[in] lux The lux value of the scene
+ *
+ * Calculate an AwbResult object from the given statistics and lux value. A \a
+ * lux value of 0 means it is unknown or invalid and the algorithm shall ignore
+ * it.
+ *
+ * \return The awb result
+ */
+
+/**
+ * \fn AwbAlgorithm::gainsFromColourTemperature
+ * \brief Compute white balance gains from a colour temperature
+ * \param[in] colourTemperature The colour temperature in Kelvin
+ *
+ * Compute the white balance gains from a \a colourTemperature. This function
+ * does not take any statistics into account. It is used to compute the colour
+ * gains when the user manually specifies a colour temperature.
+ *
+ * \return The colour gains
+ */
+
+/**
+ * \fn AwbAlgorithm::controls
+ * \brief Get the controls info map for this algorithm
+ *
+ * \return The controls info map
+ */
+
+/**
+ * \fn AwbAlgorithm::handleControls
+ * \param[in] controls The controls to handle
+ * \brief Handle the controls supplied in a request
+ */
+
+/**
+ * \var AwbAlgorithm::controls_
+ * \brief Controls info map for the controls provided by the algorithm
+ */
+
+/**
+ * \var AwbAlgorithm::modes_
+ * \brief Map of all configured modes
+ * \sa AwbAlgorithm::parseModeConfigs
+ */
+
+/**
+ * \class AwbAlgorithm::ModeConfig
+ * \brief Holds the configuration of a single AWB mode
+ *
+ * Awb modes limit the regulation of the AWB algorithm to a specific range of
+ * colour temperatures.
+ */
+
+/**
+ * \var AwbAlgorithm::ModeConfig::ctLo
+ * \brief The lowest valid colour temperature of that mode
+ */
+
+/**
+ * \var AwbAlgorithm::ModeConfig::ctHi
+ * \brief The highest valid colour temperature of that mode
+ */
+
+/**
+ * \brief Parse the mode configurations from the tuning data
+ * \param[in] tuningData the YamlObject representing the tuning data
+ * \param[in] def The default value for the AwbMode control
+ *
+ * Utility function to parse the tuning data for an AwbMode entry and read all
+ * provided modes. It adds controls::AwbMode to AwbAlgorithm::controls_ and
+ * populates AwbAlgorithm::modes_. For a list of possible modes see \ref
+ * controls::AwbModeEnum.
+ *
+ * Each mode entry must contain a "lo" and "hi" key to specify the lower and
+ * upper colour temperature of that mode. For example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Awb:
+ * AwbMode:
+ * AwbAuto:
+ * lo: 2500
+ * hi: 8000
+ * AwbIncandescent:
+ * lo: 2500
+ * hi: 3000
+ * ...
+ * \endcode
+ *
+ * If \a def is supplied but not contained in the \a tuningData, -EINVAL is
+ * returned.
+ *
+ * \sa controls::AwbModeEnum
+ * \return Zero on success, negative error code otherwise
+ */
+int AwbAlgorithm::parseModeConfigs(const YamlObject &tuningData,
+ const ControlValue &def)
+{
+ std::vector<ControlValue> availableModes;
+
+ const YamlObject &yamlModes = tuningData[controls::AwbMode.name()];
+ if (!yamlModes.isDictionary()) {
+ LOG(Awb, Error)
+ << "AwbModes must be a dictionary.";
+ return -EINVAL;
+ }
+
+ for (const auto &[modeName, modeDict] : yamlModes.asDict()) {
+ if (controls::AwbModeNameValueMap.find(modeName) ==
+ controls::AwbModeNameValueMap.end()) {
+ LOG(Awb, Warning)
+ << "Skipping unknown awb mode '"
+ << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(Awb, Error)
+ << "Invalid awb mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ const auto &modeValue = static_cast<controls::AwbModeEnum>(
+ controls::AwbModeNameValueMap.at(modeName));
+
+ ModeConfig &config = modes_[modeValue];
+
+ auto hi = modeDict["hi"].get<double>();
+ if (!hi) {
+ LOG(Awb, Error) << "Failed to read hi param of mode "
+ << modeName;
+ return -EINVAL;
+ }
+ config.ctHi = *hi;
+
+ auto lo = modeDict["lo"].get<double>();
+ if (!lo) {
+ LOG(Awb, Error) << "Failed to read low param of mode "
+ << modeName;
+ return -EINVAL;
+ }
+ config.ctLo = *lo;
+
+ availableModes.push_back(modeValue);
+ }
+
+ if (modes_.empty()) {
+ LOG(Awb, Error) << "No AWB modes configured";
+ return -EINVAL;
+ }
+
+ if (!def.isNone() &&
+ modes_.find(def.get<controls::AwbModeEnum>()) == modes_.end()) {
+ const auto &names = controls::AwbMode.enumerators();
+ LOG(Awb, Error) << names.at(def.get<controls::AwbModeEnum>())
+ << " mode is missing in the configuration.";
+ return -EINVAL;
+ }
+
+ controls_[&controls::AwbMode] = ControlInfo(availableModes, def);
+
+ return 0;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb.h b/src/ipa/libipa/awb.h
new file mode 100644
index 00000000..4a1b012a
--- /dev/null
+++ b/src/ipa/libipa/awb.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Generic AWB algorithms
+ */
+
+#pragma once
+
+#include <map>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/vector.h"
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+struct AwbResult {
+ RGB<double> gains;
+ double colourTemperature;
+};
+
+struct AwbStats {
+ virtual double computeColourError(const RGB<double> &gains) const = 0;
+ virtual RGB<double> getRGBMeans() const = 0;
+};
+
+class AwbAlgorithm
+{
+public:
+ virtual ~AwbAlgorithm() = default;
+
+ virtual int init(const YamlObject &tuningData) = 0;
+ virtual AwbResult calculateAwb(const AwbStats &stats, int lux) = 0;
+ virtual RGB<double> gainsFromColourTemperature(double colourTemperature) = 0;
+
+ const ControlInfoMap::Map &controls() const
+ {
+ return controls_;
+ }
+
+ virtual void handleControls([[maybe_unused]] const ControlList &controls) {}
+
+protected:
+ int parseModeConfigs(const YamlObject &tuningData,
+ const ControlValue &def = {});
+
+ struct ModeConfig {
+ double ctHi;
+ double ctLo;
+ };
+
+ ControlInfoMap::Map controls_;
+ std::map<controls::AwbModeEnum, AwbAlgorithm::ModeConfig> modes_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
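
To make the AwbStats contract above concrete, here is a minimal, hypothetical
implementation backed by per-zone RGB means (the class name, the zones_ member
and its population from ISP statistics are assumptions for illustration, not
part of this patch):

    #include <algorithm>
    #include <vector>

    #include "awb.h"

    namespace libcamera {
    namespace ipa {

    class MyPlatformAwbStats : public AwbStats
    {
    public:
        double computeColourError(const RGB<double> &gains) const override
        {
            /* Average of the squared non-greyness over all zones. */
            double sum = 0.0;
            for (const RGB<double> &zone : zones_) {
                /* Zones are assumed to have a non-zero green mean. */
                double r = zone.r() * gains.r();
                double g = zone.g() * gains.g();
                double b = zone.b() * gains.b();
                double dr = r / g - 1.0;
                double db = b / g - 1.0;
                sum += dr * dr + db * db;
            }
            return zones_.empty() ? 0.0 : sum / zones_.size();
        }

        RGB<double> getRGBMeans() const override
        {
            /* Average the per-zone means into a single RGB triplet. */
            double r = 0.0, g = 0.0, b = 0.0;
            for (const RGB<double> &zone : zones_) {
                r += zone.r();
                g += zone.g();
                b += zone.b();
            }
            double n = std::max<std::size_t>(zones_.size(), 1);
            return RGB<double>({ r / n, g / n, b / n });
        }

    private:
        /* Hypothetical per-zone RGB means filled from hardware statistics. */
        std::vector<RGB<double>> zones_;
    };

    } /* namespace ipa */
    } /* namespace libcamera */
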
diff --git a/src/ipa/libipa/awb_bayes.cpp b/src/ipa/libipa/awb_bayes.cpp
new file mode 100644
index 00000000..e75bfcd6
--- /dev/null
+++ b/src/ipa/libipa/awb_bayes.cpp
@@ -0,0 +1,499 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Implementation of a bayesian AWB algorithm
+ */
+
+#include "awb_bayes.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "colours.h"
+
+/**
+ * \file awb_bayes.h
+ * \brief Implementation of bayesian auto white balance algorithm
+ *
+ * This implementation is based on the initial implementation done by
+ * RaspberryPi.
+ * \todo: Documentation
+ *
+ * \todo Not all the features implemented by RaspberryPi were ported over to
+ * this algorithm because they either rely on hardware features not generally
+ * available or were considered not important enough at the moment.
+ *
+ * The following parts are not implemented:
+ *
+ * - min_pixels: minimum proportion of pixels counted within AWB region for it
+ * to be "useful"
+ * - min_g: minimum G value of those pixels, to be regarded as "useful"
+ * - min_regions: number of AWB regions that must be "useful" in order to do the
+ * AWB calculation
+ * - deltaLimit: clamp on colour error term (so as not to penalize non-grey
+ * excessively)
+ * - bias_proportion: The biasProportion parameter adds a small proportion of
+ * the counted pixels to a region biased to the biasCT colour temperature.
+ * A typical value for biasProportion would be between 0.05 and 0.1.
+ * - bias_ct: CT target for the search bias
+ * - sensitivityR: red sensitivity ratio (set to canonical sensor's R/G divided
+ * by this sensor's R/G)
+ * - sensitivityB: blue sensitivity ratio (set to canonical sensor's B/G divided
+ * by this sensor's B/G)
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Awb)
+
+namespace {
+
+template<typename T>
+class LimitsRecorder
+{
+public:
+ LimitsRecorder()
+ : min_(std::numeric_limits<T>::max()),
+ max_(std::numeric_limits<T>::min())
+ {
+ }
+
+ void record(const T &value)
+ {
+ min_ = std::min(min_, value);
+ max_ = std::max(max_, value);
+ }
+
+ const T &min() const { return min_; }
+ const T &max() const { return max_; }
+
+private:
+ T min_;
+ T max_;
+};
+
+#ifndef __DOXYGEN__
+template<typename T>
+std::ostream &operator<<(std::ostream &out, const LimitsRecorder<T> &v)
+{
+ out << "[ " << v.min() << ", " << v.max() << " ]";
+ return out;
+}
+#endif
+
+} /* namespace */
+
+namespace ipa {
+
+/**
+ * \brief Step size control for CT search
+ */
+constexpr double kSearchStep = 0.2;
+
+/**
+ * \copydoc libcamera::ipa::Interpolator::interpolate()
+ */
+template<>
+void Interpolator<Pwl>::interpolate(const Pwl &a, const Pwl &b, Pwl &dest, double lambda)
+{
+ dest = Pwl::combine(a, b,
+ [=](double /*x*/, double y0, double y1) -> double {
+ return y0 * (1.0 - lambda) + y1 * lambda;
+ });
+}
+
+/**
+ * \class AwbBayes
+ * \brief Implementation of a bayesian auto white balance algorithm
+ *
+ * In a bayesian AWB algorithm the auto white balance estimation is improved by
+ * taking the likelihood of a given light source based on the estimated lux level
+ * into account. E.g. if it is very bright we can assume that we are outdoors and
+ * that colour temperatures around 6500K are preferred.
+ *
+ * The second part of this algorithm is the search for the most likely colour
+ * temperature. It is implemented in AwbBayes::coarseSearch() and in
+ * AwbBayes::fineSearch(). The search works very well without prior likelihoods
+ * and therefore the algorithm itself provides very good results even without
+ * prior likelihoods.
+ */
+
+/**
+ * \var AwbBayes::transversePos_
+ * \brief How far to wander off CT curve towards "more purple"
+ */
+
+/**
+ * \var AwbBayes::transverseNeg_
+ * \brief How far to wander off CT curve towards "more green"
+ */
+
+/**
+ * \var AwbBayes::currentMode_
+ * \brief The currently selected mode
+ */
+
+int AwbBayes::init(const YamlObject &tuningData)
+{
+ int ret = colourGainCurve_.readYaml(tuningData["colourGains"], "ct", "gains");
+ if (ret) {
+ LOG(Awb, Error)
+ << "Failed to parse 'colourGains' "
+ << "parameter from tuning file";
+ return ret;
+ }
+
+ ctR_.clear();
+ ctB_.clear();
+ for (const auto &[ct, g] : colourGainCurve_.data()) {
+ ctR_.append(ct, 1.0 / g[0]);
+ ctB_.append(ct, 1.0 / g[1]);
+ }
+
+ /* We will want the inverse functions of these too. */
+ ctRInverse_ = ctR_.inverse().first;
+ ctBInverse_ = ctB_.inverse().first;
+
+ ret = readPriors(tuningData);
+ if (ret) {
+ LOG(Awb, Error) << "Failed to read priors";
+ return ret;
+ }
+
+ ret = parseModeConfigs(tuningData, controls::AwbAuto);
+ if (ret) {
+ LOG(Awb, Error)
+ << "Failed to parse mode parameter from tuning file";
+ return ret;
+ }
+ currentMode_ = &modes_[controls::AwbAuto];
+
+ transversePos_ = tuningData["transversePos"].get<double>(0.01);
+ transverseNeg_ = tuningData["transverseNeg"].get<double>(0.01);
+ if (transversePos_ <= 0 || transverseNeg_ <= 0) {
+ LOG(Awb, Error) << "AwbConfig: transversePos/Neg must be > 0";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int AwbBayes::readPriors(const YamlObject &tuningData)
+{
+ const auto &priorsList = tuningData["priors"];
+ std::map<uint32_t, Pwl> priors;
+
+ if (!priorsList) {
+ LOG(Awb, Error) << "No priors specified";
+ return -EINVAL;
+ }
+
+ for (const auto &p : priorsList.asList()) {
+ if (!p.contains("lux")) {
+ LOG(Awb, Error) << "Missing lux value";
+ return -EINVAL;
+ }
+
+ uint32_t lux = p["lux"].get<uint32_t>(0);
+ if (priors.count(lux)) {
+ LOG(Awb, Error) << "Duplicate prior for lux value " << lux;
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> temperatures =
+ p["ct"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> probabilities =
+ p["probability"].getList<double>().value_or(std::vector<double>{});
+
+ if (temperatures.size() != probabilities.size()) {
+ LOG(Awb, Error)
+ << "Ct and probability array sizes are unequal";
+ return -EINVAL;
+ }
+
+ if (temperatures.empty()) {
+ LOG(Awb, Error)
+ << "Ct and probability arrays are empty";
+ return -EINVAL;
+ }
+
+ std::map<int, double> ctToProbability;
+ for (unsigned int i = 0; i < temperatures.size(); i++) {
+ int t = temperatures[i];
+ if (ctToProbability.count(t)) {
+ LOG(Awb, Error) << "Duplicate ct value";
+ return -EINVAL;
+ }
+
+ ctToProbability[t] = probabilities[i];
+ }
+
+ auto &pwl = priors[lux];
+ for (const auto &[ct, prob] : ctToProbability) {
+ if (prob < 1e-6) {
+ LOG(Awb, Error) << "Prior probability must be larger than 1e-6";
+ return -EINVAL;
+ }
+ pwl.append(ct, prob);
+ }
+ }
+
+ if (priors.empty()) {
+ LOG(Awb, Error) << "No priors specified";
+ return -EINVAL;
+ }
+
+ priors_.setData(std::move(priors));
+
+ return 0;
+}
+
+void AwbBayes::handleControls(const ControlList &controls)
+{
+ auto mode = controls.get(controls::AwbMode);
+ if (mode) {
+ auto it = modes_.find(static_cast<controls::AwbModeEnum>(*mode));
+ if (it != modes_.end())
+ currentMode_ = &it->second;
+ else
+ LOG(Awb, Error) << "Unsupported AWB mode " << *mode;
+ }
+}
+
+RGB<double> AwbBayes::gainsFromColourTemperature(double colourTemperature)
+{
+ /*
+ * \todo: In the RaspberryPi code, the ct curve was interpolated in
+ * the white point space (1/x) not in gains space. This feels
+ * counter-intuitive, as the gains are in linear space. But I can't prove it.
+ */
+ const auto &gains = colourGainCurve_.getInterpolated(colourTemperature);
+ return { { gains[0], 1.0, gains[1] } };
+}
+
+AwbResult AwbBayes::calculateAwb(const AwbStats &stats, int lux)
+{
+ ipa::Pwl prior;
+ if (lux > 0) {
+ prior = priors_.getInterpolated(lux);
+ prior.map([](double x, double y) {
+ LOG(Awb, Debug) << "(" << x << "," << y << ")";
+ });
+ } else {
+ prior.append(0, 1.0);
+ }
+
+ double t = coarseSearch(prior, stats);
+ double r = ctR_.eval(t);
+ double b = ctB_.eval(t);
+ LOG(Awb, Debug)
+ << "After coarse search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+
+ /*
+ * Original comment from RaspberryPi:
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+ * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
+ fineSearch(t, r, b, prior, stats);
+ LOG(Awb, Debug)
+ << "After fine search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+
+ return { { { 1.0 / r, 1.0, 1.0 / b } }, t };
+}
+
+double AwbBayes::coarseSearch(const ipa::Pwl &prior, const AwbStats &stats) const
+{
+ std::vector<Pwl::Point> points;
+ size_t bestPoint = 0;
+ double t = currentMode_->ctLo;
+ int spanR = -1;
+ int spanB = -1;
+ LimitsRecorder<double> errorLimits;
+ LimitsRecorder<double> priorLogLikelihoodLimits;
+
+ /* Step down the CT curve evaluating log likelihood. */
+ while (true) {
+ double r = ctR_.eval(t, &spanR);
+ double b = ctB_.eval(t, &spanB);
+ RGB<double> gains({ 1 / r, 1.0, 1 / b });
+ double delta2Sum = stats.computeColourError(gains);
+ double priorLogLikelihood = log(prior.eval(prior.domain().clamp(t)));
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+
+ errorLimits.record(delta2Sum);
+ priorLogLikelihoodLimits.record(priorLogLikelihood);
+
+ points.push_back({ { t, finalLogLikelihood } });
+ if (points.back().y() < points[bestPoint].y())
+ bestPoint = points.size() - 1;
+
+ if (t == currentMode_->ctHi)
+ break;
+
+ /*
+ * Ensure even steps along the r/b curve by scaling them by the
+ * current t.
+ */
+ t = std::min(t + t / 10 * kSearchStep, currentMode_->ctHi);
+ }
+
+ t = points[bestPoint].x();
+ LOG(Awb, Debug) << "Coarse search found CT " << t
+ << " error limits:" << errorLimits
+ << " prior log likelihood limits:" << priorLogLikelihoodLimits;
+
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbors.
+ */
+ if (points.size() > 2) {
+ bestPoint = std::clamp(bestPoint, std::size_t{ 1 }, points.size() - 2);
+ t = interpolateQuadratic(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ LOG(Awb, Debug)
+ << "After quadratic refinement, coarse search has CT "
+ << t;
+ }
+
+ return t;
+}
+
+void AwbBayes::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior, const AwbStats &stats) const
+{
+ int spanR = -1;
+ int spanB = -1;
+ double step = t / 10 * kSearchStep * 0.1;
+ int nsteps = 5;
+ ctR_.eval(t, &spanR);
+ ctB_.eval(t, &spanB);
+ double rDiff = ctR_.eval(t + nsteps * step, &spanR) -
+ ctR_.eval(t - nsteps * step, &spanR);
+ double bDiff = ctB_.eval(t + nsteps * step, &spanB) -
+ ctB_.eval(t - nsteps * step, &spanB);
+ Pwl::Point transverse({ bDiff, -rDiff });
+ if (transverse.length2() < 1e-6)
+ return;
+ /*
+ * transverse is a unit vector orthogonal to the b vs. r function
+ * (pointing outwards with r and b increasing)
+ */
+ transverse = transverse / transverse.length();
+ double bestLogLikelihood = 0;
+ double bestT = 0;
+ Pwl::Point bestRB(0);
+ double transverseRange = transverseNeg_ + transversePos_;
+ const int maxNumDeltas = 12;
+ LimitsRecorder<double> errorLimits;
+ LimitsRecorder<double> priorLogLikelihoodLimits;
+
+
+ /* a transverse step approximately every 0.01 r/b units */
+ int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
+ numDeltas = std::clamp(numDeltas, 3, maxNumDeltas);
+
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
+ nsteps += numDeltas;
+ for (int i = -nsteps; i <= nsteps; i++) {
+ double tTest = t + i * step;
+ double priorLogLikelihood =
+ log(prior.eval(prior.domain().clamp(tTest)));
+ priorLogLikelihoodLimits.record(priorLogLikelihood);
+ Pwl::Point rbStart{ { ctR_.eval(tTest, &spanR),
+ ctB_.eval(tTest, &spanB) } };
+ Pwl::Point samples[maxNumDeltas];
+ int bestPoint = 0;
+
+ /*
+ * Sample numDeltas points transversely *off* the CT curve
+ * in the range [-transverseNeg, transversePos].
+ * The x value of a sample contains the distance and the y
+ * value contains the corresponding log likelihood.
+ */
+ double transverseStep = transverseRange / (numDeltas - 1);
+ for (int j = 0; j < numDeltas; j++) {
+ auto &p = samples[j];
+ p.x() = -transverseNeg_ + transverseStep * j;
+ Pwl::Point rbTest = rbStart + transverse * p.x();
+ RGB<double> gains({ 1 / rbTest[0], 1.0, 1 / rbTest[1] });
+ double delta2Sum = stats.computeColourError(gains);
+ errorLimits.record(delta2Sum);
+ p.y() = delta2Sum - priorLogLikelihood;
+
+ if (p.y() < samples[bestPoint].y())
+ bestPoint = j;
+ }
+
+ /*
+ * We have all samples transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
+ bestPoint = std::clamp(bestPoint, 1, numDeltas - 2);
+ double bestOffset = interpolateQuadratic(samples[bestPoint - 1],
+ samples[bestPoint],
+ samples[bestPoint + 1]);
+ Pwl::Point rbTest = rbStart + transverse * bestOffset;
+ RGB<double> gains({ 1 / rbTest[0], 1.0, 1 / rbTest[1] });
+ double delta2Sum = stats.computeColourError(gains);
+ errorLimits.record(delta2Sum);
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+
+ if (bestT == 0 || finalLogLikelihood < bestLogLikelihood) {
+ bestLogLikelihood = finalLogLikelihood;
+ bestT = tTest;
+ bestRB = rbTest;
+ }
+ }
+
+ t = bestT;
+ r = bestRB[0];
+ b = bestRB[1];
+ LOG(Awb, Debug)
+ << "Fine search found t " << t << " r " << r << " b " << b
+ << " error limits: " << errorLimits
+ << " prior log likelihood limits: " << priorLogLikelihoodLimits;
+}
+
+/**
+ * \brief Find extremum of function
+ * \param[in] a First point
+ * \param[in] b Second point
+ * \param[in] c Third point
+ *
+ * Given 3 points on a curve, find the extremum of the function in that interval
+ * by fitting a quadratic.
+ *
+ * \return The x value of the extremum clamped to the interval [a.x(), c.x()]
+ */
+double AwbBayes::interpolateQuadratic(Pwl::Point const &a, Pwl::Point const &b,
+ Pwl::Point const &c) const
+{
+ const double eps = 1e-3;
+ Pwl::Point ca = c - a;
+ Pwl::Point ba = b - a;
+ double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x());
+ if (std::abs(denominator) > eps) {
+ double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x();
+ double result = numerator / denominator + a.x();
+ return std::max(a.x(), std::min(c.x(), result));
+ }
+ /* has degenerated to straight line segment */
+ return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x());
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
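
For reference, the quadratic refinement performed by interpolateQuadratic()
above returns the abscissa of the vertex of the parabola fitted through the
three sampled points a, b and c, clamped to the interval [a_x, c_x]:

    x^{*} = a_x + \frac{(b_y - a_y)(c_x - a_x)^2 - (c_y - a_y)(b_x - a_x)^2}
                       {2\left[(b_y - a_y)(c_x - a_x) - (c_y - a_y)(b_x - a_x)\right]}
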
diff --git a/src/ipa/libipa/awb_bayes.h b/src/ipa/libipa/awb_bayes.h
new file mode 100644
index 00000000..47db7243
--- /dev/null
+++ b/src/ipa/libipa/awb_bayes.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for bayes AWB algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/vector.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "awb.h"
+#include "interpolator.h"
+#include "pwl.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AwbBayes : public AwbAlgorithm
+{
+public:
+ AwbBayes() = default;
+
+ int init(const YamlObject &tuningData) override;
+ AwbResult calculateAwb(const AwbStats &stats, int lux) override;
+ RGB<double> gainsFromColourTemperature(double temperatureK) override;
+ void handleControls(const ControlList &controls) override;
+
+private:
+ int readPriors(const YamlObject &tuningData);
+
+ void fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior,
+ const AwbStats &stats) const;
+ double coarseSearch(const ipa::Pwl &prior, const AwbStats &stats) const;
+ double interpolateQuadratic(ipa::Pwl::Point const &a,
+ ipa::Pwl::Point const &b,
+ ipa::Pwl::Point const &c) const;
+
+ Interpolator<Pwl> priors_;
+ Interpolator<Vector<double, 2>> colourGainCurve_;
+
+ ipa::Pwl ctR_;
+ ipa::Pwl ctB_;
+ ipa::Pwl ctRInverse_;
+ ipa::Pwl ctBInverse_;
+
+ double transversePos_;
+ double transverseNeg_;
+
+ ModeConfig *currentMode_ = nullptr;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb_grey.cpp b/src/ipa/libipa/awb_grey.cpp
new file mode 100644
index 00000000..49448976
--- /dev/null
+++ b/src/ipa/libipa/awb_grey.cpp
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for grey world AWB algorithm
+ */
+
+#include "awb_grey.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "colours.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file awb_grey.h
+ * \brief Implementation of a grey world AWB algorithm
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Awb)
+namespace ipa {
+
+/**
+ * \class AwbGrey
+ * \brief A Grey world auto white balance algorithm
+ */
+
+/**
+ * \brief Initialize the algorithm with the given tuning data
+ * \param[in] tuningData The tuning data for the algorithm
+ *
+ * Load the colour temperature curve from the tuning data. If there is no tuning
+ * data available, continue with a warning. Manual colour temperature will not
+ * work in that case.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+int AwbGrey::init(const YamlObject &tuningData)
+{
+ Interpolator<Vector<double, 2>> gains;
+ int ret = gains.readYaml(tuningData["colourGains"], "ct", "gains");
+ if (ret < 0)
+ LOG(Awb, Warning)
+ << "Failed to parse 'colourGains' "
+ << "parameter from tuning file; "
+ << "manual colour temperature will not work properly";
+ else
+ colourGainCurve_ = gains;
+
+ return 0;
+}
+
+/**
+ * \brief Calculate awb data from the given statistics
+ * \param[in] stats The statistics to use for the calculation
+ * \param[in] lux The lux value of the scene
+ *
+ * Estimates the colour temperature based on the colours::estimateCCT function.
+ * The gains are calculated purely based on the RGB means provided by the \a
+ * stats. The colour temperature is not taken into account when calculating the
+ * gains.
+ *
+ * The \a lux parameter is not used in this algorithm.
+ *
+ * \return The awb result
+ */
+AwbResult AwbGrey::calculateAwb(const AwbStats &stats, [[maybe_unused]] int lux)
+{
+ AwbResult result;
+ auto means = stats.getRGBMeans();
+ result.colourTemperature = estimateCCT(means);
+
+ /*
+ * Estimate the red and blue gains to apply in a grey world. The green
+ * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
+ * divisor to a minimum value of 1.0.
+ */
+ result.gains.r() = means.g() / std::max(means.r(), 1.0);
+ result.gains.g() = 1.0;
+ result.gains.b() = means.g() / std::max(means.b(), 1.0);
+ return result;
+}
+
+/**
+ * \brief Compute white balance gains from a colour temperature
+ * \param[in] colourTemperature The colour temperature in Kelvin
+ *
+ * Compute the white balance gains from a \a colourTemperature. This function
+ * does not take any statistics into account. It simply interpolates the colour
+ * gains configured in the colour temperature curve.
+ *
+ * \return The colour gains if a colour temperature curve is available,
+ * [1, 1, 1] otherwise.
+ */
+RGB<double> AwbGrey::gainsFromColourTemperature(double colourTemperature)
+{
+ if (!colourGainCurve_) {
+ LOG(Awb, Error) << "No gains defined";
+ return RGB<double>({ 1.0, 1.0, 1.0 });
+ }
+
+ auto gains = colourGainCurve_->getInterpolated(colourTemperature);
+ return { { gains[0], 1.0, gains[1] } };
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
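
As a small worked example of the grey world gains computed in calculateAwb()
above (the RGB means are made-up numbers, purely for illustration): with
(\bar{R}, \bar{G}, \bar{B}) = (180, 200, 160),

    g_R = \frac{\bar{G}}{\max(\bar{R}, 1)} = \frac{200}{180} \approx 1.11, \qquad
    g_G = 1.0, \qquad
    g_B = \frac{\bar{G}}{\max(\bar{B}, 1)} = \frac{200}{160} = 1.25
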
diff --git a/src/ipa/libipa/awb_grey.h b/src/ipa/libipa/awb_grey.h
new file mode 100644
index 00000000..1a365e61
--- /dev/null
+++ b/src/ipa/libipa/awb_grey.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * AWB grey world algorithm
+ */
+
+#pragma once
+
+#include "libcamera/internal/vector.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "awb.h"
+#include "interpolator.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AwbGrey : public AwbAlgorithm
+{
+public:
+ AwbGrey() = default;
+
+ int init(const YamlObject &tuningData) override;
+ AwbResult calculateAwb(const AwbStats &stats, int lux) override;
+ RGB<double> gainsFromColourTemperature(double colourTemperature) override;
+
+private:
+ std::optional<Interpolator<Vector<double, 2>>> colourGainCurve_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index 782ff990..7c66cd57 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -8,6 +8,7 @@
#include "camera_sensor_helper.h"
#include <cmath>
+#include <limits>
#include <libcamera/base/log.h>
@@ -40,6 +41,7 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::CameraSensorHelper()
* \brief Construct a CameraSensorHelper instance
*
* CameraSensorHelper derived class instances shall never be constructed
@@ -48,6 +50,33 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor scaled to a 16bit pixel
+ * width. If it is unknown an empty optional is returned.
+ *
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - Pedestal added by the sensor to pixel values. They are typically fixed,
+ * sometimes programmable and should be reported in datasheets (but
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depend on the
+ * sensor gains.
+ *
+ * The pedestal is usually the value with the biggest contribution to the
+ * overall black level. In most cases it is either known before or in rare cases
+ * (there is not a single driver with such a control in the linux kernel) can be
+ * queried from the sensor. This function provides that fixed, known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
* \brief Compute gain code from the analogue gain absolute value
* \param[in] gain The real gain to pass
*
@@ -58,21 +87,16 @@ namespace ipa {
*/
uint32_t CameraSensorHelper::gainCode(double gain) const
{
- const AnalogueGainConstants &k = gainConstants_;
-
- switch (gainType_) {
- case AnalogueGainLinear:
- ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0);
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
- return (k.linear.c0 - k.linear.c1 * gain) /
- (k.linear.m1 * gain - k.linear.m0);
+ return (l->c0 - l->c1 * gain) /
+ (l->m1 * gain - l->m0);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
- case AnalogueGainExponential:
- ASSERT(k.exp.a != 0 && k.exp.m != 0);
-
- return std::log2(gain / k.exp.a) / k.exp.m;
-
- default:
+ return std::log2(gain / e->a) / e->m;
+ } else {
ASSERT(false);
return 0;
}
@@ -90,38 +114,26 @@ uint32_t CameraSensorHelper::gainCode(double gain) const
*/
double CameraSensorHelper::gain(uint32_t gainCode) const
{
- const AnalogueGainConstants &k = gainConstants_;
double gain = static_cast<double>(gainCode);
- switch (gainType_) {
- case AnalogueGainLinear:
- ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0);
-
- return (k.linear.m0 * gain + k.linear.c0) /
- (k.linear.m1 * gain + k.linear.c1);
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
- case AnalogueGainExponential:
- ASSERT(k.exp.a != 0 && k.exp.m != 0);
+ return (l->m0 * gain + l->c0) /
+ (l->m1 * gain + l->c1);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
- return k.exp.a * std::exp2(k.exp.m * gain);
-
- default:
+ return e->a * std::exp2(e->m * gain);
+ } else {
ASSERT(false);
return 0.0;
}
}
/**
- * \enum CameraSensorHelper::AnalogueGainType
- * \brief The gain calculation modes as defined by the MIPI CCS
- *
- * Describes the image sensor analogue gain capabilities.
- * Two modes are possible, depending on the sensor: Linear and Exponential.
- */
-
-/**
- * \var CameraSensorHelper::AnalogueGainLinear
- * \brief Gain is computed using linear gain estimation
+ * \struct CameraSensorHelper::AnalogueGainLinear
+ * \brief Analogue gain constants for the linear gain model
*
* The relationship between the integer gain parameter and the resulting gain
* multiplier is given by the following equation:
@@ -136,11 +148,27 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
* The full Gain equation therefore reduces to either:
*
* \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m1
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c1
+ * \brief Constant used in the linear gain coding/decoding
*/
/**
- * \var CameraSensorHelper::AnalogueGainExponential
- * \brief Gain is expressed using an exponential model
+ * \struct CameraSensorHelper::AnalogueGainExp
+ * \brief Analogue gain constants for the exponential gain model
*
* The relationship between the integer gain parameter and the resulting gain
* multiplier is given by the following equation:
@@ -156,61 +184,22 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*
* When the gain is expressed in dB, 'a' is equal to 1 and 'm' to
* \f$log_{2}{10^{\frac{1}{20}}}\f$.
- */
-
-/**
- * \struct CameraSensorHelper::AnalogueGainLinearConstants
- * \brief Analogue gain constants for the linear gain model
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::m0
- * \brief Constant used in the linear gain coding/decoding
- *
- * \note Either m0 or m1 shall be zero.
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::c0
- * \brief Constant used in the linear gain coding/decoding
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::m1
- * \brief Constant used in the linear gain coding/decoding
- *
- * \note Either m0 or m1 shall be zero.
*
- * \var CameraSensorHelper::AnalogueGainLinearConstants::c1
- * \brief Constant used in the linear gain coding/decoding
- */
-
-/**
- * \struct CameraSensorHelper::AnalogueGainExpConstants
- * \brief Analogue gain constants for the exponential gain model
- *
- * \var CameraSensorHelper::AnalogueGainExpConstants::a
+ * \var CameraSensorHelper::AnalogueGainExp::a
* \brief Constant used in the exponential gain coding/decoding
*
- * \var CameraSensorHelper::AnalogueGainExpConstants::m
+ * \var CameraSensorHelper::AnalogueGainExp::m
* \brief Constant used in the exponential gain coding/decoding
*/
/**
- * \struct CameraSensorHelper::AnalogueGainConstants
- * \brief Analogue gain model constants
- *
- * This union stores the constants used to calculate the analogue gain. The
- * CameraSensorHelper::gainType_ variable selects which union member is valid.
- *
- * \var CameraSensorHelper::AnalogueGainConstants::linear
- * \brief Constants for the linear gain model
- *
- * \var CameraSensorHelper::AnalogueGainConstants::exp
- * \brief Constants for the exponential gain model
- */
-
-/**
- * \var CameraSensorHelper::gainType_
- * \brief The analogue gain model type
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ * \sa CameraSensorHelper::blackLevel()
*/
/**
- * \var CameraSensorHelper::gainConstants_
+ * \var CameraSensorHelper::gain_
* \brief The analogue gain parameters used for calculation
*
* The analogue gain is calculated through a formula, and its parameters are
@@ -366,6 +355,99 @@ static constexpr double expGainDb(double step)
return log2_10 * step / 20;
}
+class CameraSensorHelperAr0144 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperAr0144()
+ {
+ /* Power-on default value: 168 at 12bits. */
+ blackLevel_ = 2688;
+ }
+
+ uint32_t gainCode(double gain) const override
+ {
+ /* The recommended minimum gain is 1.6842 to avoid artifacts. */
+ gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45);
+
+ /*
+ * The analogue gain is made of a coarse exponential gain in
+ * the range [2^0, 2^4] and a fine inversely linear gain in the
+ * range [1.0, 2.0[. There is an additional fixed 1.153125
+ * multiplier when the coarse gain reaches 2^2.
+ */
+
+ if (gain > 4.0)
+ gain /= 1.153125;
+
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (1 - (1 << coarse) / gain) * 32;
+
+ /* The fine gain rounding depends on the coarse gain. */
+ if (coarse == 1 || coarse == 3)
+ fine &= ~1;
+ else if (coarse == 4)
+ fine &= ~3;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+ unsigned int d1;
+ double d2, m;
+
+ switch (coarse) {
+ default:
+ case 0:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.0;
+ break;
+ case 1:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.0;
+ break;
+ case 2:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.153125;
+ break;
+ case 3:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.153125;
+ break;
+ case 4:
+ d1 = 4;
+ d2 = 8.0;
+ m = 1.153125;
+ break;
+ }
+
+ /*
+ * With infinite precision, the calculated gain would be exact,
+ * and the reverse conversion with gainCode() would produce the
+ * same gain code. In the real world, rounding errors may cause
+ * the calculated gain to be lower by an amount negligible for
+ * all purposes, except for the reverse conversion. Converting
+ * the gain to a gain code could then return the quantized value
+ * just lower than the original gain code. To avoid this, tests
+ * showed that adding the machine epsilon to the multiplier m is
+ * sufficient.
+ */
+ m += std::numeric_limits<decltype(m)>::epsilon();
+
+ return m * (1 << coarse) / (1.0 - (fine / d1) / d2);
+ }
+
+private:
+ static constexpr double kStep_ = 16;
+};
+REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144)
+
class CameraSensorHelperAr0521 : public CameraSensorHelper
{
public:
@@ -391,13 +473,50 @@ private:
};
REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+class CameraSensorHelperGc05a2 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc05a2()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2)
+
+class CameraSensorHelperGc08a3 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc08a3()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3)
+
+class CameraSensorHelperImx214 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx214()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214)
+
class CameraSensorHelperImx219 : public CameraSensorHelper
{
public:
CameraSensorHelperImx219()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 256, -1, 256 };
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 256, -1, 256 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219)
@@ -407,8 +526,9 @@ class CameraSensorHelperImx258 : public CameraSensorHelper
public:
CameraSensorHelperImx258()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 512, -1, 512 };
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
@@ -418,8 +538,9 @@ class CameraSensorHelperImx283 : public CameraSensorHelper
public:
CameraSensorHelperImx283()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 2048, -1, 2048 };
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
@@ -429,8 +550,9 @@ class CameraSensorHelperImx290 : public CameraSensorHelper
public:
CameraSensorHelperImx290()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ /* From datasheet: 0xf0 at 12bits. */
+ blackLevel_ = 3840;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290)
@@ -440,8 +562,7 @@ class CameraSensorHelperImx296 : public CameraSensorHelper
public:
CameraSensorHelperImx296()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.1) };
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
@@ -456,8 +577,9 @@ class CameraSensorHelperImx335 : public CameraSensorHelper
public:
CameraSensorHelperImx335()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
@@ -467,19 +589,22 @@ class CameraSensorHelperImx415 : public CameraSensorHelper
public:
CameraSensorHelperImx415()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+class CameraSensorHelperImx462 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462)
+
class CameraSensorHelperImx477 : public CameraSensorHelper
{
public:
CameraSensorHelperImx477()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 1024, -1, 1024 };
+ gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
@@ -493,8 +618,7 @@ public:
* The Sensor Manual doesn't appear to document the gain model.
* This has been validated with some empirical testing only.
*/
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
@@ -504,8 +628,7 @@ class CameraSensorHelperOv2740 : public CameraSensorHelper
public:
CameraSensorHelperOv2740()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
@@ -515,8 +638,9 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper
public:
CameraSensorHelperOv4689()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
@@ -526,8 +650,9 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper
public:
CameraSensorHelperOv5640()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
@@ -537,8 +662,7 @@ class CameraSensorHelperOv5647 : public CameraSensorHelper
public:
CameraSensorHelperOv5647()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
@@ -548,8 +672,7 @@ class CameraSensorHelperOv5670 : public CameraSensorHelper
public:
CameraSensorHelperOv5670()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670)
@@ -559,8 +682,9 @@ class CameraSensorHelperOv5675 : public CameraSensorHelper
public:
CameraSensorHelperOv5675()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ /* From Linux kernel driver: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675)
@@ -570,8 +694,7 @@ class CameraSensorHelperOv5693 : public CameraSensorHelper
public:
CameraSensorHelperOv5693()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
@@ -581,8 +704,7 @@ class CameraSensorHelperOv64a40 : public CameraSensorHelper
public:
CameraSensorHelperOv64a40()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
@@ -592,15 +714,13 @@ class CameraSensorHelperOv8858 : public CameraSensorHelper
public:
CameraSensorHelperOv8858()
{
- gainType_ = AnalogueGainLinear;
-
/*
* \todo Validate the selected 1/128 step value as it differs
* from what the sensor manual describes.
*
* See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
*/
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
@@ -610,8 +730,7 @@ class CameraSensorHelperOv8865 : public CameraSensorHelper
public:
CameraSensorHelperOv8865()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865)
@@ -621,8 +740,7 @@ class CameraSensorHelperOv13858 : public CameraSensorHelper
public:
CameraSensorHelperOv13858()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858)
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 0d99073b..a9300a64 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -7,10 +7,11 @@
#pragma once
-#include <stdint.h>
-
#include <memory>
+#include <optional>
+#include <stdint.h>
#include <string>
+#include <variant>
#include <vector>
#include <libcamera/base/class.h>
@@ -25,34 +26,25 @@ public:
CameraSensorHelper() = default;
virtual ~CameraSensorHelper() = default;
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
virtual uint32_t gainCode(double gain) const;
virtual double gain(uint32_t gainCode) const;
protected:
- enum AnalogueGainType {
- AnalogueGainLinear,
- AnalogueGainExponential,
- };
-
- struct AnalogueGainLinearConstants {
+ struct AnalogueGainLinear {
int16_t m0;
int16_t c0;
int16_t m1;
int16_t c1;
};
- struct AnalogueGainExpConstants {
+ struct AnalogueGainExp {
double a;
double m;
};
- union AnalogueGainConstants {
- AnalogueGainLinearConstants linear;
- AnalogueGainExpConstants exp;
- };
-
- AnalogueGainType gainType_;
- AnalogueGainConstants gainConstants_;
+ std::optional<int16_t> blackLevel_;
+ std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
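
For orientation, a minimal sketch of a registration against the reworked base class; the sensor name, black level and gain constants are invented (not part of the patch), and the snippet assumes it lives in camera_sensor_helper.cpp inside namespace libcamera::ipa next to the registrations above. With the linear model, { 1, 0, 0, 16 } corresponds to gain = code / 16, matching the OV5647/OV5693 entries.

    class CameraSensorHelperExample : public CameraSensorHelper
    {
    public:
        CameraSensorHelperExample()
        {
            /* Optional black level, on the 16-bit scale used by the helpers. */
            blackLevel_ = 1024;
            /* Linear gain model constants { m0, c0, m1, c1 }. */
            gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
        }
    };
    REGISTER_CAMERA_SENSOR_HELPER("example", CameraSensorHelperExample)
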
diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp
new file mode 100644
index 00000000..97124cf4
--- /dev/null
+++ b/src/ipa/libipa/colours.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#include "colours.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \file colours.h
+ * \brief Functions to reduce code duplication between IPA modules
+ */
+
+/**
+ * \brief Estimate luminance from RGB values following ITU-R BT.601
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates a luminance value from a triplet of Red, Green and
+ * Blue values, following the formula defined by ITU-R Recommendation BT.601-7
+ * which can be found at https://www.itu.int/rec/R-REC-BT.601
+ *
+ * \return The estimated luminance value
+ */
+double rec601LuminanceFromRGB(const RGB<double> &rgb)
+{
+ static const Vector<double, 3> rgb2y{{
+ 0.299, 0.587, 0.114
+ }};
+
+ return rgb.dot(rgb2y);
+}
+
+/**
+ * \brief Estimate correlated colour temperature from RGB color space input
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates the correlated color temperature from an RGB color space
+ * input. In physics and color science, the Planckian locus or black body locus
+ * is the path or locus that the color of an incandescent black body would take
+ * in a particular chromaticity space as the black body temperature changes.
+ *
+ * If a narrow range of color temperatures is considered (those encapsulating
+ * daylight being the most practical case), one can approximate the Planckian
+ * locus in order to calculate the CCT in terms of chromaticity coordinates.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Color_temperature#Approximation
+ *
+ * \return The estimated color temperature
+ */
+uint32_t estimateCCT(const RGB<double> &rgb)
+{
+ /*
+ * Convert the RGB values to CIE tristimulus values (XYZ) and divide by
+ * the sum of X, Y and Z to calculate the CIE xy chromaticity.
+ */
+ static const Matrix<double, 3, 3> rgb2xyz({
+ -0.14282, 1.54924, -0.95641,
+ -0.32466, 1.57837, -0.73191,
+ -0.68202, 0.77073, 0.56332
+ });
+
+ Vector<double, 3> xyz = rgb2xyz * rgb;
+ xyz /= xyz.sum();
+
+ /* Calculate CCT */
+ double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y());
+ return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
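
A brief usage sketch of the two helpers above, with invented normalised RGB values; the RGB alias is provided by libcamera/internal/vector.h, which colours.h includes.

    #include "colours.h"

    using namespace libcamera;
    using namespace libcamera::ipa;

    void example()
    {
        /* Normalised AWB means for a frame; the values are illustrative. */
        RGB<double> rgb{{ 0.8, 1.0, 0.6 }};

        double luminance = rec601LuminanceFromRGB(rgb); /* 0.299 R + 0.587 G + 0.114 B */
        uint32_t ct = estimateCCT(rgb);                 /* CCT in kelvin */
    }
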
diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h
new file mode 100644
index 00000000..d39b2ca8
--- /dev/null
+++ b/src/ipa/libipa/colours.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "libcamera/internal/vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+double rec601LuminanceFromRGB(const RGB<double> &rgb);
+uint32_t estimateCCT(const RGB<double> &rgb);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
index 683a564a..f235316d 100644
--- a/src/ipa/libipa/exposure_mode_helper.cpp
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -14,9 +14,9 @@
* \file exposure_mode_helper.h
* \brief Helper class that performs computations relating to exposure
*
- * AEGC algorithms have a need to split exposure between shutter time, analogue
+ * AEGC algorithms need to split exposure between exposure time, analogue
* and digital gain. Multiple implementations do so based on paired stages of
- * shutter time and gain limits; provide a helper to avoid duplicating the code.
+ * exposure time and gain limits; provide a helper to avoid duplicating the code.
*/
namespace libcamera {
@@ -29,24 +29,24 @@ namespace ipa {
/**
* \class ExposureModeHelper
- * \brief Class for splitting exposure into shutter time and total gain
+ * \brief Class for splitting exposure into exposure time and total gain
*
* The ExposureModeHelper class provides a standard interface through which an
- * AEGC algorithm can divide exposure between shutter time and gain. It is
- * configured with a set of shutter time and gain pairs and works by initially
- * fixing gain at 1.0 and increasing shutter time up to the shutter time value
+ * AEGC algorithm can divide exposure between exposure time and gain. It is
+ * configured with a set of exposure time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing exposure time up to the exposure time value
* from the first pair in the set in an attempt to meet the required exposure
* value.
*
- * If the required exposure is not achievable by the first shutter time value
+ * If the required exposure is not achievable by the first exposure time value
* alone it ramps gain up to the value from the first pair in the set. If the
- * required exposure is still not met it then allows shutter time to ramp up to
- * the shutter time value from the second pair in the set, and continues in this
+ * required exposure is still not met it then allows exposure time to ramp up to
+ * the exposure time value from the second pair in the set, and continues in this
* vein until either the required exposure time is met, or else the hardware's
- * shutter time or gain limits are reached.
+ * exposure time or gain limits are reached.
*
* This method allows users to strike a balance between a well-exposed image and
- * an acceptable frame-rate, as opposed to simply maximising shutter time
+ * an acceptable frame-rate, as opposed to simply maximising exposure time
* followed by gain. The same helpers can be used to perform the latter
* operation if needed by passing an empty set of pairs to the initialisation
* function.
@@ -61,56 +61,56 @@ namespace ipa {
/**
* \brief Construct an ExposureModeHelper instance
- * \param[in] stages The vector of paired shutter time and gain limits
+ * \param[in] stages The vector of paired exposure time and gain limits
*
- * The input stages are shutter time and _total_ gain pairs; the gain
+ * The input stages are exposure time and _total_ gain pairs; the gain
* encompasses both analogue and digital gain.
*
* The vector of stages may be empty. In that case, the helper will simply use
- * the runtime limits set through setShutterGainLimits() instead.
+ * the runtime limits set through setLimits() instead.
*/
ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
{
- minShutter_ = 0us;
- maxShutter_ = 0us;
+ minExposureTime_ = 0us;
+ maxExposureTime_ = 0us;
minGain_ = 0;
maxGain_ = 0;
for (const auto &[s, g] : stages) {
- shutters_.push_back(s);
+ exposureTimes_.push_back(s);
gains_.push_back(g);
}
}
/**
- * \brief Set the shutter time and gain limits
- * \param[in] minShutter The minimum shutter time supported
- * \param[in] maxShutter The maximum shutter time supported
+ * \brief Set the exposure time and gain limits
+ * \param[in] minExposureTime The minimum exposure time supported
+ * \param[in] maxExposureTime The maximum exposure time supported
* \param[in] minGain The minimum analogue gain supported
* \param[in] maxGain The maximum analogue gain supported
*
- * This function configures the shutter time and analogue gain limits that need
+ * This function configures the exposure time and analogue gain limits that need
* to be adhered to as the helper divides up exposure. Note that this function
* *must* be called whenever those limits change and before splitExposure() is
* used.
*
- * If the algorithm using the helpers needs to indicate that either shutter time
+ * If the algorithm using the helpers needs to indicate that either exposure time
* or analogue gain or both should be fixed it can do so by setting both the
* minima and maxima to the same value.
*/
-void ExposureModeHelper::setLimits(utils::Duration minShutter,
- utils::Duration maxShutter,
+void ExposureModeHelper::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
double minGain, double maxGain)
{
- minShutter_ = minShutter;
- maxShutter_ = maxShutter;
+ minExposureTime_ = minExposureTime;
+ maxExposureTime_ = maxExposureTime;
minGain_ = minGain;
maxGain_ = maxGain;
}
-utils::Duration ExposureModeHelper::clampShutter(utils::Duration shutter) const
+utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const
{
- return std::clamp(shutter, minShutter_, maxShutter_);
+ return std::clamp(exposureTime, minExposureTime_, maxExposureTime_);
}
double ExposureModeHelper::clampGain(double gain) const
@@ -119,125 +119,119 @@ double ExposureModeHelper::clampGain(double gain) const
}
/**
- * \brief Split exposure time into shutter time and gain
- * \param[in] exposure Exposure time
+ * \brief Split exposure into exposure time and gain
+ * \param[in] exposure Exposure value
*
- * This function divides a given exposure time into shutter time, analogue and
- * digital gain by iterating through stages of shutter time and gain limits. At
- * each stage the current stage's shutter time limit is multiplied by the
+ * This function divides a given exposure into exposure time, analogue and
+ * digital gain by iterating through stages of exposure time and gain limits.
+ * At each stage the current stage's exposure time limit is multiplied by the
* previous stage's gain limit (or 1.0 initially) to see if the combination of
- * the two can meet the required exposure time. If they cannot then the current
- * stage's shutter time limit is multiplied by the same stage's gain limit to
+ * the two can meet the required exposure. If they cannot then the current
+ * stage's exposure time limit is multiplied by the same stage's gain limit to
* see if that combination can meet the required exposure time. If they cannot
* then the function moves to consider the next stage.
*
- * When a combination of shutter time and gain _stage_ limits are found that are
- * sufficient to meet the required exposure time, the function attempts to
- * reduce shutter time as much as possible whilst fixing gain and still meeting
- * the exposure time. If a _runtime_ limit prevents shutter time from being
- * lowered enough to meet the exposure time with gain fixed at the stage limit,
- * gain is also lowered to compensate.
+ * When a combination of exposure time and gain _stage_ limits are found that
+ * are sufficient to meet the required exposure, the function attempts to reduce
+ * exposure time as much as possible whilst fixing gain and still meeting the
+ * exposure. If a _runtime_ limit prevents exposure time from being lowered
+ * enough to meet the exposure with gain fixed at the stage limit, gain is also
+ * lowered to compensate.
*
- * Once the shutter time and gain values are ascertained, gain is assigned as
+ * Once the exposure time and gain values are ascertained, gain is assigned as
* analogue gain as much as possible, with digital gain only in use if the
* maximum analogue gain runtime limit is unable to accommodate the exposure
* value.
*
- * If no combination of shutter time and gain limits is found that meets the
- * required exposure time, the helper falls-back to simply maximising the
- * shutter time first, followed by analogue gain, followed by digital gain.
+ * If no combination of exposure time and gain limits is found that meets the
+ * required exposure, the helper falls back to simply maximising the exposure
+ * time first, followed by analogue gain, followed by digital gain.
*
- * \return Tuple of shutter time, analogue gain, and digital gain
+ * \return Tuple of exposure time, analogue gain, and digital gain
*/
std::tuple<utils::Duration, double, double>
ExposureModeHelper::splitExposure(utils::Duration exposure) const
{
- ASSERT(maxShutter_);
+ ASSERT(maxExposureTime_);
ASSERT(maxGain_);
bool gainFixed = minGain_ == maxGain_;
- bool shutterFixed = minShutter_ == maxShutter_;
+ bool exposureTimeFixed = minExposureTime_ == maxExposureTime_;
/*
* There's no point entering the loop if we cannot change either gain
- * nor shutter anyway.
+ * nor exposure time anyway.
*/
- if (shutterFixed && gainFixed)
- return { minShutter_, minGain_, exposure / (minShutter_ * minGain_) };
+ if (exposureTimeFixed && gainFixed)
+ return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) };
- utils::Duration shutter;
- double stageGain;
+ utils::Duration exposureTime;
+ double stageGain = 1.0;
double gain;
for (unsigned int stage = 0; stage < gains_.size(); stage++) {
double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
- utils::Duration stageShutter = clampShutter(shutters_[stage]);
+ utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]);
stageGain = clampGain(gains_[stage]);
/*
- * We perform the clamping on both shutter and gain in case the
- * helper has had limits set that prevent those values being
- * lowered beyond a certain minimum...this can happen at runtime
- * for various reasons and so would not be known when the stage
- * limits are initialised.
+ * We perform the clamping on both exposure time and gain in
+ * case the helper has had limits set that prevent those values
+ * being lowered beyond a certain minimum...this can happen at
+ * runtime for various reasons and so would not be known when
+ * the stage limits are initialised.
*/
- if (stageShutter * lastStageGain >= exposure) {
- shutter = clampShutter(exposure / clampGain(lastStageGain));
- gain = clampGain(exposure / shutter);
+ if (stageExposureTime * lastStageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / exposureTime);
- return { shutter, gain, exposure / (shutter * gain) };
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
}
- if (stageShutter * stageGain >= exposure) {
- shutter = clampShutter(exposure / clampGain(stageGain));
- gain = clampGain(exposure / shutter);
+ if (stageExposureTime * stageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
- return { shutter, gain, exposure / (shutter * gain) };
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
}
}
/*
- * From here on all we can do is max out the shutter time, followed by
+ * From here on all we can do is max out the exposure time, followed by
* the analogue gain. If we still haven't achieved the target we send
* the rest of the exposure time to digital gain. If we were given no
- * stages to use then set stageGain to 1.0 so that shutter time is maxed
- * before gain touched at all.
+ * stages to use then the default stageGain of 1.0 is used so that
+ * exposure time is maxed before gain is touched at all.
*/
- if (gains_.empty())
- stageGain = 1.0;
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
- shutter = clampShutter(exposure / clampGain(stageGain));
- gain = clampGain(exposure / shutter);
-
- return { shutter, gain, exposure / (shutter * gain) };
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
}
/**
- * \fn ExposureModeHelper::minShutter()
- * \brief Retrieve the configured minimum shutter time limit set through
- * setShutterGainLimits()
- * \return The minShutter_ value
+ * \fn ExposureModeHelper::minExposureTime()
+ * \brief Retrieve the configured minimum exposure time limit set through
+ * setLimits()
+ * \return The minExposureTime_ value
*/
/**
- * \fn ExposureModeHelper::maxShutter()
- * \brief Retrieve the configured maximum shutter time set through
- * setShutterGainLimits()
- * \return The maxShutter_ value
+ * \fn ExposureModeHelper::maxExposureTime()
+ * \brief Retrieve the configured maximum exposure time set through setLimits()
+ * \return The maxExposureTime_ value
*/
/**
* \fn ExposureModeHelper::minGain()
- * \brief Retrieve the configured minimum gain set through
- * setShutterGainLimits()
+ * \brief Retrieve the configured minimum gain set through setLimits()
* \return The minGain_ value
*/
/**
* \fn ExposureModeHelper::maxGain()
- * \brief Retrieve the configured maximum gain set through
- * setShutterGainLimits()
+ * \brief Retrieve the configured maximum gain set through setLimits()
* \return The maxGain_ value
*/
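
As a usage sketch of the renamed interface (the stage values and limits below are invented, not taken from any tuning file):

    #include <utility>
    #include <vector>

    #include "exposure_mode_helper.h"

    using namespace libcamera;
    using namespace std::literals::chrono_literals;

    void example()
    {
        /* Two stages: up to 10ms at gain 2.0, then up to 33ms at gain 8.0. */
        std::vector<std::pair<utils::Duration, double>> stages = {
            { 10ms, 2.0 },
            { 33ms, 8.0 },
        };
        ipa::ExposureModeHelper helper(stages);

        /* Refresh the runtime limits whenever the sensor mode changes. */
        helper.setLimits(100us, 33ms, 1.0, 16.0);

        /* Divide an effective exposure value into the three factors. */
        auto [exposureTime, aGain, dGain] = helper.splitExposure(60ms);
    }
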
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
index 85c665d7..c5be1b67 100644
--- a/src/ipa/libipa/exposure_mode_helper.h
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -24,26 +24,26 @@ public:
ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
~ExposureModeHelper() = default;
- void setLimits(utils::Duration minShutter, utils::Duration maxShutter,
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
double minGain, double maxGain);
std::tuple<utils::Duration, double, double>
splitExposure(utils::Duration exposure) const;
- utils::Duration minShutter() const { return minShutter_; }
- utils::Duration maxShutter() const { return maxShutter_; }
+ utils::Duration minExposureTime() const { return minExposureTime_; }
+ utils::Duration maxExposureTime() const { return maxExposureTime_; }
double minGain() const { return minGain_; }
double maxGain() const { return maxGain_; }
private:
- utils::Duration clampShutter(utils::Duration shutter) const;
+ utils::Duration clampExposureTime(utils::Duration exposureTime) const;
double clampGain(double gain) const;
- std::vector<utils::Duration> shutters_;
+ std::vector<utils::Duration> exposureTimes_;
std::vector<double> gains_;
- utils::Duration minShutter_;
- utils::Duration maxShutter_;
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
double minGain_;
double maxGain_;
};
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
index 24d9e82b..a1d13652 100644
--- a/src/ipa/libipa/fc_queue.h
+++ b/src/ipa/libipa/fc_queue.h
@@ -25,6 +25,7 @@ struct FrameContext {
private:
template<typename T> friend class FCQueue;
uint32_t frame;
+ bool initialised = false;
};
template<typename FrameContext>
@@ -38,8 +39,10 @@ public:
void clear()
{
- for (FrameContext &ctx : contexts_)
+ for (FrameContext &ctx : contexts_) {
+ ctx.initialised = false;
ctx.frame = 0;
+ }
}
FrameContext &alloc(const uint32_t frame)
@@ -83,6 +86,21 @@ public:
<< " has been overwritten by "
<< frameContext.frame;
+ if (frame == 0 && !frameContext.initialised) {
+ /*
+ * If the IPA calls get() at start() time it will get an
+ * uninitialised FrameContext as the below "frame ==
+ * frameContext.frame" check will return success because
+ * FrameContexts are zeroed at creation time.
+ *
+ * Make sure the FrameContext gets initialised if get()
+ * is called before alloc() by the IPA for frame#0.
+ */
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
if (frame == frameContext.frame)
return frameContext;
@@ -108,6 +126,7 @@ private:
{
frameContext = {};
frameContext.frame = frame;
+ frameContext.initialised = true;
}
std::vector<FrameContext> contexts_;
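
A sketch of the situation the new flag addresses; IPAFrameContext and the call site are hypothetical stand-ins for whatever the IPA module defines.

    #include "fc_queue.h"

    using namespace libcamera;

    struct IPAFrameContext : public ipa::FrameContext {
        double totalGain;
    };

    void configure(ipa::FCQueue<IPAFrameContext> &queue)
    {
        /*
         * Before this change, calling get(0) ahead of alloc(0) returned a
         * context that had never been through init(), because a zeroed
         * context already satisfies the frame == frameContext.frame check.
         * With the initialised flag, get(0) now initialises it on first use.
         */
        IPAFrameContext &ctx = queue.get(0);
        ctx.totalGain = 1.0;
    }
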
diff --git a/src/ipa/libipa/fixedpoint.cpp b/src/ipa/libipa/fixedpoint.cpp
new file mode 100644
index 00000000..6b698fc5
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.cpp
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#include "fixedpoint.h"
+
+/**
+ * \file fixedpoint.h
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \fn R floatingToFixedPoint(T number)
+ * \brief Convert a floating point number to a fixed-point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the fixed-point representation
+ * \tparam T Input type of the floating point representation
+ * \param number The floating point number to convert to fixed point
+ * \return The converted value
+ */
+
+/**
+ * \fn R fixedToFloatingPoint(T number)
+ * \brief Convert a fixed-point number to a floating point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the floating point representation
+ * \tparam T Input type of the fixed-point representation
+ * \param number The fixed point number to convert to floating point
+ * \return The converted value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fixedpoint.h b/src/ipa/libipa/fixedpoint.h
new file mode 100644
index 00000000..709cf50f
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#pragma once
+
+#include <cmath>
+#include <type_traits>
+
+namespace libcamera {
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_integral_v<R> &&
+ std::is_floating_point_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R floatingToFixedPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(R));
+ static_assert(I + F <= sizeof(R) * 8);
+
+ /*
+ * The intermediate cast to int is needed on arm platforms to properly
+ * cast negative values. See
+ * https://embeddeduse.com/2013/08/25/casting-a-negative-float-to-an-unsigned-int/
+ */
+ R mask = (1 << (F + I)) - 1;
+ R frac = static_cast<R>(static_cast<int>(std::round(number * (1 << F)))) & mask;
+
+ return frac;
+}
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_floating_point_v<R> &&
+ std::is_integral_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R fixedToFloatingPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(T));
+ static_assert(I + F <= sizeof(T) * 8);
+
+ /*
+ * Recreate the upper bits in case of a negative number by shifting the
+ * sign bit of the fixed-point value up to the most significant bit of the
+ * intermediate integer, and then right shifting by the same amount, which
+ * sign-extends the value.
+ * This can be optimized by the compiler quite well.
+ */
+ int remaining_bits = sizeof(int) * 8 - (I + F);
+ int t = static_cast<int>(static_cast<unsigned>(number) << remaining_bits) >> remaining_bits;
+ return static_cast<R>(t) / static_cast<R>(1 << F);
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
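
A small round-trip sketch, assuming a Q4.8 register layout (the values are arbitrary):

    #include <stdint.h>

    #include "fixedpoint.h"

    using namespace libcamera;

    void example()
    {
        /* -1.5 encoded as a 12-bit two's complement Q4.8 value: 0xe80. */
        uint16_t reg = ipa::floatingToFixedPoint<4, 8, uint16_t, double>(-1.5);

        /* Back to floating point; exact here as -1.5 fits in 8 fractional bits. */
        double value = ipa::fixedToFloatingPoint<4, 8, double, uint16_t>(reg);
    }
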
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
index 5fbfadf5..10e44b54 100644
--- a/src/ipa/libipa/histogram.cpp
+++ b/src/ipa/libipa/histogram.cpp
@@ -63,6 +63,12 @@ Histogram::Histogram(Span<const uint32_t> data)
*/
/**
+ * \fn Histogram::data()
+ * \brief Retrieve the internal data
+ * \return The data
+ */
+
+/**
* \fn Histogram::total()
* \brief Retrieve the total number of values in the data set
* \return Number of values
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
index 032adca0..a926002c 100644
--- a/src/ipa/libipa/histogram.h
+++ b/src/ipa/libipa/histogram.h
@@ -7,7 +7,6 @@
#pragma once
-#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <type_traits>
@@ -37,6 +36,7 @@ public:
}
size_t bins() const { return cumulative_.size() - 1; }
+ const Span<const uint64_t> data() const { return cumulative_; }
uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
uint64_t cumulativeFrequency(double bin) const;
double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const;
diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp
new file mode 100644
index 00000000..f901a86e
--- /dev/null
+++ b/src/ipa/libipa/interpolator.cpp
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating objects
+ */
+#include "interpolator.h"
+
+#include <algorithm>
+#include <string>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+/**
+ * \file interpolator.h
+ * \brief Helper class for linear interpolating a set of objects
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+/**
+ * \class Interpolator
+ * \brief Class for storing, retrieving, and interpolating objects
+ * \tparam T Type of objects stored in the interpolator
+ *
+ * The main use case is to pass a map from color temperatures to corresponding
+ * objects (eg. matrices for color correction), and then requesting a
+ * interpolated object for a specific color temperature. This class will
+ * abstract away the interpolation portion.
+ */
+
+/**
+ * \fn Interpolator::Interpolator()
+ * \brief Construct an empty interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ * \brief Initialize an Interpolator instance from yaml
+ * \tparam T Type of data stored in the interpolator
+ * \param[in] yaml The yaml object that contains the map of unsigned integers to
+ * objects
+ * \param[in] key_name The name of the key in the yaml object
+ * \param[in] value_name The name of the value in the yaml object
+ *
+ * The yaml object is expected to be a list of maps. Each map has two or more
+ * pairs: one of \a key_name to the key value (usually color temperature), and
+ * one or more of \a value_name to the object. This is a bit difficult to
+ * explain, so here is an example (in python, as it is easier to parse than
+ * yaml):
+ * [
+ * {
+ * 'ct': 2860,
+ * 'ccm': [ 2.12089, -0.52461, -0.59629,
+ * -0.85342, 2.80445, -0.95103,
+ * -0.26897, -1.14788, 2.41685 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 2960,
+ * 'ccm': [ 2.26962, -0.54174, -0.72789,
+ * -0.77008, 2.60271, -0.83262,
+ * -0.26036, -1.51254, 2.77289 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 3603,
+ * 'ccm': [ 2.18644, -0.66148, -0.52496,
+ * -0.77828, 2.69474, -0.91645,
+ * -0.25239, -0.83059, 2.08298 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ * ]
+ *
+ * In this case, \a key_name would be 'ct', and \a value_name can be either
+ * 'ccm' or 'offsets'. This way multiple interpolators can be defined in
+ * one set of color temperature ranges in the tuning file, and they can be
+ * retrieved separately with the \a value_name parameter.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \fn void Interpolator<T>::setQuantization(const unsigned int q)
+ * \brief Set the quantization value
+ * \param[in] q The quantization value
+ *
+ * Sets the quantization value. When set, the lookup key is rounded to the
+ * nearest multiple of this value before interpolation. This can help in
+ * reducing the number of updates pushed to the hardware.
+ *
+ * Note that normally a threshold needs to be combined with quantization.
+ * Otherwise a value that swings around the edge of the quantization step will
+ * lead to constant updates.
+ */
+
+/**
+ * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data)
+ * \brief Set the internal map
+ *
+ * Overwrites the internal map using move semantics.
+ */
+
+/**
+ * \fn std::map<unsigned int, T> &Interpolator<T>::data() const
+ * \brief Access the internal map
+ *
+ * \return The internal map
+ */
+
+/**
+ * \fn const T& Interpolator<T>::getInterpolated()
+ * \brief Retrieve an interpolated value for the given key
+ * \param[in] key The unsigned integer key of the object to retrieve
+ * \param[out] quantizedKey If provided, the key value after quantization
+ * \return The object corresponding to the key. The object is cached internally,
+ * so on successive calls with the same key (after quantization) interpolation
+ * is not recalculated.
+ */
+
+/**
+ * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double lambda)
+ * \brief Interpolate between two instances of T
+ * \param a The first value to interpolate
+ * \param b The second value to interpolate
+ * \param dest The destination for the interpolated value
+ * \param lambda The interpolation factor (0..1)
+ *
+ * Interpolates between \a a and \a b according to \a lambda. It calculates
+ * dest = a * (1.0 - lambda) + b * lambda;
+ *
+ * If T supports multiplication with double and addition, this function can be
+ * used as is. For other types this function can be specialised for the type
+ * in question.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h
new file mode 100644
index 00000000..7880db69
--- /dev/null
+++ b/src/ipa/libipa/interpolator.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating maps of objects
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <optional>
+#include <string>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+template<typename T>
+class Interpolator
+{
+public:
+ Interpolator() = default;
+ Interpolator(const std::map<unsigned int, T> &data)
+ : data_(data)
+ {
+ }
+ Interpolator(std::map<unsigned int, T> &&data)
+ : data_(std::move(data))
+ {
+ }
+
+ ~Interpolator() = default;
+
+ int readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ {
+ data_.clear();
+ lastInterpolatedKey_.reset();
+
+ if (!yaml.isList()) {
+ LOG(Interpolator, Error) << "yaml object must be a list";
+ return -EINVAL;
+ }
+
+ for (const auto &value : yaml.asList()) {
+ unsigned int ct = std::stoul(value[key_name].get<std::string>(""));
+ std::optional<T> data =
+ value[value_name].get<T>();
+ if (!data) {
+ return -EINVAL;
+ }
+
+ data_[ct] = *data;
+ }
+
+ if (data_.size() < 1) {
+ LOG(Interpolator, Error) << "Need at least one element";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ void setQuantization(const unsigned int q)
+ {
+ quantization_ = q;
+ }
+
+ void setData(std::map<unsigned int, T> &&data)
+ {
+ data_ = std::move(data);
+ lastInterpolatedKey_.reset();
+ }
+
+ const std::map<unsigned int, T> &data() const
+ {
+ return data_;
+ }
+
+ const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr)
+ {
+ ASSERT(data_.size() > 0);
+
+ if (quantization_ > 0)
+ key = std::lround(key / static_cast<double>(quantization_)) * quantization_;
+
+ if (quantizedKey)
+ *quantizedKey = key;
+
+ if (lastInterpolatedKey_.has_value() &&
+ *lastInterpolatedKey_ == key)
+ return lastInterpolatedValue_;
+
+ auto it = data_.lower_bound(key);
+
+ if (it == data_.begin())
+ return it->second;
+
+ if (it == data_.end())
+ return std::prev(it)->second;
+
+ if (it->first == key)
+ return it->second;
+
+ auto it2 = std::prev(it);
+ double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first);
+ interpolate(it2->second, it->second, lastInterpolatedValue_, lambda);
+ lastInterpolatedKey_ = key;
+
+ return lastInterpolatedValue_;
+ }
+
+ void interpolate(const T &a, const T &b, T &dest, double lambda)
+ {
+ dest = a * (1.0 - lambda) + b * lambda;
+ }
+
+private:
+ std::map<unsigned int, T> data_;
+ T lastInterpolatedValue_;
+ std::optional<unsigned int> lastInterpolatedKey_;
+ unsigned int quantization_ = 0;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
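
A usage sketch of the template; the colour-temperature-to-gain data points are invented:

    #include <map>

    #include "interpolator.h"

    using namespace libcamera;

    void example()
    {
        std::map<unsigned int, double> redGains = {
            { 2800, 1.8 },
            { 6500, 1.2 },
        };
        ipa::Interpolator<double> redGain(redGains);

        /* Round lookups to 100K steps to limit hardware updates. */
        redGain.setQuantization(100);

        /* Linearly interpolated between the 2800K and 6500K entries. */
        double gain = redGain.getInterpolated(4650);
    }
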
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp
new file mode 100644
index 00000000..f607d86c
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Polynomial class to represent lens shading correction
+ */
+
+#include "lsc_polynomial.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file lsc_polynomial.h
+ * \brief LscPolynomial class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+/**
+ * \class LscPolynomial
+ * \brief Class for handling even polynomials used in lens shading correction
+ *
+ * Shading artifacts of camera lenses can be modeled using even radial
+ * polynomials. This class implements a polynomial with 5 coefficients which
+ * follows the definition of the FixVignetteRadial opcode in the Adobe DNG
+ * specification.
+ */
+
+/**
+ * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ * \brief Construct a polynomial using the given coefficients
+ * \param cx Center-x relative to the image in normalized coordinates (0..1)
+ * \param cy Center-y relative to the image in normalized coordinates (0..1)
+ * \param k0 Coefficient of the polynomial
+ * \param k1 Coefficient of the polynomial
+ * \param k2 Coefficient of the polynomial
+ * \param k3 Coefficient of the polynomial
+ * \param k4 Coefficient of the polynomial
+ */
+
+/**
+ * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y)
+ * \brief Sample the polynomial at the given normalized pixel position
+ *
+ * This function samples the polynomial at the given pixel position divided by
+ * the value returned by getM().
+ *
+ * \param x x position in normalized coordinates
+ * \param y y position in normalized coordinates
+ * \return The sampled value
+ */
+
+/**
+ * \fn LscPolynomial::getM()
+ * \brief Get the value m as described in the DNG specification
+ *
+ * Returns m according to the DNG specification. m represents the Euclidean
+ * distance (in pixels) from the optical center to the farthest pixel in the
+ * image.
+ *
+ * \return The value m, in pixels
+ */
+
+/**
+ * \fn LscPolynomial::setReferenceImageSize(const Size &size)
+ * \brief Set the reference image size
+ *
+ * Set the reference image size that is used for subsequent calls to getM() and
+ * sampleAtNormalizedPixelPos()
+ *
+ * \param size The size of the reference image
+ */
+
+} // namespace ipa
+} // namespace libcamera
diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h
new file mode 100644
index 00000000..c898faeb
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Helper for radial polynomial used in lens shading correction.
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <assert.h>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+class LscPolynomial
+{
+public:
+ LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ : cx_(cx), cy_(cy), cnx_(0), cny_(0),
+ coefficients_({ k0, k1, k2, k3, k4 })
+ {
+ }
+
+ double sampleAtNormalizedPixelPos(double x, double y) const
+ {
+ double dx = x - cnx_;
+ double dy = y - cny_;
+ double r = sqrt(dx * dx + dy * dy);
+ double res = 1.0;
+ for (unsigned int i = 0; i < coefficients_.size(); i++) {
+ res += coefficients_[i] * std::pow(r, (i + 1) * 2);
+ }
+ return res;
+ }
+
+ double getM() const
+ {
+ double cpx = imageSize_.width * cx_;
+ double cpy = imageSize_.height * cy_;
+ double mx = std::max(cpx, std::fabs(imageSize_.width - cpx));
+ double my = std::max(cpy, std::fabs(imageSize_.height - cpy));
+
+ return sqrt(mx * mx + my * my);
+ }
+
+ void setReferenceImageSize(const Size &size)
+ {
+ assert(!size.isNull());
+ imageSize_ = size;
+
+ /* Calculate normalized centers */
+ double m = getM();
+ cnx_ = (size.width * cx_) / m;
+ cny_ = (size.height * cy_) / m;
+ }
+
+private:
+ double cx_;
+ double cy_;
+ double cnx_;
+ double cny_;
+ std::array<double, 5> coefficients_;
+
+ Size imageSize_;
+};
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+
+template<>
+struct YamlObject::Getter<ipa::LscPolynomial> {
+ std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const
+ {
+ std::optional<double> cx = obj["cx"].get<double>();
+ std::optional<double> cy = obj["cy"].get<double>();
+ std::optional<double> k0 = obj["k0"].get<double>();
+ std::optional<double> k1 = obj["k1"].get<double>();
+ std::optional<double> k2 = obj["k2"].get<double>();
+ std::optional<double> k3 = obj["k3"].get<double>();
+ std::optional<double> k4 = obj["k4"].get<double>();
+
+ if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) {
+ LOG(LscPolynomial, Error)
+ << "Polynomial is missing a parameter";
+ return std::nullopt;
+ }
+
+ return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4);
+ }
+};
+
+#endif
+
+} /* namespace libcamera */
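
A short sketch of how the polynomial is evaluated; the coefficients and image size are placeholders rather than tuning data:

    #include <libcamera/geometry.h>

    #include "lsc_polynomial.h"

    using namespace libcamera;

    void example()
    {
        /* Optical centre in the middle of the image, r^4 term only. */
        ipa::LscPolynomial poly(0.5, 0.5, 0.0, 0.2, 0.0, 0.0, 0.0);
        poly.setReferenceImageSize(Size(4056, 3040));

        /* Relative shading gain at the top-left corner of the image. */
        double gain = poly.sampleAtNormalizedPixelPos(0.0, 0.0);
    }
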
diff --git a/src/ipa/libipa/lux.cpp b/src/ipa/libipa/lux.cpp
new file mode 100644
index 00000000..899e8824
--- /dev/null
+++ b/src/ipa/libipa/lux.cpp
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+#include "lux.h"
+
+#include <algorithm>
+#include <chrono>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "histogram.h"
+
+/**
+ * \file lux.h
+ * \brief Helper class that implements lux estimation
+ *
+ * Estimating the lux level of an image is a common operation that can for
+ * instance be used to adjust the target Y value in AGC or for Bayesian AWB
+ * estimation.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(Lux)
+
+namespace ipa {
+
+/**
+ * \class Lux
+ * \brief Class that implements lux estimation
+ *
+ * IPAs that wish to use lux estimation should create a Lux algorithm module
+ * that lightly wraps this module by providing the platform-specific luminance
+ * histogram. The Lux entry in the tuning file must then precede the algorithms
+ * that depend on the estimated lux value.
+ */
+
+/**
+ * \var Lux::referenceExposureTime_
+ * \brief The exposure time of the reference image, in microseconds
+ */
+
+/**
+ * \var Lux::referenceAnalogueGain_
+ * \brief The analogue gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceDigitalGain_
+ * \brief The digital gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceY_
+ * \brief The measured luminance of the reference image, normalized to 1
+ */
+
+/**
+ * \var Lux::referenceLux_
+ * \brief The estimated lux level of the reference image
+ */
+
+/**
+ * \brief Construct the Lux helper module
+ */
+Lux::Lux()
+{
+}
+
+/**
+ * \brief Parse tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses yaml tuning data for the common Lux module. It requires
+ * reference exposure time, analogue gain, digital gain, and lux values.
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Lux:
+ * referenceExposureTime: 10000
+ * referenceAnalogueGain: 4.0
+ * referenceDigitalGain: 1.0
+ * referenceY: 0.1831
+ * referenceLux: 1000
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int Lux::parseTuningData(const YamlObject &tuningData)
+{
+ auto value = tuningData["referenceExposureTime"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceExposureTime'";
+ return -EINVAL;
+ }
+ referenceExposureTime_ = *value * 1.0us;
+
+ value = tuningData["referenceAnalogueGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceAnalogueGain'";
+ return -EINVAL;
+ }
+ referenceAnalogueGain_ = *value;
+
+ value = tuningData["referenceDigitalGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceDigitalGain'";
+ return -EINVAL;
+ }
+ referenceDigitalGain_ = *value;
+
+ value = tuningData["referenceY"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceY'";
+ return -EINVAL;
+ }
+ referenceY_ = *value;
+
+ value = tuningData["referenceLux"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceLux'";
+ return -EINVAL;
+ }
+ referenceLux_ = *value;
+
+ return 0;
+}
+
+/**
+ * \brief Estimate lux given runtime values
+ * \param[in] exposureTime Exposure time applied to the frame
+ * \param[in] aGain Analogue gain applied to the frame
+ * \param[in] dGain Digital gain applied to the frame
+ * \param[in] yHist Histogram from the ISP statistics
+ *
+ * Estimate the lux given the exposure time, gain, and histogram.
+ *
+ * \return Estimated lux value
+ */
+double Lux::estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const
+{
+ double currentY = yHist.interQuantileMean(0, 1);
+ double exposureTimeRatio = referenceExposureTime_ / exposureTime;
+ double aGainRatio = referenceAnalogueGain_ / aGain;
+ double dGainRatio = referenceDigitalGain_ / dGain;
+ double yRatio = (currentY / yHist.bins()) / referenceY_;
+
+ double estimatedLux = exposureTimeRatio * aGainRatio * dGainRatio *
+ yRatio * referenceLux_;
+
+ LOG(Lux, Debug) << "Estimated lux " << estimatedLux;
+ return estimatedLux;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
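
To make the ratio arithmetic concrete with invented numbers: using the reference values from the tuning snippet above (10000 us, analogue gain 4.0, digital gain 1.0, referenceY 0.1831, referenceLux 1000), a frame captured at 5000 us with the same gains whose normalised inter-quantile mean comes out at 0.3662 is estimated at 2 * 1 * 1 * 2 * 1000 = 4000 lux.
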
diff --git a/src/ipa/libipa/lux.h b/src/ipa/libipa/lux.h
new file mode 100644
index 00000000..d95bcdaf
--- /dev/null
+++ b/src/ipa/libipa/lux.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+class Histogram;
+
+class Lux
+{
+public:
+ Lux();
+
+ int parseTuningData(const YamlObject &tuningData);
+ double estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const;
+
+private:
+ utils::Duration referenceExposureTime_;
+ double referenceAnalogueGain_;
+ double referenceDigitalGain_;
+ double referenceY_;
+ double referenceLux_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/matrix.cpp b/src/ipa/libipa/matrix.cpp
deleted file mode 100644
index 8346f0d3..00000000
--- a/src/ipa/libipa/matrix.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Matrix and related operations
- */
-
-#include "matrix.h"
-
-#include <libcamera/base/log.h>
-
-/**
- * \file matrix.h
- * \brief Matrix class
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Matrix)
-
-namespace ipa {
-
-/**
- * \class Matrix
- * \brief Matrix class
- * \tparam T Type of numerical values to be stored in the matrix
- * \tparam Rows Number of rows in the matrix
- * \tparam Cols Number of columns in the matrix
- */
-
-/**
- * \fn Matrix::Matrix()
- * \brief Construct a zero matrix
- */
-
-/**
- * \fn Matrix::Matrix(const std::vector<T> &data)
- * \brief Construct a matrix from supplied data
- * \param[in] data Data from which to construct a matrix
- *
- * \a data is a one-dimensional vector and will be turned into a matrix in
- * row-major order. The size of \a data must be equal to the product of the
- * number of rows and columns of the matrix (Rows x Cols).
- */
-
-/**
- * \fn Matrix::identity()
- * \brief Construct an identity matrix
- */
-
-/**
- * \fn Matrix::toString()
- * \brief Assemble and return a string describing the matrix
- * \return A string describing the matrix
- */
-
-/**
- * \fn Span<const T, Cols> Matrix::operator[](size_t i) const
- * \brief Index to a row in the matrix
- * \param[in] i Index of row to retrieve
- *
- * This operator[] returns a Span, which can then be indexed into again with
- * another operator[], allowing a convenient m[i][j] to access elements of the
- * matrix. Note that the lifetime of the Span returned by this first-level
- * operator[] is bound to that of the Matrix itself, so it is not recommended
- * to save the Span that is the result of this operator[].
- *
- * \return Row \a i from the matrix, as a Span
- */
-
-/**
- * \fn Matrix::operator[](size_t i)
- * \copydoc Matrix::operator[](size_t i) const
- */
-
-/**
- * \fn Matrix<T, Rows, Cols> &Matrix::operator*=(U d)
- * \brief Multiply the matrix by a scalar in-place
- * \tparam U Type of the numerical scalar value
- * \param d The scalar multiplier
- * \return Product of this matrix and scalar \a d
- */
-
-/**
- * \fn Matrix::Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
- * \brief Multiply the matrix by a scalar
- * \tparam T Type of the numerical scalar value
- * \tparam U Type of numerical values in the matrix
- * \tparam Rows Number of rows in the matrix
- * \tparam Cols Number of columns in the matrix
- * \param d The scalar multiplier
- * \param m The matrix
- * \return Product of scalar \a d and matrix \a m
- */
-
-/**
- * \fn Matrix::Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
- * \copydoc operator*(T d, const Matrix<U, Rows, Cols> &m)
- */
-
-/**
- * \fn Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
- * \brief Matrix multiplication
- * \tparam T Type of numerical values in the matrices
- * \tparam R1 Number of rows in the first matrix
- * \tparam C1 Number of columns in the first matrix
- * \tparam R2 Number of rows in the second matrix
- * \tparam C2 Number of columns in the second matrix
- * \param m1 Multiplicand matrix
- * \param m2 Multiplier matrix
- * \return Matrix product of matrices \a m1 and \a m2
- */
-
-/**
- * \fn Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
- * \brief Matrix addition
- * \tparam T Type of numerical values in the matrices
- * \tparam Rows Number of rows in the matrices
- * \tparam Cols Number of columns in the matrices
- * \param m1 Summand matrix
- * \param m2 Summand matrix
- * \return Matrix sum of matrices \a m1 and \a m2
- */
-
-#ifndef __DOXYGEN__
-/*
- * The YAML data shall be a list of numerical values. Its size shall be equal
- * to the product of the number of rows and columns of the matrix (Rows x
- * Cols). The values shall be stored in row-major order.
- */
-bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
-{
- if (!obj.isList())
- return false;
-
- if (obj.size() != size) {
- LOG(Matrix, Error)
- << "Wrong number of values in matrix: expected "
- << size << ", got " << obj.size();
- return false;
- }
-
- return true;
-}
-#endif /* __DOXYGEN__ */
-
-} /* namespace ipa */
-
-} /* namespace libcamera */
diff --git a/src/ipa/libipa/matrix.h b/src/ipa/libipa/matrix.h
deleted file mode 100644
index 8aa8f343..00000000
--- a/src/ipa/libipa/matrix.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Matrix and related operations
- */
-#pragma once
-
-#include <algorithm>
-#include <cmath>
-#include <sstream>
-#include <vector>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/span.h>
-
-#include "libcamera/internal/yaml_parser.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(Matrix)
-
-namespace ipa {
-
-#ifndef __DOXYGEN__
-template<typename T, unsigned int Rows, unsigned int Cols,
- std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
-#else
-template<typename T, unsigned int Rows, unsigned int Cols>
-#endif /* __DOXYGEN__ */
-class Matrix
-{
-public:
- Matrix()
- {
- data_.fill(static_cast<T>(0));
- }
-
- Matrix(const std::vector<T> &data)
- {
- std::copy(data.begin(), data.end(), data_.begin());
- }
-
- static Matrix identity()
- {
- Matrix ret;
- for (size_t i = 0; i < std::min(Rows, Cols); i++)
- ret[i][i] = static_cast<T>(1);
- return ret;
- }
-
- ~Matrix() = default;
-
- const std::string toString() const
- {
- std::stringstream out;
-
- out << "Matrix { ";
- for (unsigned int i = 0; i < Rows; i++) {
- out << "[ ";
- for (unsigned int j = 0; j < Cols; j++) {
- out << (*this)[i][j];
- out << ((j + 1 < Cols) ? ", " : " ");
- }
- out << ((i + 1 < Rows) ? "], " : "]");
- }
- out << " }";
-
- return out.str();
- }
-
- Span<const T, Cols> operator[](size_t i) const
- {
- return Span<const T, Cols>{ &data_.data()[i * Cols], Cols };
- }
-
- Span<T, Cols> operator[](size_t i)
- {
- return Span<T, Cols>{ &data_.data()[i * Cols], Cols };
- }
-
-#ifndef __DOXYGEN__
- template<typename U, std::enable_if_t<std::is_arithmetic_v<U>>>
-#else
- template<typename U>
-#endif /* __DOXYGEN__ */
- Matrix<T, Rows, Cols> &operator*=(U d)
- {
- for (unsigned int i = 0; i < Rows * Cols; i++)
- data_[i] *= d;
- return *this;
- }
-
-private:
- std::array<T, Rows * Cols> data_;
-};
-
-#ifndef __DOXYGEN__
-template<typename T, typename U, unsigned int Rows, unsigned int Cols,
- std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
-#else
-template<typename T, typename U, unsigned int Rows, unsigned int Cols>
-#endif /* __DOXYGEN__ */
-Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
-{
- Matrix<U, Rows, Cols> result;
-
- for (unsigned int i = 0; i < Rows; i++) {
- for (unsigned int j = 0; j < Cols; j++)
- result[i][j] = d * m[i][j];
- }
-
- return result;
-}
-
-#ifndef __DOXYGEN__
-template<typename T, typename U, unsigned int Rows, unsigned int Cols,
- std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
-#else
-template<typename T, typename U, unsigned int Rows, unsigned int Cols>
-#endif /* __DOXYGEN__ */
-Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
-{
- return d * m;
-}
-
-#ifndef __DOXYGEN__
-template<typename T,
- unsigned int R1, unsigned int C1,
- unsigned int R2, unsigned int C2,
- std::enable_if_t<C1 == R2> * = nullptr>
-#else
-template<typename T, unsigned int R1, unsigned int C1, unsigned int R2, unsigned in C2>
-#endif /* __DOXYGEN__ */
-Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
-{
- Matrix<T, R1, C2> result;
-
- for (unsigned int i = 0; i < R1; i++) {
- for (unsigned int j = 0; j < C2; j++) {
- T sum = 0;
-
- for (unsigned int k = 0; k < C1; k++)
- sum += m1[i][k] * m2[k][j];
-
- result[i][j] = sum;
- }
- }
-
- return result;
-}
-
-template<typename T, unsigned int Rows, unsigned int Cols>
-Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
-{
- Matrix<T, Rows, Cols> result;
-
- for (unsigned int i = 0; i < Rows; i++) {
- for (unsigned int j = 0; j < Cols; j++)
- result[i][j] = m1[i][j] + m2[i][j];
- }
-
- return result;
-}
-
-#ifndef __DOXYGEN__
-bool matrixValidateYaml(const YamlObject &obj, unsigned int size);
-#endif /* __DOXYGEN__ */
-
-} /* namespace ipa */
-
-#ifndef __DOXYGEN__
-template<typename T, unsigned int Rows, unsigned int Cols>
-std::ostream &operator<<(std::ostream &out, const ipa::Matrix<T, Rows, Cols> &m)
-{
- out << m.toString();
- return out;
-}
-
-template<typename T, unsigned int Rows, unsigned int Cols>
-struct YamlObject::Getter<ipa::Matrix<T, Rows, Cols>> {
- std::optional<ipa::Matrix<T, Rows, Cols>> get(const YamlObject &obj) const
- {
- if (!ipa::matrixValidateYaml(obj, Rows * Cols))
- return std::nullopt;
-
- ipa::Matrix<T, Rows, Cols> matrix;
- T *data = &matrix[0][0];
-
- unsigned int i = 0;
- for (const YamlObject &entry : obj.asList()) {
- const auto value = entry.get<T>();
- if (!value)
- return std::nullopt;
-
- data[i++] = *value;
- }
-
- return matrix;
- }
-};
-#endif /* __DOXYGEN__ */
-
-} /* namespace libcamera */
diff --git a/src/ipa/libipa/matrix_interpolator.cpp b/src/ipa/libipa/matrix_interpolator.cpp
deleted file mode 100644
index 04ca177f..00000000
--- a/src/ipa/libipa/matrix_interpolator.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Helper class for interpolating maps of matrices
- */
-#include "matrix_interpolator.h"
-
-#include <algorithm>
-#include <string>
-
-#include <libcamera/base/log.h>
-
-#include "libcamera/internal/yaml_parser.h"
-
-#include "matrix.h"
-
-/**
- * \file matrix_interpolator.h
- * \brief Helper class for interpolating maps of matrices
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(MatrixInterpolator)
-
-namespace ipa {
-
-/**
- * \class MatrixInterpolator
- * \brief Class for storing, retrieving, and interpolating matrices
- * \tparam T Type of numerical values to be stored in the matrices
- * \tparam R Number of rows in the matrices
- * \tparam C Number of columns in the matrices
- *
- * The main use case is to pass a map from color temperatures to corresponding
- * matrices (eg. color correction), and then requesting a matrix for a specific
- * color temperature. This class will abstract away the interpolation portion.
- */
-
-/**
- * \fn MatrixInterpolator::MatrixInterpolator(const std::map<unsigned int, Matrix<T, R, C>> &matrices)
- * \brief Construct a matrix interpolator from a map of matrices
- * \param matrices Map from which to construct the matrix interpolator
- */
-
-/**
- * \fn MatrixInterpolator::reset()
- * \brief Reset the matrix interpolator content to a single identity matrix
- */
-
-/**
- * \fn int MatrixInterpolator<T, R, C>::readYaml()
- * \brief Initialize an MatrixInterpolator instance from yaml
- * \tparam T Type of data stored in the matrices
- * \tparam R Number of rows of the matrices
- * \tparam C Number of columns of the matrices
- * \param[in] yaml The yaml object that contains the map of unsigned integers to matrices
- * \param[in] key_name The name of the key in the yaml object
- * \param[in] matrix_name The name of the matrix in the yaml object
- *
- * The yaml object is expected to be a list of maps. Each map has two or more
- * pairs: one of \a key_name to the key value (usually color temperature), and
- * one or more of \a matrix_name to the matrix. This is a bit difficult to
- * explain, so here is an example (in python, as it is easier to parse than
- * yaml):
- * [
- * {
- * 'ct': 2860,
- * 'ccm': [ 2.12089, -0.52461, -0.59629,
- * -0.85342, 2.80445, -0.95103,
- * -0.26897, -1.14788, 2.41685 ],
- * 'offsets': [ 0, 0, 0 ]
- * },
- *
- * {
- * 'ct': 2960,
- * 'ccm': [ 2.26962, -0.54174, -0.72789,
- * -0.77008, 2.60271, -0.83262,
- * -0.26036, -1.51254, 2.77289 ],
- * 'offsets': [ 0, 0, 0 ]
- * },
- *
- * {
- * 'ct': 3603,
- * 'ccm': [ 2.18644, -0.66148, -0.52496,
- * -0.77828, 2.69474, -0.91645,
- * -0.25239, -0.83059, 2.08298 ],
- * 'offsets': [ 0, 0, 0 ]
- * },
- * ]
- *
- * In this case, \a key_name would be 'ct', and \a matrix_name can be either
- * 'ccm' or 'offsets'. This way multiple matrix interpolators can be defined in
- * one set of color temperature ranges in the tuning file, and they can be
- * retrieved separately with the \a matrix_name parameter.
- *
- * \return Zero on success, negative error code otherwise
- */
-
-/**
- * \fn Matrix<T, R, C> MatrixInterpolator<T, R, C>::get(unsigned int key)
- * \brief Retrieve a matrix from the list of matrices, interpolating if necessary
- * \param[in] key The unsigned integer key of the matrix to retrieve
- * \return The matrix corresponding to the color temperature
- */
-
-} /* namespace ipa */
-
-} /* namespace libcamera */
diff --git a/src/ipa/libipa/matrix_interpolator.h b/src/ipa/libipa/matrix_interpolator.h
deleted file mode 100644
index 087c4fd1..00000000
--- a/src/ipa/libipa/matrix_interpolator.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Helper class for interpolating maps of matrices
- */
-
-#pragma once
-
-#include <algorithm>
-#include <map>
-#include <string>
-#include <tuple>
-
-#include <libcamera/base/log.h>
-
-#include "libcamera/internal/yaml_parser.h"
-
-#include "matrix.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(MatrixInterpolator)
-
-namespace ipa {
-
-#ifndef __DOXYGEN__
-template<typename T, unsigned int R, unsigned int C,
- std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
-#else
-template<typename T, unsigned int R, unsigned int C>
-#endif /* __DOXYGEN__ */
-class MatrixInterpolator
-{
-public:
- MatrixInterpolator()
- {
- reset();
- }
-
- MatrixInterpolator(const std::map<unsigned int, Matrix<T, R, C>> &matrices)
- {
- for (const auto &pair : matrices)
- matrices_[pair.first] = pair.second;
- }
-
- ~MatrixInterpolator() {}
-
- void reset()
- {
- matrices_.clear();
- matrices_[0] = Matrix<T, R, C>::identity();
- }
-
- int readYaml(const libcamera::YamlObject &yaml,
- const std::string &key_name,
- const std::string &matrix_name)
- {
- matrices_.clear();
-
- if (!yaml.isList()) {
- LOG(MatrixInterpolator, Error) << "yaml object must be a list";
- return -EINVAL;
- }
-
- for (const auto &value : yaml.asList()) {
- unsigned int ct = std::stoul(value[key_name].get<std::string>(""));
- std::optional<Matrix<T, R, C>> matrix =
- value[matrix_name].get<Matrix<T, R, C>>();
- if (!matrix) {
- LOG(MatrixInterpolator, Error) << "Failed to read matrix";
- return -EINVAL;
- }
-
- matrices_[ct] = *matrix;
-
- LOG(MatrixInterpolator, Debug)
- << "Read matrix '" << matrix_name << "' for key '"
- << key_name << "' " << ct << ": "
- << matrices_[ct].toString();
- }
-
- if (matrices_.size() < 1) {
- LOG(MatrixInterpolator, Error) << "Need at least one matrix";
- return -EINVAL;
- }
-
- return 0;
- }
-
- Matrix<T, R, C> get(unsigned int ct)
- {
- ASSERT(matrices_.size() > 0);
-
- if (matrices_.size() == 1 ||
- ct <= matrices_.begin()->first)
- return matrices_.begin()->second;
-
- if (ct >= matrices_.rbegin()->first)
- return matrices_.rbegin()->second;
-
- if (matrices_.find(ct) != matrices_.end())
- return matrices_[ct];
-
-		/* The four checks above guarantee that this lookup will succeed */
- auto iter = matrices_.upper_bound(ct);
- unsigned int ctUpper = iter->first;
- unsigned int ctLower = (--iter)->first;
-
- double lambda = (ct - ctLower) / static_cast<double>(ctUpper - ctLower);
- Matrix<T, R, C> ret =
- lambda * matrices_[ctUpper] + (1.0 - lambda) * matrices_[ctLower];
- return ret;
- }
-
-private:
- std::map<unsigned int, Matrix<T, R, C>> matrices_;
-};
-
-} /* namespace ipa */
-
-} /* namespace libcamera */
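
The readYaml() documentation above maps directly onto a call like the one below. This is only a sketch: the "ccms" key used to reach the list of { ct, ccm, offsets } maps is hypothetical, and the YamlObject is assumed to come from the IPA's tuning data.

#include "libcamera/internal/yaml_parser.h"

#include "matrix_interpolator.h"

using namespace libcamera;

/*
 * Load the 'ccm' matrices, keyed by color temperature 'ct', from tuning data.
 * tuningData["ccms"] is assumed to hold the list-of-maps layout shown above.
 */
int loadCcms(const YamlObject &tuningData,
	     ipa::MatrixInterpolator<double, 3, 3> &ccms)
{
	int ret = ccms.readYaml(tuningData["ccms"], "ct", "ccm");
	if (ret < 0)
		ccms.reset();

	return ret;
}
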
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index eff8ce26..660be940 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -3,29 +3,39 @@
libipa_headers = files([
'agc_mean_luminance.h',
'algorithm.h',
+ 'awb_bayes.h',
+ 'awb_grey.h',
+ 'awb.h',
'camera_sensor_helper.h',
+ 'colours.h',
'exposure_mode_helper.h',
'fc_queue.h',
+ 'fixedpoint.h',
'histogram.h',
- 'matrix.h',
- 'matrix_interpolator.h',
+ 'interpolator.h',
+ 'lsc_polynomial.h',
+ 'lux.h',
'module.h',
'pwl.h',
- 'vector.h',
])
libipa_sources = files([
'agc_mean_luminance.cpp',
'algorithm.cpp',
+ 'awb_bayes.cpp',
+ 'awb_grey.cpp',
+ 'awb.cpp',
'camera_sensor_helper.cpp',
+ 'colours.cpp',
'exposure_mode_helper.cpp',
'fc_queue.cpp',
+ 'fixedpoint.cpp',
'histogram.cpp',
- 'matrix.cpp',
- 'matrix_interpolator.cpp',
+ 'interpolator.cpp',
+ 'lsc_polynomial.cpp',
+ 'lux.cpp',
'module.cpp',
'pwl.cpp',
- 'vector.cpp',
])
libipa_includes = include_directories('..')
diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp
index 9b213754..3fa005ba 100644
--- a/src/ipa/libipa/pwl.cpp
+++ b/src/ipa/libipa/pwl.cpp
@@ -8,10 +8,8 @@
#include "pwl.h"
-#include <assert.h>
#include <cmath>
#include <sstream>
-#include <stdexcept>
/**
* \file pwl.h
@@ -162,6 +160,11 @@ void Pwl::prepend(double x, double y, const double eps)
*/
/**
+ * \fn Pwl::clear()
+ * \brief Clear the piecewise linear function
+ */
+
+/**
* \fn Pwl::size() const
* \brief Retrieve the number of points in the piecewise linear function
* \return The number of points in the piecewise linear function
diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h
index b6f93494..c1496c30 100644
--- a/src/ipa/libipa/pwl.h
+++ b/src/ipa/libipa/pwl.h
@@ -7,15 +7,12 @@
#pragma once
#include <algorithm>
-#include <cmath>
#include <functional>
#include <string>
#include <utility>
#include <vector>
-#include "libcamera/internal/yaml_parser.h"
-
-#include "vector.h"
+#include "libcamera/internal/vector.h"
namespace libcamera {
@@ -52,6 +49,7 @@ public:
void append(double x, double y, double eps = 1e-6);
bool empty() const { return points_.empty(); }
+ void clear() { points_.clear(); }
size_t size() const { return points_.size(); }
Interval domain() const;
diff --git a/src/ipa/libipa/vector.cpp b/src/ipa/libipa/vector.cpp
deleted file mode 100644
index bd00b019..00000000
--- a/src/ipa/libipa/vector.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Vector and related operations
- */
-
-#include "vector.h"
-
-#include <libcamera/base/log.h>
-
-/**
- * \file vector.h
- * \brief Vector class
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Vector)
-
-namespace ipa {
-
-/**
- * \class Vector
- * \brief Vector class
- * \tparam T Type of numerical values to be stored in the vector
- * \tparam Rows Number of dimensions of the vector (= number of elements)
- */
-
-/**
- * \fn Vector::Vector()
- * \brief Construct a zero vector
- */
-
-/**
- * \fn Vector::Vector(const std::array<T, Rows> &data)
- * \brief Construct vector from supplied data
- * \param data Data from which to construct a vector
- *
- * The size of \a data must be equal to the vector's dimension \a Rows.
- */
-
-/**
- * \fn T Vector::operator[](size_t i) const
- * \brief Index to an element in the vector
- * \param i Index of element to retrieve
- * \return Element at index \a i from the vector
- */
-
-/**
- * \fn T &Vector::operator[](size_t i)
- * \copydoc Vector::operator[](size_t i) const
- */
-
-/**
- * \fn Vector::x()
- * \brief Convenience function to access the first element of the vector
- * \return The first element of the vector
- */
-
-/**
- * \fn Vector::y()
- * \brief Convenience function to access the second element of the vector
- * \return The second element of the vector
- */
-
-/**
- * \fn Vector::z()
- * \brief Convenience function to access the third element of the vector
- * \return The third element of the vector
- */
-
-/**
- * \fn Vector::operator-() const
- * \brief Negate a Vector by negating all of its coordinates
- * \return The negated vector
- */
-
-/**
- * \fn Vector::operator-(Vector const &other) const
- * \brief Subtract one vector from another
- * \param[in] other The other vector
- * \return The difference of \a other from this vector
- */
-
-/**
- * \fn Vector::operator+()
- * \brief Add two vectors together
- * \param[in] other The other vector
- * \return The sum of the two vectors
- */
-
-/**
- * \fn Vector::operator*(const Vector<T, Rows> &other) const
- * \brief Compute the dot product
- * \param[in] other The other vector
- * \return The dot product of the two vectors
- */
-
-/**
- * \fn Vector::operator*(T factor) const
- * \brief Multiply the vector by a scalar
- * \param[in] factor The factor
- * \return The vector multiplied by \a factor
- */
-
-/**
- * \fn Vector::operator/()
- * \brief Divide the vector by a scalar
- * \param[in] factor The factor
- * \return The vector divided by \a factor
- */
-
-/**
- * \fn Vector::length2()
- * \brief Get the squared length of the vector
- * \return The squared length of the vector
- */
-
-/**
- * \fn Vector::length()
- * \brief Get the length of the vector
- * \return The length of the vector
- */
-
-/**
- * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
- * \brief Multiply a matrix by a vector
- * \tparam T Numerical type of the contents of the matrix and vector
- * \tparam Rows The number of rows in the matrix
- * \tparam Cols The number of columns in the matrix (= rows in the vector)
- * \param m The matrix
- * \param v The vector
- * \return Product of matrix \a m and vector \a v
- */
-
-/**
- * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
- * \brief Compare vectors for equality
- * \return True if the two vectors are equal, false otherwise
- */
-
-/**
- * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
- * \brief Compare vectors for inequality
- * \return True if the two vectors are not equal, false otherwise
- */
-
-#ifndef __DOXYGEN__
-bool vectorValidateYaml(const YamlObject &obj, unsigned int size)
-{
- if (!obj.isList())
- return false;
-
- if (obj.size() != size) {
- LOG(Vector, Error)
- << "Wrong number of values in YAML vector: expected "
- << size << ", got " << obj.size();
- return false;
- }
-
- return true;
-}
-#endif /* __DOXYGEN__ */
-
-} /* namespace ipa */
-
-} /* namespace libcamera */
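
A compact sketch of the operations documented above for the removed Vector: construction, arithmetic, dot product, length, and multiplication by a Matrix. The values are arbitrary placeholders.

#include <array>

#include "matrix.h"
#include "vector.h"

using namespace libcamera::ipa;

void vectorExample()
{
	Vector<double, 3> a({ 1.0, 2.0, 3.0 });
	Vector<double, 3> b({ 0.5, 0.5, 0.5 });

	Vector<double, 3> sum = a + b;      /* { 1.5, 2.5, 3.5 } */
	Vector<double, 3> diff = a - b;     /* { 0.5, 1.5, 2.5 } */
	double dot = a * b;                 /* 1*0.5 + 2*0.5 + 3*0.5 = 3.0 */
	Vector<double, 3> half = a / 2.0;   /* { 0.5, 1.0, 1.5 } */
	double len = a.length();            /* sqrt(a.length2()) = sqrt(14) */

	/* Multiplying by the identity matrix leaves the vector unchanged. */
	Vector<double, 3> m = Matrix<double, 3, 3>::identity() * a;

	(void)sum; (void)diff; (void)dot; (void)half; (void)len; (void)m;
}
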
diff --git a/src/ipa/libipa/vector.h b/src/ipa/libipa/vector.h
deleted file mode 100644
index 556e0967..00000000
--- a/src/ipa/libipa/vector.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
- *
- * Vector and related operations
- */
-#pragma once
-
-#include <algorithm>
-#include <array>
-#include <cmath>
-#include <sstream>
-
-#include <libcamera/base/log.h>
-#include <libcamera/base/span.h>
-
-#include "libcamera/internal/yaml_parser.h"
-
-#include "matrix.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(Vector)
-
-namespace ipa {
-
-#ifndef __DOXYGEN__
-template<typename T, unsigned int Rows,
- std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
-#else
-template<typename T, unsigned int Rows>
-#endif /* __DOXYGEN__ */
-class Vector
-{
-public:
- constexpr Vector() = default;
-
- constexpr Vector(const std::array<T, Rows> &data)
- {
- for (unsigned int i = 0; i < Rows; i++)
- data_[i] = data[i];
- }
-
- const T &operator[](size_t i) const
- {
- ASSERT(i < data_.size());
- return data_[i];
- }
-
- T &operator[](size_t i)
- {
- ASSERT(i < data_.size());
- return data_[i];
- }
-
-#ifndef __DOXYGEN__
- template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
-#endif /* __DOXYGEN__ */
- constexpr T x() const
- {
- return data_[0];
- }
-
-#ifndef __DOXYGEN__
- template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
-#endif /* __DOXYGEN__ */
- constexpr T y() const
- {
- return data_[1];
- }
-
-#ifndef __DOXYGEN__
- template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
-#endif /* __DOXYGEN__ */
- constexpr T z() const
- {
- return data_[2];
- }
-
- constexpr Vector<T, Rows> operator-() const
- {
- Vector<T, Rows> ret;
- for (unsigned int i = 0; i < Rows; i++)
- ret[i] = -data_[i];
- return ret;
- }
-
- constexpr Vector<T, Rows> operator-(const Vector<T, Rows> &other) const
- {
- Vector<T, Rows> ret;
- for (unsigned int i = 0; i < Rows; i++)
- ret[i] = data_[i] - other[i];
- return ret;
- }
-
- constexpr Vector<T, Rows> operator+(const Vector<T, Rows> &other) const
- {
- Vector<T, Rows> ret;
- for (unsigned int i = 0; i < Rows; i++)
- ret[i] = data_[i] + other[i];
- return ret;
- }
-
- constexpr T operator*(const Vector<T, Rows> &other) const
- {
- T ret = 0;
- for (unsigned int i = 0; i < Rows; i++)
- ret += data_[i] * other[i];
- return ret;
- }
-
- constexpr Vector<T, Rows> operator*(T factor) const
- {
- Vector<T, Rows> ret;
- for (unsigned int i = 0; i < Rows; i++)
- ret[i] = data_[i] * factor;
- return ret;
- }
-
- constexpr Vector<T, Rows> operator/(T factor) const
- {
- Vector<T, Rows> ret;
- for (unsigned int i = 0; i < Rows; i++)
- ret[i] = data_[i] / factor;
- return ret;
- }
-
- constexpr double length2() const
- {
- double ret = 0;
- for (unsigned int i = 0; i < Rows; i++)
- ret += data_[i] * data_[i];
- return ret;
- }
-
- constexpr double length() const
- {
- return std::sqrt(length2());
- }
-
-private:
- std::array<T, Rows> data_;
-};
-
-template<typename T, unsigned int Rows, unsigned int Cols>
-Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
-{
- Vector<T, Rows> result;
-
- for (unsigned int i = 0; i < Rows; i++) {
- T sum = 0;
- for (unsigned int j = 0; j < Cols; j++)
- sum += m[i][j] * v[j];
- result[i] = sum;
- }
-
- return result;
-}
-
-template<typename T, unsigned int Rows>
-bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
-{
- for (unsigned int i = 0; i < Rows; i++) {
- if (lhs[i] != rhs[i])
- return false;
- }
-
- return true;
-}
-
-template<typename T, unsigned int Rows>
-bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
-{
- return !(lhs == rhs);
-}
-
-#ifndef __DOXYGEN__
-bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
-#endif /* __DOXYGEN__ */
-
-} /* namespace ipa */
-
-#ifndef __DOXYGEN__
-template<typename T, unsigned int Rows>
-std::ostream &operator<<(std::ostream &out, const ipa::Vector<T, Rows> &v)
-{
- out << "Vector { ";
- for (unsigned int i = 0; i < Rows; i++) {
- out << v[i];
- out << ((i + 1 < Rows) ? ", " : " ");
- }
- out << " }";
-
- return out;
-}
-
-template<typename T, unsigned int Rows>
-struct YamlObject::Getter<ipa::Vector<T, Rows>> {
- std::optional<ipa::Vector<T, Rows>> get(const YamlObject &obj) const
- {
- if (!ipa::vectorValidateYaml(obj, Rows))
- return std::nullopt;
-
- ipa::Vector<T, Rows> vector;
-
- unsigned int i = 0;
- for (const YamlObject &entry : obj.asList()) {
- const auto value = entry.get<T>();
- if (!value)
- return std::nullopt;
- vector[i++] = *value;
- }
-
- return vector;
- }
-};
-#endif /* __DOXYGEN__ */
-
-} /* namespace libcamera */
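
Finally, the YamlObject::Getter specialisation removed above is what allowed a fixed-size vector to be read straight from a YAML list. A sketch of that usage, where the "offset" key is purely illustrative:

#include <optional>

#include "libcamera/internal/yaml_parser.h"

#include "vector.h"

/*
 * Reads e.g. "offset: [ 0.0, 0.0, 0.0 ]" from tuning data, returning
 * std::nullopt if the key is missing or the list has the wrong size.
 */
std::optional<libcamera::ipa::Vector<double, 3>>
readOffset(const libcamera::YamlObject &tuningData)
{
	return tuningData["offset"].get<libcamera::ipa::Vector<double, 3>>();
}
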