Diffstat (limited to 'src/ipa/libipa')
27 files changed, 2798 insertions, 166 deletions
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp new file mode 100644 index 00000000..02555a44 --- /dev/null +++ b/src/ipa/libipa/agc_mean_luminance.cpp @@ -0,0 +1,578 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024 Ideas on Board Oy + * + * Base class for mean luminance AGC algorithms + */ + +#include "agc_mean_luminance.h" + +#include <cmath> + +#include <libcamera/base/log.h> +#include <libcamera/control_ids.h> + +#include "exposure_mode_helper.h" + +using namespace libcamera::controls; + +/** + * \file agc_mean_luminance.h + * \brief Base class implementing mean luminance AEGC + */ + +namespace libcamera { + +using namespace std::literals::chrono_literals; + +LOG_DEFINE_CATEGORY(AgcMeanLuminance) + +namespace ipa { + +/* + * Number of frames for which to run the algorithm at full speed, before slowing + * down to prevent large and jarring changes in exposure from frame to frame. + */ +static constexpr uint32_t kNumStartupFrames = 10; + +/* + * Default relative luminance target + * + * This value should be chosen so that when the camera points at a grey target, + * the resulting image brightness looks "right". Custom values can be passed + * as the relativeLuminanceTarget value in sensor tuning files. + */ +static constexpr double kDefaultRelativeLuminanceTarget = 0.16; + +/** + * \struct AgcMeanLuminance::AgcConstraint + * \brief The boundaries and target for an AeConstraintMode constraint + * + * This structure describes an AeConstraintMode constraint for the purposes of + * this algorithm. These constraints are expressed as a pair of quantile + * boundaries for a histogram, along with a luminance target and a bounds-type. + * The algorithm uses the constraints by ensuring that the defined portion of a + * luminance histogram (I.E. lying between the two quantiles) is above or below + * the given luminance value. + */ + +/** + * \enum AgcMeanLuminance::AgcConstraint::Bound + * \brief Specify whether the constraint defines a lower or upper bound + * \var AgcMeanLuminance::AgcConstraint::Lower + * \brief The constraint defines a lower bound + * \var AgcMeanLuminance::AgcConstraint::Upper + * \brief The constraint defines an upper bound + */ + +/** + * \var AgcMeanLuminance::AgcConstraint::bound + * \brief The type of constraint bound + */ + +/** + * \var AgcMeanLuminance::AgcConstraint::qLo + * \brief The lower quantile to use for the constraint + */ + +/** + * \var AgcMeanLuminance::AgcConstraint::qHi + * \brief The upper quantile to use for the constraint + */ + +/** + * \var AgcMeanLuminance::AgcConstraint::yTarget + * \brief The luminance target for the constraint + */ + +/** + * \class AgcMeanLuminance + * \brief A mean-based auto-exposure algorithm + * + * This algorithm calculates an exposure time, analogue and digital gain such + * that the normalised mean luminance value of an image is driven towards a + * target, which itself is discovered from tuning data. The algorithm is a + * two-stage process. + * + * In the first stage, an initial gain value is derived by iteratively comparing + * the gain-adjusted mean luminance across the entire image against a target, + * and selecting a value which pushes it as closely as possible towards the + * target. 
+ * + * In the second stage we calculate the gain required to drive the average of a + * section of a histogram to a target value, where the target and the boundaries + * of the section of the histogram used in the calculation are taken from the + * values defined for the currently configured AeConstraintMode within the + * tuning data. This class provides a helper function to parse those tuning data + * to discover the constraints, and so requires a specific format for those + * data which is described in \ref parseTuningData(). The gain from the first + * stage is then clamped to the gain from this stage. + * + * The final gain is used to adjust the effective exposure value of the image, + * and that new exposure value is divided into exposure time, analogue gain and + * digital gain according to the selected AeExposureMode. This class uses the + * \ref ExposureModeHelper class to assist in that division, and expects the + * data needed to initialise that class to be present in tuning data in a + * format described in \ref parseTuningData(). + * + * In order to be able to use this algorithm an IPA module needs to be able to + * do the following: + * + * 1. Provide a luminance estimation across an entire image. + * 2. Provide a luminance Histogram for the image to use in calculating + * constraint compliance. The precision of the Histogram that is available + * will determine the supportable precision of the constraints. + * + * IPA modules that want to use this class to implement their AEGC algorithm + * should derive from it and provide an overriding estimateLuminance() function for + * this class to use. They must call parseTuningData() in init(), and must also + * call setLimits() and resetFrameCount() in configure(). They may then use + * calculateNewEv() in process(). If the limits passed to setLimits() change for + * any reason (for example, in response to a FrameDurationLimit control being + * passed in queueRequest()) then setLimits() must be called again with the new + * values. 
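/*
 * A minimal sketch of a derived class, not part of the patch: MyAgc, its
 * meanLuminance_ member and the init()/configure() entry points are
 * hypothetical stand-ins for what a real IPA module would provide.
 */
class MyAgc : public AgcMeanLuminance
{
public:
	int init(const YamlObject &tuningData)
	{
		/* Builds the constraint modes and ExposureModeHelpers. */
		return parseTuningData(tuningData);
	}

	void configure(utils::Duration minExposureTime,
		       utils::Duration maxExposureTime,
		       double minGain, double maxGain)
	{
		/* Re-apply the limits and restart the fast startup phase. */
		setLimits(minExposureTime, maxExposureTime, minGain, maxGain);
		resetFrameCount();
	}

	void setMeanLuminance(double y) { meanLuminance_ = y; }

private:
	double estimateLuminance(double gain) const override
	{
		/* Hardware-specific in a real IPA; a saturating scale here. */
		return std::min(meanLuminance_ * gain, 1.0);
	}

	double meanLuminance_ = 0.0;
};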
+ */ + +AgcMeanLuminance::AgcMeanLuminance() + : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0) +{ +} + +AgcMeanLuminance::~AgcMeanLuminance() = default; + +void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData) +{ + relativeLuminanceTarget_ = + tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget); +} + +void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id) +{ + for (const auto &[boundName, content] : modeDict.asDict()) { + if (boundName != "upper" && boundName != "lower") { + LOG(AgcMeanLuminance, Warning) + << "Ignoring unknown constraint bound '" << boundName << "'"; + continue; + } + + unsigned int idx = static_cast<unsigned int>(boundName == "upper"); + AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx); + double qLo = content["qLo"].get<double>().value_or(0.98); + double qHi = content["qHi"].get<double>().value_or(1.0); + double yTarget = + content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0); + + AgcConstraint constraint = { bound, qLo, qHi, yTarget }; + + if (!constraintModes_.count(id)) + constraintModes_[id] = {}; + + if (idx) + constraintModes_[id].push_back(constraint); + else + constraintModes_[id].insert(constraintModes_[id].begin(), constraint); + } +} + +int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData) +{ + std::vector<ControlValue> availableConstraintModes; + + const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()]; + if (yamlConstraintModes.isDictionary()) { + for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) { + if (AeConstraintModeNameValueMap.find(modeName) == + AeConstraintModeNameValueMap.end()) { + LOG(AgcMeanLuminance, Warning) + << "Skipping unknown constraint mode '" << modeName << "'"; + continue; + } + + if (!modeDict.isDictionary()) { + LOG(AgcMeanLuminance, Error) + << "Invalid constraint mode '" << modeName << "'"; + return -EINVAL; + } + + parseConstraint(modeDict, + AeConstraintModeNameValueMap.at(modeName)); + availableConstraintModes.push_back( + AeConstraintModeNameValueMap.at(modeName)); + } + } + + /* + * If the tuning data file contains no constraints then we use the + * default constraint that the IPU3/RkISP1 Agc algorithms were adhering + * to anyway before centralisation; this constraint forces the top 2% of + * the histogram to be at least 0.5. 
+ */ + if (constraintModes_.empty()) { + AgcConstraint constraint = { + AgcConstraint::Bound::Lower, + 0.98, + 1.0, + 0.5 + }; + + constraintModes_[controls::ConstraintNormal].insert( + constraintModes_[controls::ConstraintNormal].begin(), + constraint); + availableConstraintModes.push_back( + AeConstraintModeNameValueMap.at("ConstraintNormal")); + } + + controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes); + + return 0; +} + +int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData) +{ + std::vector<ControlValue> availableExposureModes; + + const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()]; + if (yamlExposureModes.isDictionary()) { + for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) { + if (AeExposureModeNameValueMap.find(modeName) == + AeExposureModeNameValueMap.end()) { + LOG(AgcMeanLuminance, Warning) + << "Skipping unknown exposure mode '" << modeName << "'"; + continue; + } + + if (!modeValues.isDictionary()) { + LOG(AgcMeanLuminance, Error) + << "Invalid exposure mode '" << modeName << "'"; + return -EINVAL; + } + + std::vector<uint32_t> exposureTimes = + modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{}); + std::vector<double> gains = + modeValues["gain"].getList<double>().value_or(std::vector<double>{}); + + if (exposureTimes.size() != gains.size()) { + LOG(AgcMeanLuminance, Error) + << "Exposure time and gain array sizes unequal"; + return -EINVAL; + } + + if (exposureTimes.empty()) { + LOG(AgcMeanLuminance, Error) + << "Exposure time and gain arrays are empty"; + return -EINVAL; + } + + std::vector<std::pair<utils::Duration, double>> stages; + for (unsigned int i = 0; i < exposureTimes.size(); i++) { + stages.push_back({ + std::chrono::microseconds(exposureTimes[i]), + gains[i] + }); + } + + std::shared_ptr<ExposureModeHelper> helper = + std::make_shared<ExposureModeHelper>(stages); + + exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper; + availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName)); + } + } + + /* + * If we don't have any exposure modes in the tuning data we create an + * ExposureModeHelper using an empty vector of stages. This will result + * in the ExposureModeHelper simply driving the exposure time as high as + * possible before touching gain. + */ + if (availableExposureModes.empty()) { + int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal"); + std::vector<std::pair<utils::Duration, double>> stages = { }; + + std::shared_ptr<ExposureModeHelper> helper = + std::make_shared<ExposureModeHelper>(stages); + + exposureModeHelpers_[exposureModeId] = helper; + availableExposureModes.push_back(exposureModeId); + } + + controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes); + + return 0; +} + +/** + * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls + * \param[in] tuningData the YamlObject representing the tuning data + * + * This function parses tuning data to build the list of allowed values for the + * AeConstraintMode and AeExposureMode controls. Those tuning data must provide + * the data in a specific format; the Agc algorithm's tuning data should contain + * a dictionary called AeConstraintMode containing per-mode setting dictionaries + * with the key being a value from \ref controls::AeConstraintModeNameValueMap. 
+ * Each mode dict may contain either a "lower" or "upper" key or both, for + * example: + * + * \code{.unparsed} + * algorithms: + * - Agc: + * AeConstraintMode: + * ConstraintNormal: + * lower: + * qLo: 0.98 + * qHi: 1.0 + * yTarget: 0.5 + * ConstraintHighlight: + * lower: + * qLo: 0.98 + * qHi: 1.0 + * yTarget: 0.5 + * upper: + * qLo: 0.98 + * qHi: 1.0 + * yTarget: 0.8 + * + * \endcode + * + * For the AeExposureMode control the data should contain a dictionary called + * AeExposureMode containing per-mode setting dictionaries with the key being a + * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should + * contain an array of exposure times with the key "exposureTime" and an array + * of gain values with the key "gain", in this format: + * + * \code{.unparsed} + * algorithms: + * - Agc: + * AeExposureMode: + * ExposureNormal: + * exposureTime: [ 100, 10000, 30000, 60000, 120000 ] + * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ] + * ExposureShort: + * exposureTime: [ 100, 10000, 30000, 60000, 120000 ] + * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ] + * + * \endcode + * + * \return 0 on success or a negative error code + */ +int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData) +{ + int ret; + + parseRelativeLuminanceTarget(tuningData); + + ret = parseConstraintModes(tuningData); + if (ret) + return ret; + + return parseExposureModes(tuningData); +} + +/** + * \brief Set the ExposureModeHelper limits for this class + * \param[in] minExposureTime Minimum exposure time to allow + * \param[in] maxExposureTime Maximum exposure time to allow + * \param[in] minGain Minimum gain to allow + * \param[in] maxGain Maximum gain to allow + * + * This function calls \ref ExposureModeHelper::setLimits() for each + * ExposureModeHelper that has been created for this class. + */ +void AgcMeanLuminance::setLimits(utils::Duration minExposureTime, + utils::Duration maxExposureTime, + double minGain, double maxGain) +{ + for (auto &[id, helper] : exposureModeHelpers_) + helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain); +} + +/** + * \fn AgcMeanLuminance::constraintModes() + * \brief Get the constraint modes that have been parsed from tuning data + */ + +/** + * \fn AgcMeanLuminance::exposureModeHelpers() + * \brief Get the ExposureModeHelpers that have been parsed from tuning data + */ + +/** + * \fn AgcMeanLuminance::controls() + * \brief Get the controls that have been generated after parsing tuning data + */ + +/** + * \fn AgcMeanLuminance::estimateLuminance(const double gain) + * \brief Estimate the luminance of an image, adjusted by a given gain + * \param[in] gain The gain with which to adjust the luminance estimate + * + * This function estimates the average relative luminance of the frame that + * would be output by the sensor if an additional \a gain was applied. It is a + * pure virtual function because estimation of luminance is a hardware-specific + * operation, which depends wholly on the format of the stats that are delivered + * to libcamera from the ISP. Derived classes must override this function with + * one that calculates the normalised mean luminance value across the entire + * image. 
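/*
 * An illustrative sketch of the kind of computation an estimateLuminance()
 * override performs, not taken from any real IPA: the RegionStats layout and
 * the 8-bit normalisation are assumptions made purely for this example
 * (needs <algorithm> and <vector>).
 */
struct RegionStats {
	double meanR;
	double meanG;
	double meanB;
};

static double estimateLuminanceFromRegions(const std::vector<RegionStats> &regions,
					   double gain)
{
	if (regions.empty())
		return 0.0;

	double ySum = 0.0;

	for (const RegionStats &region : regions) {
		/* Apply the candidate gain and saturate as the sensor would. */
		double r = std::min(region.meanR * gain, 255.0);
		double g = std::min(region.meanG * gain, 255.0);
		double b = std::min(region.meanB * gain, 255.0);

		/* Rec. 601 weighting, as in rec601LuminanceFromRGB(). */
		ySum += 0.299 * r + 0.587 * g + 0.114 * b;
	}

	/* Normalise to [0, 1] so the result can be compared to the target. */
	return ySum / regions.size() / 255.0;
}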
+ * + * \return The normalised relative luminance of the image + */ + +/** + * \brief Estimate the initial gain needed to achieve a relative luminance + * target + * \return The calculated initial gain + */ +double AgcMeanLuminance::estimateInitialGain() const +{ + double yTarget = relativeLuminanceTarget_; + double yGain = 1.0; + + /* + * To account for non-linearity caused by saturation, the value needs to + * be estimated in an iterative process, as multiplying by a gain will + * not increase the relative luminance by the same factor if some image + * regions are saturated. + */ + for (unsigned int i = 0; i < 8; i++) { + double yValue = estimateLuminance(yGain); + double extra_gain = std::min(10.0, yTarget / (yValue + .001)); + + yGain *= extra_gain; + LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue + << ", Y target: " << yTarget + << ", gives gain " << yGain; + + if (utils::abs_diff(extra_gain, 1.0) < 0.01) + break; + } + + return yGain; +} + +/** + * \brief Clamp gain within the bounds of a defined constraint + * \param[in] constraintModeIndex The index of the constraint to adhere to + * \param[in] hist A histogram over which to calculate inter-quantile means + * \param[in] gain The gain to clamp + * + * \return The gain clamped within the constraint bounds + */ +double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex, + const Histogram &hist, + double gain) +{ + std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex]; + for (const AgcConstraint &constraint : constraints) { + double newGain = constraint.yTarget * hist.bins() / + hist.interQuantileMean(constraint.qLo, constraint.qHi); + + if (constraint.bound == AgcConstraint::Bound::Lower && + newGain > gain) + gain = newGain; + + if (constraint.bound == AgcConstraint::Bound::Upper && + newGain < gain) + gain = newGain; + } + + return gain; +} + +/** + * \brief Apply a filter on the exposure value to limit the speed of changes + * \param[in] exposureValue The target exposure from the AGC algorithm + * + * The speed of the filter is adaptive, and will produce the target quicker + * during startup, or when the target exposure is within 20% of the most recent + * filter output. + * + * \return The filtered exposure + */ +utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue) +{ + double speed = 0.2; + + /* Adapt instantly if we are in startup phase. */ + if (frameCount_ < kNumStartupFrames) + speed = 1.0; + + /* + * If we are close to the desired result, go faster to avoid making + * multiple micro-adjustments. + * \todo Make this customisable? + */ + if (filteredExposure_ < 1.2 * exposureValue && + filteredExposure_ > 0.8 * exposureValue) + speed = sqrt(speed); + + filteredExposure_ = speed * exposureValue + + filteredExposure_ * (1.0 - speed); + + return filteredExposure_; +} + +/** + * \brief Calculate the new exposure value and split it between exposure time + * and gain + * \param[in] constraintModeIndex The index of the current constraint mode + * \param[in] exposureModeIndex The index of the current exposure mode + * \param[in] yHist A Histogram from the ISP statistics to use in constraining + * the calculated gain + * \param[in] effectiveExposureValue The EV applied to the frame from which the + * statistics in use derive + * + * Calculate a new exposure value to try to obtain the target. The calculated + * exposure value is filtered to prevent rapid changes from frame to frame, and + * divided into exposure time, analogue and digital gain. 
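/*
 * A sketch of the surrounding frame-to-frame bookkeeping, not part of the
 * patch: the mode choices and the device-facing return triple are assumptions
 * for illustration, and camHelper stands in for an IPA's CameraSensorHelper.
 */
std::tuple<utils::Duration, uint32_t, double>
runAgcSketch(AgcMeanLuminance &agc, const CameraSensorHelper &camHelper,
	     const Histogram &yHist, utils::Duration measuredExposureTime,
	     double measuredTotalGain)
{
	/* The effective EV of the frame that produced the statistics. */
	utils::Duration effectiveEv = measuredExposureTime * measuredTotalGain;

	auto [exposureTime, aGain, dGain] =
		agc.calculateNewEv(controls::ConstraintNormal,
				   controls::ExposureNormal,
				   yHist, effectiveEv);

	/* The analogue gain is converted to a sensor register code. */
	return { exposureTime, camHelper.gainCode(aGain), dGain };
}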
+ * + * \return Tuple of exposure time, analogue gain, and digital gain + */ +std::tuple<utils::Duration, double, double> +AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex, + uint32_t exposureModeIndex, + const Histogram &yHist, + utils::Duration effectiveExposureValue) +{ + /* + * The pipeline handler should validate that we have received an allowed + * value for AeExposureMode. + */ + std::shared_ptr<ExposureModeHelper> exposureModeHelper = + exposureModeHelpers_.at(exposureModeIndex); + + double gain = estimateInitialGain(); + gain = constraintClampGain(constraintModeIndex, yHist, gain); + + /* + * We don't check whether we're already close to the target, because + * even if the effective exposure value is the same as the last frame's + * we could have switched to an exposure mode that would require a new + * pass through the splitExposure() function. + */ + + utils::Duration newExposureValue = effectiveExposureValue * gain; + + /* + * We filter the exposure value to make sure changes are not too jarring + * from frame to frame. + */ + newExposureValue = filterExposure(newExposureValue); + + frameCount_++; + return exposureModeHelper->splitExposure(newExposureValue); +} + +/** + * \fn AgcMeanLuminance::resetFrameCount() + * \brief Reset the frame counter + * + * This function resets the internal frame counter, which exists to help the + * algorithm decide whether it should respond instantly or not. The expectation + * is for derived classes to call this function before each camera start call in + * their configure() function. + */ + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h new file mode 100644 index 00000000..c41391cb --- /dev/null +++ b/src/ipa/libipa/agc_mean_luminance.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024 Ideas on Board Oy + * + agc_mean_luminance.h - Base class for mean luminance AGC algorithms + */ + +#pragma once + +#include <map> +#include <memory> +#include <tuple> +#include <vector> + +#include <libcamera/base/utils.h> + +#include <libcamera/controls.h> + +#include "libcamera/internal/yaml_parser.h" + +#include "exposure_mode_helper.h" +#include "histogram.h" + +namespace libcamera { + +namespace ipa { + +class AgcMeanLuminance +{ +public: + AgcMeanLuminance(); + virtual ~AgcMeanLuminance(); + + struct AgcConstraint { + enum class Bound { + Lower = 0, + Upper = 1 + }; + Bound bound; + double qLo; + double qHi; + double yTarget; + }; + + int parseTuningData(const YamlObject &tuningData); + + void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime, + double minGain, double maxGain); + + std::map<int32_t, std::vector<AgcConstraint>> constraintModes() + { + return constraintModes_; + } + + std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers() + { + return exposureModeHelpers_; + } + + ControlInfoMap::Map controls() + { + return controls_; + } + + std::tuple<utils::Duration, double, double> + calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex, + const Histogram &yHist, utils::Duration effectiveExposureValue); + + void resetFrameCount() + { + frameCount_ = 0; + } + +private: + virtual double estimateLuminance(const double gain) const = 0; + + void parseRelativeLuminanceTarget(const YamlObject &tuningData); + void parseConstraint(const YamlObject &modeDict, int32_t id); + int parseConstraintModes(const YamlObject &tuningData); + int 
parseExposureModes(const YamlObject &tuningData); + double estimateInitialGain() const; + double constraintClampGain(uint32_t constraintModeIndex, + const Histogram &hist, + double gain); + utils::Duration filterExposure(utils::Duration exposureValue); + + uint64_t frameCount_; + utils::Duration filteredExposure_; + double relativeLuminanceTarget_; + + std::map<int32_t, std::vector<AgcConstraint>> constraintModes_; + std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_; + ControlInfoMap::Map controls_; +}; + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp index bc1c29a6..201efdfd 100644 --- a/src/ipa/libipa/algorithm.cpp +++ b/src/ipa/libipa/algorithm.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2021, Ideas On Board * - * algorithm.cpp - IPA control algorithm interface + * IPA control algorithm interface */ #include "algorithm.h" diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h index 987e3e4c..9a19dbd6 100644 --- a/src/ipa/libipa/algorithm.h +++ b/src/ipa/libipa/algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2021, Ideas On Board * - * algorithm.h - ISP control algorithm interface + * ISP control algorithm interface */ #pragma once diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp index ce29f423..7c66cd57 100644 --- a/src/ipa/libipa/camera_sensor_helper.cpp +++ b/src/ipa/libipa/camera_sensor_helper.cpp @@ -2,12 +2,13 @@ /* * Copyright (C) 2021, Google Inc. * - * camera_sensor_helper.cpp - Helper class that performs sensor-specific + * Helper class that performs sensor-specific * parameter computations */ #include "camera_sensor_helper.h" #include <cmath> +#include <limits> #include <libcamera/base/log.h> @@ -40,6 +41,7 @@ namespace ipa { */ /** + * \fn CameraSensorHelper::CameraSensorHelper() * \brief Construct a CameraSensorHelper instance * * CameraSensorHelper derived class instances shall never be constructed @@ -48,6 +50,33 @@ namespace ipa { */ /** + * \fn CameraSensorHelper::blackLevel() + * \brief Fetch the black level of the sensor + * + * This function returns the black level of the sensor scaled to a 16bit pixel + * width. If it is unknown an empty optional is returned. + * + * \todo Fill the blanks and add pedestal values for all supported sensors. Once + * done, drop the std::optional<>. + * + * Black levels are typically the result of the following phenomena: + * - Pedestal added by the sensor to pixel values. They are typically fixed, + * sometimes programmable and should be reported in datasheets (but + * documentation is not always available). + * - Dark currents and other physical effects that add charge to pixels in the + * absence of light. Those can depend on the integration time and the sensor + * die temperature, and their contribution to pixel values depend on the + * sensor gains. + * + * The pedestal is usually the value with the biggest contribution to the + * overall black level. In most cases it is either known before or in rare cases + * (there is not a single driver with such a control in the linux kernel) can be + * queried from the sensor. This function provides that fixed, known value. 
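/*
 * A small sketch, not from the patch: blackLevel() is scaled to a 16-bit
 * range, so a consumer working at the raw format's native bit depth (the
 * bitDepth parameter here is a hypothetical example) shifts it back down.
 */
int32_t sensorBlackLevel(const CameraSensorHelper &helper, unsigned int bitDepth)
{
	/* Fall back to zero when the pedestal is not known for this sensor. */
	if (!helper.blackLevel())
		return 0;

	return *helper.blackLevel() >> (16 - bitDepth);
}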
+ * + * \return The black level of the sensor, or std::nullopt if not known + */ + +/** * \brief Compute gain code from the analogue gain absolute value * \param[in] gain The real gain to pass * @@ -58,21 +87,16 @@ namespace ipa { */ uint32_t CameraSensorHelper::gainCode(double gain) const { - const AnalogueGainConstants &k = gainConstants_; - - switch (gainType_) { - case AnalogueGainLinear: - ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0); + if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) { + ASSERT(l->m0 == 0 || l->m1 == 0); - return (k.linear.c0 - k.linear.c1 * gain) / - (k.linear.m1 * gain - k.linear.m0); + return (l->c0 - l->c1 * gain) / + (l->m1 * gain - l->m0); + } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) { + ASSERT(e->a != 0 && e->m != 0); - case AnalogueGainExponential: - ASSERT(k.exp.a != 0 && k.exp.m != 0); - - return std::log2(gain / k.exp.a) / k.exp.m; - - default: + return std::log2(gain / e->a) / e->m; + } else { ASSERT(false); return 0; } @@ -90,38 +114,26 @@ uint32_t CameraSensorHelper::gainCode(double gain) const */ double CameraSensorHelper::gain(uint32_t gainCode) const { - const AnalogueGainConstants &k = gainConstants_; double gain = static_cast<double>(gainCode); - switch (gainType_) { - case AnalogueGainLinear: - ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0); - - return (k.linear.m0 * gain + k.linear.c0) / - (k.linear.m1 * gain + k.linear.c1); - - case AnalogueGainExponential: - ASSERT(k.exp.a != 0 && k.exp.m != 0); + if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) { + ASSERT(l->m0 == 0 || l->m1 == 0); - return k.exp.a * std::exp2(k.exp.m * gain); + return (l->m0 * gain + l->c0) / + (l->m1 * gain + l->c1); + } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) { + ASSERT(e->a != 0 && e->m != 0); - default: + return e->a * std::exp2(e->m * gain); + } else { ASSERT(false); return 0.0; } } /** - * \enum CameraSensorHelper::AnalogueGainType - * \brief The gain calculation modes as defined by the MIPI CCS - * - * Describes the image sensor analogue gain capabilities. - * Two modes are possible, depending on the sensor: Linear and Exponential. - */ - -/** - * \var CameraSensorHelper::AnalogueGainLinear - * \brief Gain is computed using linear gain estimation + * \struct CameraSensorHelper::AnalogueGainLinear + * \brief Analogue gain constants for the linear gain model * * The relationship between the integer gain parameter and the resulting gain * multiplier is given by the following equation: @@ -136,11 +148,27 @@ double CameraSensorHelper::gain(uint32_t gainCode) const * The full Gain equation therefore reduces to either: * * \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$ + * + * \var CameraSensorHelper::AnalogueGainLinear::m0 + * \brief Constant used in the linear gain coding/decoding + * + * \note Either m0 or m1 shall be zero. + * + * \var CameraSensorHelper::AnalogueGainLinear::c0 + * \brief Constant used in the linear gain coding/decoding + * + * \var CameraSensorHelper::AnalogueGainLinear::m1 + * \brief Constant used in the linear gain coding/decoding + * + * \note Either m0 or m1 shall be zero. 
+ * + * \var CameraSensorHelper::AnalogueGainLinear::c1 + * \brief Constant used in the linear gain coding/decoding */ /** - * \var CameraSensorHelper::AnalogueGainExponential - * \brief Gain is expressed using an exponential model + * \struct CameraSensorHelper::AnalogueGainExp + * \brief Analogue gain constants for the exponential gain model * * The relationship between the integer gain parameter and the resulting gain * multiplier is given by the following equation: @@ -156,61 +184,22 @@ double CameraSensorHelper::gain(uint32_t gainCode) const * * When the gain is expressed in dB, 'a' is equal to 1 and 'm' to * \f$log_{2}{10^{\frac{1}{20}}}\f$. - */ - -/** - * \struct CameraSensorHelper::AnalogueGainLinearConstants - * \brief Analogue gain constants for the linear gain model - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::m0 - * \brief Constant used in the linear gain coding/decoding - * - * \note Either m0 or m1 shall be zero. - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::c0 - * \brief Constant used in the linear gain coding/decoding - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::m1 - * \brief Constant used in the linear gain coding/decoding - * - * \note Either m0 or m1 shall be zero. - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::c1 - * \brief Constant used in the linear gain coding/decoding - */ - -/** - * \struct CameraSensorHelper::AnalogueGainExpConstants - * \brief Analogue gain constants for the exponential gain model * - * \var CameraSensorHelper::AnalogueGainExpConstants::a + * \var CameraSensorHelper::AnalogueGainExp::a * \brief Constant used in the exponential gain coding/decoding * - * \var CameraSensorHelper::AnalogueGainExpConstants::m + * \var CameraSensorHelper::AnalogueGainExp::m * \brief Constant used in the exponential gain coding/decoding */ /** - * \struct CameraSensorHelper::AnalogueGainConstants - * \brief Analogue gain model constants - * - * This union stores the constants used to calculate the analogue gain. The - * CameraSensorHelper::gainType_ variable selects which union member is valid. - * - * \var CameraSensorHelper::AnalogueGainConstants::linear - * \brief Constants for the linear gain model - * - * \var CameraSensorHelper::AnalogueGainConstants::exp - * \brief Constants for the exponential gain model - */ - -/** - * \var CameraSensorHelper::gainType_ - * \brief The analogue gain model type + * \var CameraSensorHelper::blackLevel_ + * \brief The black level of the sensor + * \sa CameraSensorHelper::blackLevel() */ /** - * \var CameraSensorHelper::gainConstants_ + * \var CameraSensorHelper::gain_ * \brief The analogue gain parameters used for calculation * * The analogue gain is calculated through a formula, and its parameters are @@ -366,42 +355,168 @@ static constexpr double expGainDb(double step) return log2_10 * step / 20; } -class CameraSensorHelperAr0521 : public CameraSensorHelper +class CameraSensorHelperAr0144 : public CameraSensorHelper { public: - uint32_t gainCode(double gain) const override; - double gain(uint32_t gainCode) const override; + CameraSensorHelperAr0144() + { + /* Power-on default value: 168 at 12bits. */ + blackLevel_ = 2688; + } + + uint32_t gainCode(double gain) const override + { + /* The recommended minimum gain is 1.6842 to avoid artifacts. */ + gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45); + + /* + * The analogue gain is made of a coarse exponential gain in + * the range [2^0, 2^4] and a fine inversely linear gain in the + * range [1.0, 2.0[. 
There is an additional fixed 1.153125 + * multiplier when the coarse gain reaches 2^2. + */ + + if (gain > 4.0) + gain /= 1.153125; + + unsigned int coarse = std::log2(gain); + unsigned int fine = (1 - (1 << coarse) / gain) * 32; + + /* The fine gain rounding depends on the coarse gain. */ + if (coarse == 1 || coarse == 3) + fine &= ~1; + else if (coarse == 4) + fine &= ~3; + + return (coarse << 4) | (fine & 0xf); + } + + double gain(uint32_t gainCode) const override + { + unsigned int coarse = gainCode >> 4; + unsigned int fine = gainCode & 0xf; + unsigned int d1; + double d2, m; + + switch (coarse) { + default: + case 0: + d1 = 1; + d2 = 32.0; + m = 1.0; + break; + case 1: + d1 = 2; + d2 = 16.0; + m = 1.0; + break; + case 2: + d1 = 1; + d2 = 32.0; + m = 1.153125; + break; + case 3: + d1 = 2; + d2 = 16.0; + m = 1.153125; + break; + case 4: + d1 = 4; + d2 = 8.0; + m = 1.153125; + break; + } + + /* + * With infinite precision, the calculated gain would be exact, + * and the reverse conversion with gainCode() would produce the + * same gain code. In the real world, rounding errors may cause + * the calculated gain to be lower by an amount negligible for + * all purposes, except for the reverse conversion. Converting + * the gain to a gain code could then return the quantized value + * just lower than the original gain code. To avoid this, tests + * showed that adding the machine epsilon to the multiplier m is + * sufficient. + */ + m += std::numeric_limits<decltype(m)>::epsilon(); + + return m * (1 << coarse) / (1.0 - (fine / d1) / d2); + } private: static constexpr double kStep_ = 16; }; +REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144) -uint32_t CameraSensorHelperAr0521::gainCode(double gain) const +class CameraSensorHelperAr0521 : public CameraSensorHelper { - gain = std::clamp(gain, 1.0, 15.5); - unsigned int coarse = std::log2(gain); - unsigned int fine = (gain / (1 << coarse) - 1) * kStep_; +public: + uint32_t gainCode(double gain) const override + { + gain = std::clamp(gain, 1.0, 15.5); + unsigned int coarse = std::log2(gain); + unsigned int fine = (gain / (1 << coarse) - 1) * kStep_; - return (coarse << 4) | (fine & 0xf); -} + return (coarse << 4) | (fine & 0xf); + } -double CameraSensorHelperAr0521::gain(uint32_t gainCode) const -{ - unsigned int coarse = gainCode >> 4; - unsigned int fine = gainCode & 0xf; + double gain(uint32_t gainCode) const override + { + unsigned int coarse = gainCode >> 4; + unsigned int fine = gainCode & 0xf; - return (1 << coarse) * (1 + fine / kStep_); -} + return (1 << coarse) * (1 + fine / kStep_); + } +private: + static constexpr double kStep_ = 16; +}; REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521) +class CameraSensorHelperGc05a2 : public CameraSensorHelper +{ +public: + CameraSensorHelperGc05a2() + { + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2) + +class CameraSensorHelperGc08a3 : public CameraSensorHelper +{ +public: + CameraSensorHelperGc08a3() + { + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3) + +class CameraSensorHelperImx214 : public CameraSensorHelper +{ +public: + CameraSensorHelperImx214() + { + /* From datasheet: 64 at 10bits. 
*/ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 512, -1, 512 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214) + class CameraSensorHelperImx219 : public CameraSensorHelper { public: CameraSensorHelperImx219() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 256, -1, 256 }; + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 256, -1, 256 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219) @@ -411,19 +526,33 @@ class CameraSensorHelperImx258 : public CameraSensorHelper public: CameraSensorHelperImx258() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 512, -1, 512 }; + /* From datasheet: 0x40 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 512, -1, 512 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258) +class CameraSensorHelperImx283 : public CameraSensorHelper +{ +public: + CameraSensorHelperImx283() + { + /* From datasheet: 0x32 at 10bits. */ + blackLevel_ = 3200; + gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283) + class CameraSensorHelperImx290 : public CameraSensorHelper { public: CameraSensorHelperImx290() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.3) }; + /* From datasheet: 0xf0 at 12bits. */ + blackLevel_ = 3840; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290) @@ -433,8 +562,7 @@ class CameraSensorHelperImx296 : public CameraSensorHelper public: CameraSensorHelperImx296() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.1) }; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296) @@ -444,13 +572,39 @@ class CameraSensorHelperImx327 : public CameraSensorHelperImx290 }; REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327) +class CameraSensorHelperImx335 : public CameraSensorHelper +{ +public: + CameraSensorHelperImx335() + { + /* From datasheet: 0x32 at 10bits. */ + blackLevel_ = 3200; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335) + +class CameraSensorHelperImx415 : public CameraSensorHelper +{ +public: + CameraSensorHelperImx415() + { + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415) + +class CameraSensorHelperImx462 : public CameraSensorHelperImx290 +{ +}; +REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462) + class CameraSensorHelperImx477 : public CameraSensorHelper { public: CameraSensorHelperImx477() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 1024, -1, 1024 }; + gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477) @@ -464,8 +618,7 @@ public: * The Sensor Manual doesn't appear to document the gain model. * This has been validated with some empirical testing only. 
*/ - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685) @@ -475,8 +628,7 @@ class CameraSensorHelperOv2740 : public CameraSensorHelper public: CameraSensorHelperOv2740() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740) @@ -486,8 +638,9 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper public: CameraSensorHelperOv4689() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + /* From datasheet: 0x40 at 12bits. */ + blackLevel_ = 1024; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689) @@ -497,8 +650,9 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper public: CameraSensorHelperOv5640() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + /* From datasheet: 0x10 at 10bits. */ + blackLevel_ = 1024; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640) @@ -508,8 +662,7 @@ class CameraSensorHelperOv5647 : public CameraSensorHelper public: CameraSensorHelperOv5647() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647) @@ -519,8 +672,7 @@ class CameraSensorHelperOv5670 : public CameraSensorHelper public: CameraSensorHelperOv5670() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670) @@ -530,8 +682,9 @@ class CameraSensorHelperOv5675 : public CameraSensorHelper public: CameraSensorHelperOv5675() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + /* From Linux kernel driver: 0x40 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675) @@ -541,8 +694,7 @@ class CameraSensorHelperOv5693 : public CameraSensorHelper public: CameraSensorHelperOv5693() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693) @@ -552,8 +704,7 @@ class CameraSensorHelperOv64a40 : public CameraSensorHelper public: CameraSensorHelperOv64a40() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40) @@ -563,15 +714,13 @@ class CameraSensorHelperOv8858 : public CameraSensorHelper public: CameraSensorHelperOv8858() { - gainType_ = AnalogueGainLinear; - /* * \todo Validate the selected 1/128 step value as it differs * from what the sensor manual describes. 
* * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267 */ - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858) @@ -581,8 +730,7 @@ class CameraSensorHelperOv8865 : public CameraSensorHelper public: CameraSensorHelperOv8865() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865) @@ -592,8 +740,7 @@ class CameraSensorHelperOv13858 : public CameraSensorHelper public: CameraSensorHelperOv13858() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858) diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h index 1ca9371b..a9300a64 100644 --- a/src/ipa/libipa/camera_sensor_helper.h +++ b/src/ipa/libipa/camera_sensor_helper.h @@ -2,15 +2,16 @@ /* * Copyright (C) 2021, Google Inc. * - * camera_sensor_helper.h - Helper class that performs sensor-specific parameter computations + * Helper class that performs sensor-specific parameter computations */ #pragma once -#include <stdint.h> - #include <memory> +#include <optional> +#include <stdint.h> #include <string> +#include <variant> #include <vector> #include <libcamera/base/class.h> @@ -25,34 +26,25 @@ public: CameraSensorHelper() = default; virtual ~CameraSensorHelper() = default; + std::optional<int16_t> blackLevel() const { return blackLevel_; } virtual uint32_t gainCode(double gain) const; virtual double gain(uint32_t gainCode) const; protected: - enum AnalogueGainType { - AnalogueGainLinear, - AnalogueGainExponential, - }; - - struct AnalogueGainLinearConstants { + struct AnalogueGainLinear { int16_t m0; int16_t c0; int16_t m1; int16_t c1; }; - struct AnalogueGainExpConstants { + struct AnalogueGainExp { double a; double m; }; - union AnalogueGainConstants { - AnalogueGainLinearConstants linear; - AnalogueGainExpConstants exp; - }; - - AnalogueGainType gainType_; - AnalogueGainConstants gainConstants_; + std::optional<int16_t> blackLevel_; + std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_; private: LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper) diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp new file mode 100644 index 00000000..97124cf4 --- /dev/null +++ b/src/ipa/libipa/colours.cpp @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * libipa miscellaneous colour helpers + */ + +#include "colours.h" + +#include <algorithm> +#include <cmath> + +namespace libcamera { + +namespace ipa { + +/** + * \file colours.h + * \brief Functions to reduce code duplication between IPA modules + */ + +/** + * \brief Estimate luminance from RGB values following ITU-R BT.601 + * \param[in] rgb The RGB value + * + * This function estimates a luminance value from a triplet of Red, Green and + * Blue values, following the formula defined by ITU-R Recommendation BT.601-7 + * which can be found at https://www.itu.int/rec/R-REC-BT.601 + * + * \return The estimated luminance value + */ +double rec601LuminanceFromRGB(const RGB<double> &rgb) +{ + static const Vector<double, 3> rgb2y{{ + 0.299, 0.587, 0.114 + }}; + + return rgb.dot(rgb2y); +} + +/** + * \brief Estimate correlated 
colour temperature from RGB color space input + * \param[in] rgb The RGB value + * + * This function estimates the correlated color temperature from the RGB color space + * input. In physics and color science, the Planckian locus or black body locus + * is the path or locus that the color of an incandescent black body would take + * in a particular chromaticity space as the black body temperature changes. + * + * If a narrow range of color temperatures is considered (those encapsulating + * daylight being the most practical case) one can approximate the Planckian + * locus in order to calculate the CCT in terms of chromaticity coordinates. + * + * More detailed information can be found in: + * https://en.wikipedia.org/wiki/Color_temperature#Approximation + * + * \return The estimated color temperature + */ +uint32_t estimateCCT(const RGB<double> &rgb) +{ + /* + * Convert the RGB values to CIE tristimulus values (XYZ) and divide by + * the sum of X, Y and Z to calculate the CIE xy chromaticity. + */ + static const Matrix<double, 3, 3> rgb2xyz({ + -0.14282, 1.54924, -0.95641, + -0.32466, 1.57837, -0.73191, + -0.68202, 0.77073, 0.56332 + }); + + Vector<double, 3> xyz = rgb2xyz * rgb; + xyz /= xyz.sum(); + + /* Calculate CCT */ + double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y()); + return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33; +} + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h new file mode 100644 index 00000000..d39b2ca8 --- /dev/null +++ b/src/ipa/libipa/colours.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * libipa miscellaneous colour helpers + */ + +#pragma once + +#include <stdint.h> + +#include "libcamera/internal/vector.h" + +namespace libcamera { + +namespace ipa { + +double rec601LuminanceFromRGB(const RGB<double> &rgb); +uint32_t estimateCCT(const RGB<double> &rgb); + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp new file mode 100644 index 00000000..f235316d --- /dev/null +++ b/src/ipa/libipa/exposure_mode_helper.cpp @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class that performs computations relating to exposure + */ +#include "exposure_mode_helper.h" + +#include <algorithm> + +#include <libcamera/base/log.h> + +/** + * \file exposure_mode_helper.h + * \brief Helper class that performs computations relating to exposure + * + * AEGC algorithms have a need to split exposure between exposure time, analogue + * and digital gain. Multiple implementations do so based on paired stages of + * exposure time and gain limits; provide a helper to avoid duplicating the code. + */ + +namespace libcamera { + +using namespace std::literals::chrono_literals; + +LOG_DEFINE_CATEGORY(ExposureModeHelper) + +namespace ipa { + +/** + * \class ExposureModeHelper + * \brief Class for splitting exposure into exposure time and total gain + * + * The ExposureModeHelper class provides a standard interface through which an + * AEGC algorithm can divide exposure between exposure time and gain. It is + * configured with a set of exposure time and gain pairs and works by initially + * fixing gain at 1.0 and increasing exposure time up to the exposure time value + * from the first pair in the set in an attempt to meet the required exposure + * value. 
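/*
 * A brief usage sketch, not from the patch itself: the stage values and the
 * limits below are invented for illustration.
 */
void splitExposureSketch()
{
	using namespace std::literals::chrono_literals;

	/*
	 * Let exposure time rise to 10ms at unity gain, then gain to 2.0,
	 * then exposure time to 33ms, then gain to 4.0.
	 */
	std::vector<std::pair<utils::Duration, double>> stages = {
		{ 10ms, 2.0 },
		{ 33ms, 4.0 },
	};

	ExposureModeHelper helper(stages);

	/* Hardware limits for the current sensor configuration. */
	helper.setLimits(100us, 33ms, 1.0, 8.0);

	/* A 60ms-equivalent exposure splits into 30ms, 2.0x and no digital gain. */
	auto [exposureTime, analogueGain, digitalGain] =
		helper.splitExposure(15ms * 4.0);
}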
+ * + * If the required exposure is not achievable by the first exposure time value + * alone it ramps gain up to the value from the first pair in the set. If the + * required exposure is still not met it then allows exposure time to ramp up to + * the exposure time value from the second pair in the set, and continues in this + * vein until either the required exposure time is met, or else the hardware's + * exposure time or gain limits are reached. + * + * This method allows users to strike a balance between a well-exposed image and + * an acceptable frame-rate, as opposed to simply maximising exposure time + * followed by gain. The same helpers can be used to perform the latter + * operation if needed by passing an empty set of pairs to the initialisation + * function. + * + * The gain values may exceed a camera sensor's analogue gain limits if either + * it or the IPA is also capable of digital gain. The configure() function must + * be called with the hardware's limits to inform the helper of those + * constraints. Any gain that is needed will be applied as analogue gain first + * until the hardware's limit is reached, following which digital gain will be + * used. + */ + +/** + * \brief Construct an ExposureModeHelper instance + * \param[in] stages The vector of paired exposure time and gain limits + * + * The input stages are exposure time and _total_ gain pairs; the gain + * encompasses both analogue and digital gain. + * + * The vector of stages may be empty. In that case, the helper will simply use + * the runtime limits set through setLimits() instead. + */ +ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages) +{ + minExposureTime_ = 0us; + maxExposureTime_ = 0us; + minGain_ = 0; + maxGain_ = 0; + + for (const auto &[s, g] : stages) { + exposureTimes_.push_back(s); + gains_.push_back(g); + } +} + +/** + * \brief Set the exposure time and gain limits + * \param[in] minExposureTime The minimum exposure time supported + * \param[in] maxExposureTime The maximum exposure time supported + * \param[in] minGain The minimum analogue gain supported + * \param[in] maxGain The maximum analogue gain supported + * + * This function configures the exposure time and analogue gain limits that need + * to be adhered to as the helper divides up exposure. Note that this function + * *must* be called whenever those limits change and before splitExposure() is + * used. + * + * If the algorithm using the helpers needs to indicate that either exposure time + * or analogue gain or both should be fixed it can do so by setting both the + * minima and maxima to the same value. + */ +void ExposureModeHelper::setLimits(utils::Duration minExposureTime, + utils::Duration maxExposureTime, + double minGain, double maxGain) +{ + minExposureTime_ = minExposureTime; + maxExposureTime_ = maxExposureTime; + minGain_ = minGain; + maxGain_ = maxGain; +} + +utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const +{ + return std::clamp(exposureTime, minExposureTime_, maxExposureTime_); +} + +double ExposureModeHelper::clampGain(double gain) const +{ + return std::clamp(gain, minGain_, maxGain_); +} + +/** + * \brief Split exposure into exposure time and gain + * \param[in] exposure Exposure value + * + * This function divides a given exposure into exposure time, analogue and + * digital gain by iterating through stages of exposure time and gain limits. 
+ * At each stage the current stage's exposure time limit is multiplied by the + * previous stage's gain limit (or 1.0 initially) to see if the combination of + * the two can meet the required exposure. If they cannot then the current + * stage's exposure time limit is multiplied by the same stage's gain limit to + * see if that combination can meet the required exposure. If they cannot + * then the function moves to consider the next stage. + * + * When a combination of exposure time and gain _stage_ limits are found that + * are sufficient to meet the required exposure, the function attempts to reduce + * exposure time as much as possible whilst fixing gain and still meeting the + * exposure. If a _runtime_ limit prevents exposure time from being lowered + * enough to meet the exposure with gain fixed at the stage limit, gain is also + * lowered to compensate. + * + * Once the exposure time and gain values are ascertained, gain is assigned as + * analogue gain as much as possible, with digital gain only in use if the + * maximum analogue gain runtime limit is unable to accommodate the exposure + * value. + * + * If no combination of exposure time and gain limits is found that meets the + * required exposure, the helper falls back to simply maximising the exposure + * time first, followed by analogue gain, followed by digital gain. + * + * \return Tuple of exposure time, analogue gain, and digital gain + */ +std::tuple<utils::Duration, double, double> +ExposureModeHelper::splitExposure(utils::Duration exposure) const +{ + ASSERT(maxExposureTime_); + ASSERT(maxGain_); + + bool gainFixed = minGain_ == maxGain_; + bool exposureTimeFixed = minExposureTime_ == maxExposureTime_; + + /* + * There's no point entering the loop if we cannot change either gain + * or exposure time anyway. + */ + if (exposureTimeFixed && gainFixed) + return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) }; + + utils::Duration exposureTime; + double stageGain = 1.0; + double gain; + + for (unsigned int stage = 0; stage < gains_.size(); stage++) { + double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]); + utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]); + stageGain = clampGain(gains_[stage]); + + /* + * We perform the clamping on both exposure time and gain in + * case the helper has had limits set that prevent those values + * being lowered beyond a certain minimum...this can happen at + * runtime for various reasons and so would not be known when + * the stage limits are initialised. + */ + + if (stageExposureTime * lastStageGain >= exposure) { + exposureTime = clampExposureTime(exposure / clampGain(lastStageGain)); + gain = clampGain(exposure / exposureTime); + + return { exposureTime, gain, exposure / (exposureTime * gain) }; + } + + if (stageExposureTime * stageGain >= exposure) { + exposureTime = clampExposureTime(exposure / clampGain(stageGain)); + gain = clampGain(exposure / exposureTime); + + return { exposureTime, gain, exposure / (exposureTime * gain) }; + } + } + + /* + * From here on all we can do is max out the exposure time, followed by + * the analogue gain. If we still haven't achieved the target we send + * the rest of the exposure time to digital gain. If we were given no + * stages to use then the default stageGain of 1.0 is used so that + * exposure time is maxed before gain is touched at all. 
+ */ + exposureTime = clampExposureTime(exposure / clampGain(stageGain)); + gain = clampGain(exposure / exposureTime); + + return { exposureTime, gain, exposure / (exposureTime * gain) }; +} + +/** + * \fn ExposureModeHelper::minExposureTime() + * \brief Retrieve the configured minimum exposure time limit set through + * setLimits() + * \return The minExposureTime_ value + */ + +/** + * \fn ExposureModeHelper::maxExposureTime() + * \brief Retrieve the configured maximum exposure time set through setLimits() + * \return The maxExposureTime_ value + */ + +/** + * \fn ExposureModeHelper::minGain() + * \brief Retrieve the configured minimum gain set through setLimits() + * \return The minGain_ value + */ + +/** + * \fn ExposureModeHelper::maxGain() + * \brief Retrieve the configured maximum gain set through setLimits() + * \return The maxGain_ value + */ + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h new file mode 100644 index 00000000..c5be1b67 --- /dev/null +++ b/src/ipa/libipa/exposure_mode_helper.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class that performs computations relating to exposure + */ + +#pragma once + +#include <tuple> +#include <utility> +#include <vector> + +#include <libcamera/base/span.h> +#include <libcamera/base/utils.h> + +namespace libcamera { + +namespace ipa { + +class ExposureModeHelper +{ +public: + ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages); + ~ExposureModeHelper() = default; + + void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime, + double minGain, double maxGain); + + std::tuple<utils::Duration, double, double> + splitExposure(utils::Duration exposure) const; + + utils::Duration minExposureTime() const { return minExposureTime_; } + utils::Duration maxExposureTime() const { return maxExposureTime_; } + double minGain() const { return minGain_; } + double maxGain() const { return maxGain_; } + +private: + utils::Duration clampExposureTime(utils::Duration exposureTime) const; + double clampGain(double gain) const; + + std::vector<utils::Duration> exposureTimes_; + std::vector<double> gains_; + + utils::Duration minExposureTime_; + utils::Duration maxExposureTime_; + double minGain_; + double maxGain_; +}; + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp index e812faa5..0365e919 100644 --- a/src/ipa/libipa/fc_queue.cpp +++ b/src/ipa/libipa/fc_queue.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Google Inc. * - * fc_queue.cpp - IPA Frame context queue + * IPA Frame context queue */ #include "fc_queue.h" diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h index a589e7e1..a1d13652 100644 --- a/src/ipa/libipa/fc_queue.h +++ b/src/ipa/libipa/fc_queue.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Google Inc. 
* - * fc_queue.h - IPA Frame context queue + * IPA Frame context queue */ #pragma once @@ -25,6 +25,7 @@ struct FrameContext { private: template<typename T> friend class FCQueue; uint32_t frame; + bool initialised = false; }; template<typename FrameContext> @@ -38,8 +39,10 @@ public: void clear() { - for (FrameContext &ctx : contexts_) + for (FrameContext &ctx : contexts_) { + ctx.initialised = false; ctx.frame = 0; + } } FrameContext &alloc(const uint32_t frame) @@ -83,6 +86,21 @@ public: << " has been overwritten by " << frameContext.frame; + if (frame == 0 && !frameContext.initialised) { + /* + * If the IPA calls get() at start() time it will get an + * un-intialized FrameContext as the below "frame == + * frameContext.frame" check will return success because + * FrameContexts are zeroed at creation time. + * + * Make sure the FrameContext gets initialised if get() + * is called before alloc() by the IPA for frame#0. + */ + init(frameContext, frame); + + return frameContext; + } + if (frame == frameContext.frame) return frameContext; @@ -108,6 +126,7 @@ private: { frameContext = {}; frameContext.frame = frame; + frameContext.initialised = true; } std::vector<FrameContext> contexts_; diff --git a/src/ipa/libipa/fixedpoint.cpp b/src/ipa/libipa/fixedpoint.cpp new file mode 100644 index 00000000..6b698fc5 --- /dev/null +++ b/src/ipa/libipa/fixedpoint.cpp @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Fixed / floating point conversions + */ + +#include "fixedpoint.h" + +/** + * \file fixedpoint.h + */ + +namespace libcamera { + +namespace ipa { + +/** + * \fn R floatingToFixedPoint(T number) + * \brief Convert a floating point number to a fixed-point representation + * \tparam I Bit width of the integer part of the fixed-point + * \tparam F Bit width of the fractional part of the fixed-point + * \tparam R Return type of the fixed-point representation + * \tparam T Input type of the floating point representation + * \param number The floating point number to convert to fixed point + * \return The converted value + */ + +/** + * \fn R fixedToFloatingPoint(T number) + * \brief Convert a fixed-point number to a floating point representation + * \tparam I Bit width of the integer part of the fixed-point + * \tparam F Bit width of the fractional part of the fixed-point + * \tparam R Return type of the floating point representation + * \tparam T Input type of the fixed-point representation + * \param number The fixed point number to convert to floating point + * \return The converted value + */ + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/fixedpoint.h b/src/ipa/libipa/fixedpoint.h new file mode 100644 index 00000000..709cf50f --- /dev/null +++ b/src/ipa/libipa/fixedpoint.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Fixed / floating point conversions + */ + +#pragma once + +#include <cmath> +#include <type_traits> + +namespace libcamera { + +namespace ipa { + +#ifndef __DOXYGEN__ +template<unsigned int I, unsigned int F, typename R, typename T, + std::enable_if_t<std::is_integral_v<R> && + std::is_floating_point_v<T>> * = nullptr> +#else +template<unsigned int I, unsigned int F, typename R, typename T> +#endif +constexpr R floatingToFixedPoint(T number) +{ + static_assert(sizeof(int) >= sizeof(R)); + static_assert(I + F <= sizeof(R) * 8); + + /* + * The intermediate 
cast to int is needed on arm platforms to properly + * cast negative values. See + * https://embeddeduse.com/2013/08/25/casting-a-negative-float-to-an-unsigned-int/ + */ + R mask = (1 << (F + I)) - 1; + R frac = static_cast<R>(static_cast<int>(std::round(number * (1 << F)))) & mask; + + return frac; +} + +#ifndef __DOXYGEN__ +template<unsigned int I, unsigned int F, typename R, typename T, + std::enable_if_t<std::is_floating_point_v<R> && + std::is_integral_v<T>> * = nullptr> +#else +template<unsigned int I, unsigned int F, typename R, typename T> +#endif +constexpr R fixedToFloatingPoint(T number) +{ + static_assert(sizeof(int) >= sizeof(T)); + static_assert(I + F <= sizeof(T) * 8); + + /* + * Recreate the upper bits in case of a negative number by shifting the sign + * bit from the fixed point to the first bit of the unsigned and then right shifting + * by the same amount which keeps the sign bit in place. + * This can be optimized by the compiler quite well. + */ + int remaining_bits = sizeof(int) * 8 - (I + F); + int t = static_cast<int>(static_cast<unsigned>(number) << remaining_bits) >> remaining_bits; + return static_cast<R>(t) / static_cast<R>(1 << F); +} + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp index 6b5cde8e..10e44b54 100644 --- a/src/ipa/libipa/histogram.cpp +++ b/src/ipa/libipa/histogram.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * histogram.cpp - histogram calculations + * histogram calculations */ #include "histogram.h" @@ -29,24 +29,46 @@ namespace ipa { */ /** + * \fn Histogram::Histogram() + * \brief Construct an empty Histogram + * + * This empty constructor exists largely to allow Histograms to be embedded in + * other classes which may be created before the contents of the Histogram are + * known. 
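+ *
+ * As a purely illustrative sketch (the surrounding algorithm class is
+ * hypothetical), the typical pattern is to default-construct the member and
+ * assign a populated Histogram once statistics become available:
+ *
+ * \code{.cpp}
+ * struct MyAgc {
+ *	Histogram yHist_;	// empty until the first statistics arrive
+ *
+ *	void process(Span<const uint32_t> bins)
+ *	{
+ *		yHist_ = Histogram(bins);
+ *	}
+ * };
+ * \endcode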
+ */ + +/** * \brief Create a cumulative histogram - * \param[in] data A pre-sorted histogram to be passed + * \param[in] data A (non-cumulative) histogram */ Histogram::Histogram(Span<const uint32_t> data) { - cumulative_.reserve(data.size()); - cumulative_.push_back(0); - for (const uint32_t &value : data) - cumulative_.push_back(cumulative_.back() + value); + cumulative_.resize(data.size() + 1); + cumulative_[0] = 0; + for (const auto &[i, value] : utils::enumerate(data)) + cumulative_[i + 1] = cumulative_[i] + value; } /** + * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform) + * \brief Create a cumulative histogram + * \param[in] data A (non-cumulative) histogram + * \param[in] transform The transformation function to apply to every bin + */ + +/** * \fn Histogram::bins() * \brief Retrieve the number of bins currently used by the Histogram * \return Number of bins */ /** + * \fn Histogram::data() + * \brief Retrieve the internal data + * \return The data + */ + +/** * \fn Histogram::total() * \brief Retrieve the total number of values in the data set * \return Number of values diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h index 05bb4b80..a926002c 100644 --- a/src/ipa/libipa/histogram.h +++ b/src/ipa/libipa/histogram.h @@ -2,18 +2,18 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * histogram.h - histogram calculation interface + * histogram calculation interface */ #pragma once -#include <assert.h> #include <limits.h> #include <stdint.h> - +#include <type_traits> #include <vector> #include <libcamera/base/span.h> +#include <libcamera/base/utils.h> namespace libcamera { @@ -22,8 +22,21 @@ namespace ipa { class Histogram { public: + Histogram() { cumulative_.push_back(0); } Histogram(Span<const uint32_t> data); + + template<typename Transform, + std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr> + Histogram(Span<const uint32_t> data, Transform transform) + { + cumulative_.resize(data.size() + 1); + cumulative_[0] = 0; + for (const auto &[i, value] : utils::enumerate(data)) + cumulative_[i + 1] = cumulative_[i] + transform(value); + } + size_t bins() const { return cumulative_.size() - 1; } + const Span<const uint64_t> data() const { return cumulative_; } uint64_t total() const { return cumulative_[cumulative_.size() - 1]; } uint64_t cumulativeFrequency(double bin) const; double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const; diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp new file mode 100644 index 00000000..73e8d3b7 --- /dev/null +++ b/src/ipa/libipa/interpolator.cpp @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class for interpolating objects + */ +#include "interpolator.h" + +#include <algorithm> +#include <string> + +#include <libcamera/base/log.h> + +#include "libcamera/internal/yaml_parser.h" + +#include "interpolator.h" + +/** + * \file interpolator.h + * \brief Helper class for linear interpolating a set of objects + */ + +namespace libcamera { + +LOG_DEFINE_CATEGORY(Interpolator) + +namespace ipa { + +/** + * \class Interpolator + * \brief Class for storing, retrieving, and interpolating objects + * \tparam T Type of objects stored in the interpolator + * + * The main use case is to pass a map from color temperatures to corresponding + * objects (eg. 
matrices for color correction), and then requesting a + * interpolated object for a specific color temperature. This class will + * abstract away the interpolation portion. + */ + +/** + * \fn Interpolator::Interpolator() + * \brief Construct an empty interpolator + */ + +/** + * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data) + * \brief Construct an interpolator from a map of objects + * \param data Map from which to construct the interpolator + */ + +/** + * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data) + * \brief Construct an interpolator from a map of objects + * \param data Map from which to construct the interpolator + */ + +/** + * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml, + const std::string &key_name, + const std::string &value_name) + * \brief Initialize an Interpolator instance from yaml + * \tparam T Type of data stored in the interpolator + * \param[in] yaml The yaml object that contains the map of unsigned integers to + * objects + * \param[in] key_name The name of the key in the yaml object + * \param[in] value_name The name of the value in the yaml object + * + * The yaml object is expected to be a list of maps. Each map has two or more + * pairs: one of \a key_name to the key value (usually color temperature), and + * one or more of \a value_name to the object. This is a bit difficult to + * explain, so here is an example (in python, as it is easier to parse than + * yaml): + * [ + * { + * 'ct': 2860, + * 'ccm': [ 2.12089, -0.52461, -0.59629, + * -0.85342, 2.80445, -0.95103, + * -0.26897, -1.14788, 2.41685 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * + * { + * 'ct': 2960, + * 'ccm': [ 2.26962, -0.54174, -0.72789, + * -0.77008, 2.60271, -0.83262, + * -0.26036, -1.51254, 2.77289 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * + * { + * 'ct': 3603, + * 'ccm': [ 2.18644, -0.66148, -0.52496, + * -0.77828, 2.69474, -0.91645, + * -0.25239, -0.83059, 2.08298 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * ] + * + * In this case, \a key_name would be 'ct', and \a value_name can be either + * 'ccm' or 'offsets'. This way multiple interpolators can be defined in + * one set of color temperature ranges in the tuning file, and they can be + * retrieved separately with the \a value_name parameter. + * + * \return Zero on success, negative error code otherwise + */ + +/** + * \fn void Interpolator<T>::setQuantization(const unsigned int q) + * \brief Set the quantization value + * \param[in] q The quantization value + * + * Sets the quantization value. When this is set, 'key' gets quantized to this + * size, before doing the interpolation. This can help in reducing the number of + * updates pushed to the hardware. + * + * Note that normally a threshold needs to be combined with quantization. + * Otherwise a value that swings around the edge of the quantization step will + * lead to constant updates. + */ + +/** + * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data) + * \brief Set the internal map + * + * Overwrites the internal map using move semantics. + */ + +/** + * \fn const T& Interpolator<T>::getInterpolated() + * \brief Retrieve an interpolated value for the given key + * \param[in] key The unsigned integer key of the object to retrieve + * \param[out] quantizedKey If provided, the key value after quantization + * \return The object corresponding to the key. The object is cached internally, + * so on successive calls with the same key (after quantization) interpolation + * is not recalculated. 
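+ *
+ * A minimal usage sketch, with made-up values and a plain double as the
+ * stored type:
+ *
+ * \code{.cpp}
+ * Interpolator<double> gains({ { 2800, 1.2 }, { 6500, 2.0 } });
+ * double gain = gains.getInterpolated(4650);	// midway between the knots: 1.6
+ * \endcode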
+ */ + +/** + * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double + * lambda) + * \brief Interpolate between two instances of T + * \param a The first value to interpolate + * \param b The second value to interpolate + * \param dest The destination for the interpolated value + * \param lambda The interpolation factor (0..1) + * + * Interpolates between \a a and \a b according to \a lambda. It calculates + * dest = a * (1.0 - lambda) + b * lambda; + * + * If T supports multiplication with double and addition, this function can be + * used as is. For other types this function can be overwritten using partial + * template specialization. + */ + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h new file mode 100644 index 00000000..fffce214 --- /dev/null +++ b/src/ipa/libipa/interpolator.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class for interpolating maps of objects + */ + +#pragma once + +#include <algorithm> +#include <cmath> +#include <map> +#include <string> +#include <tuple> + +#include <libcamera/base/log.h> + +#include "libcamera/internal/yaml_parser.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(Interpolator) + +namespace ipa { + +template<typename T> +class Interpolator +{ +public: + Interpolator() = default; + Interpolator(const std::map<unsigned int, T> &data) + : data_(data) + { + } + Interpolator(std::map<unsigned int, T> &&data) + : data_(std::move(data)) + { + } + + ~Interpolator() = default; + + int readYaml(const libcamera::YamlObject &yaml, + const std::string &key_name, + const std::string &value_name) + { + data_.clear(); + lastInterpolatedKey_.reset(); + + if (!yaml.isList()) { + LOG(Interpolator, Error) << "yaml object must be a list"; + return -EINVAL; + } + + for (const auto &value : yaml.asList()) { + unsigned int ct = std::stoul(value[key_name].get<std::string>("")); + std::optional<T> data = + value[value_name].get<T>(); + if (!data) { + return -EINVAL; + } + + data_[ct] = *data; + } + + if (data_.size() < 1) { + LOG(Interpolator, Error) << "Need at least one element"; + return -EINVAL; + } + + return 0; + } + + void setQuantization(const unsigned int q) + { + quantization_ = q; + } + + void setData(std::map<unsigned int, T> &&data) + { + data_ = std::move(data); + lastInterpolatedKey_.reset(); + } + + const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr) + { + ASSERT(data_.size() > 0); + + if (quantization_ > 0) + key = std::lround(key / static_cast<double>(quantization_)) * quantization_; + + if (quantizedKey) + *quantizedKey = key; + + if (lastInterpolatedKey_.has_value() && + *lastInterpolatedKey_ == key) + return lastInterpolatedValue_; + + auto it = data_.lower_bound(key); + + if (it == data_.begin()) + return it->second; + + if (it == data_.end()) + return std::prev(it)->second; + + if (it->first == key) + return it->second; + + auto it2 = std::prev(it); + double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first); + interpolate(it2->second, it->second, lastInterpolatedValue_, lambda); + lastInterpolatedKey_ = key; + + return lastInterpolatedValue_; + } + + void interpolate(const T &a, const T &b, T &dest, double lambda) + { + dest = a * (1.0 - lambda) + b * lambda; + } + +private: + std::map<unsigned int, T> data_; + T lastInterpolatedValue_; + std::optional<unsigned int> 
lastInterpolatedKey_;
+	unsigned int quantization_ = 0;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp
new file mode 100644
index 00000000..f607d86c
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Polynomial class to represent lens shading correction
+ */
+
+#include "lsc_polynomial.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file lsc_polynomial.h
+ * \brief LscPolynomial class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+/**
+ * \class LscPolynomial
+ * \brief Class for handling even polynomials used in lens shading correction
+ *
+ * Shading artifacts of camera lenses can be modeled using even radial
+ * polynomials. This class implements a polynomial with 5 coefficients which
+ * follows the definition of the FixVignetteRadial opcode in the Adobe DNG
+ * specification.
+ */
+
+/**
+ * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+		double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+		double k4 = 0.0)
+ * \brief Construct a polynomial using the given coefficients
+ * \param cx Center-x relative to the image in normalized coordinates (0..1)
+ * \param cy Center-y relative to the image in normalized coordinates (0..1)
+ * \param k0 Coefficient of the polynomial
+ * \param k1 Coefficient of the polynomial
+ * \param k2 Coefficient of the polynomial
+ * \param k3 Coefficient of the polynomial
+ * \param k4 Coefficient of the polynomial
+ */
+
+/**
+ * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y)
+ * \brief Sample the polynomial at the given normalized pixel position
+ *
+ * This function samples the polynomial at the given pixel position divided by
+ * the value returned by getM().
+ *
+ * \param x x position in normalized coordinates
+ * \param y y position in normalized coordinates
+ * \return The sampled value
+ */
+
+/**
+ * \fn LscPolynomial::getM()
+ * \brief Get the value m as described in the DNG specification
+ *
+ * Returns m according to the DNG specification. m represents the Euclidean
+ * distance (in pixels) from the optical center to the farthest pixel in the
+ * image.
+ *
+ * \return The distance m, in pixels
+ */
+
+/**
+ * \fn LscPolynomial::setReferenceImageSize(const Size &size)
+ * \brief Set the reference image size
+ *
+ * Set the reference image size that is used for subsequent calls to getM() and
+ * sampleAtNormalizedPixelPos().
+ *
+ * \param size The size of the reference image
+ */
+
+} // namespace ipa
+} // namespace libcamera
diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h
new file mode 100644
index 00000000..c898faeb
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Helper for radial polynomial used in lens shading correction.
+ */ +#pragma once + +#include <algorithm> +#include <array> +#include <assert.h> +#include <cmath> + +#include <libcamera/base/log.h> +#include <libcamera/base/span.h> + +#include "libcamera/internal/yaml_parser.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(LscPolynomial) + +namespace ipa { + +class LscPolynomial +{ +public: + LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0, + double k1 = 0.0, double k2 = 0.0, double k3 = 0.0, + double k4 = 0.0) + : cx_(cx), cy_(cy), cnx_(0), cny_(0), + coefficients_({ k0, k1, k2, k3, k4 }) + { + } + + double sampleAtNormalizedPixelPos(double x, double y) const + { + double dx = x - cnx_; + double dy = y - cny_; + double r = sqrt(dx * dx + dy * dy); + double res = 1.0; + for (unsigned int i = 0; i < coefficients_.size(); i++) { + res += coefficients_[i] * std::pow(r, (i + 1) * 2); + } + return res; + } + + double getM() const + { + double cpx = imageSize_.width * cx_; + double cpy = imageSize_.height * cy_; + double mx = std::max(cpx, std::fabs(imageSize_.width - cpx)); + double my = std::max(cpy, std::fabs(imageSize_.height - cpy)); + + return sqrt(mx * mx + my * my); + } + + void setReferenceImageSize(const Size &size) + { + assert(!size.isNull()); + imageSize_ = size; + + /* Calculate normalized centers */ + double m = getM(); + cnx_ = (size.width * cx_) / m; + cny_ = (size.height * cy_) / m; + } + +private: + double cx_; + double cy_; + double cnx_; + double cny_; + std::array<double, 5> coefficients_; + + Size imageSize_; +}; + +} /* namespace ipa */ + +#ifndef __DOXYGEN__ + +template<> +struct YamlObject::Getter<ipa::LscPolynomial> { + std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const + { + std::optional<double> cx = obj["cx"].get<double>(); + std::optional<double> cy = obj["cy"].get<double>(); + std::optional<double> k0 = obj["k0"].get<double>(); + std::optional<double> k1 = obj["k1"].get<double>(); + std::optional<double> k2 = obj["k2"].get<double>(); + std::optional<double> k3 = obj["k3"].get<double>(); + std::optional<double> k4 = obj["k4"].get<double>(); + + if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) + LOG(LscPolynomial, Error) + << "Polynomial is missing a parameter"; + + return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4); + } +}; + +#endif + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/lux.cpp b/src/ipa/libipa/lux.cpp new file mode 100644 index 00000000..61f8fea8 --- /dev/null +++ b/src/ipa/libipa/lux.cpp @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2019, Raspberry Pi Ltd + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class that implements lux estimation + */ +#include "lux.h" + +#include <algorithm> +#include <chrono> + +#include <libcamera/base/log.h> + +#include "libcamera/internal/yaml_parser.h" + +#include "histogram.h" + +/** + * \file lux.h + * \brief Helper class that implements lux estimation + * + * Estimating the lux level of an image is a common operation that can for + * instance be used to adjust the target Y value in AGC or for Bayesian AWB + * estimation. + */ + +namespace libcamera { + +using namespace std::literals::chrono_literals; + +LOG_DEFINE_CATEGORY(Lux) + +namespace ipa { + +/** + * \class Lux + * \brief Class that implements lux estimation + * + * IPAs that wish to use lux estimation should create a Lux algorithm module + * that lightly wraps this module by providing the platform-specific luminance + * histogram. 
The Lux entry in the tuning file must then precede the algorithms
+ * that depend on the estimated lux value.
+ */
+
+/**
+ * \var Lux::binSize_
+ * \brief The maximum count of each bin
+ */
+
+/**
+ * \var Lux::referenceExposureTime_
+ * \brief The exposure time of the reference image, in microseconds
+ */
+
+/**
+ * \var Lux::referenceAnalogueGain_
+ * \brief The analogue gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceDigitalGain_
+ * \brief The digital gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceY_
+ * \brief The measured luminance of the reference image, out of the bin size
+ *
+ * \sa binSize_
+ */
+
+/**
+ * \var Lux::referenceLux_
+ * \brief The estimated lux level of the reference image
+ */
+
+/**
+ * \brief Construct the Lux helper module
+ * \param[in] binSize The maximum count of each bin
+ */
+Lux::Lux(unsigned int binSize)
+	: binSize_(binSize)
+{
+}
+
+/**
+ * \brief Parse tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses YAML tuning data for the common Lux module. It requires
+ * reference exposure time, analogue gain, digital gain, luminance and lux
+ * values.
+ *
+ * \code{.unparsed}
+ * algorithms:
+ *   - Lux:
+ *       referenceExposureTime: 10000
+ *       referenceAnalogueGain: 4.0
+ *       referenceDigitalGain: 1.0
+ *       referenceY: 12000
+ *       referenceLux: 1000
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int Lux::parseTuningData(const YamlObject &tuningData)
+{
+	auto value = tuningData["referenceExposureTime"].get<double>();
+	if (!value) {
+		LOG(Lux, Error) << "Missing tuning parameter: "
+				<< "'referenceExposureTime'";
+		return -EINVAL;
+	}
+	referenceExposureTime_ = *value * 1.0us;
+
+	value = tuningData["referenceAnalogueGain"].get<double>();
+	if (!value) {
+		LOG(Lux, Error) << "Missing tuning parameter: "
+				<< "'referenceAnalogueGain'";
+		return -EINVAL;
+	}
+	referenceAnalogueGain_ = *value;
+
+	value = tuningData["referenceDigitalGain"].get<double>();
+	if (!value) {
+		LOG(Lux, Error) << "Missing tuning parameter: "
+				<< "'referenceDigitalGain'";
+		return -EINVAL;
+	}
+	referenceDigitalGain_ = *value;
+
+	value = tuningData["referenceY"].get<double>();
+	if (!value) {
+		LOG(Lux, Error) << "Missing tuning parameter: "
+				<< "'referenceY'";
+		return -EINVAL;
+	}
+	referenceY_ = *value;
+
+	value = tuningData["referenceLux"].get<double>();
+	if (!value) {
+		LOG(Lux, Error) << "Missing tuning parameter: "
+				<< "'referenceLux'";
+		return -EINVAL;
+	}
+	referenceLux_ = *value;
+
+	return 0;
+}
+
+/**
+ * \brief Estimate lux given runtime values
+ * \param[in] exposureTime Exposure time applied to the frame
+ * \param[in] aGain Analogue gain applied to the frame
+ * \param[in] dGain Digital gain applied to the frame
+ * \param[in] yHist Histogram from the ISP statistics
+ *
+ * Estimate the lux given the exposure time, gain, and histogram.
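+ * The estimate scales the reference lux level by the ratios between the
+ * reference settings and the current settings, roughly as follows (shown in
+ * illustrative pseudo-notation only, where meanY is the inter-quantile mean
+ * of the whole histogram):
+ *
+ * \code{.unparsed}
+ * lux = referenceLux * (referenceExposureTime / exposureTime)
+ *                    * (referenceAnalogueGain / aGain)
+ *                    * (referenceDigitalGain / dGain)
+ *                    * (meanY * (binSize / bins) / referenceY)
+ * \endcode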
+ * + * \return Estimated lux value + */ +double Lux::estimateLux(utils::Duration exposureTime, + double aGain, double dGain, + const Histogram &yHist) const +{ + double currentY = yHist.interQuantileMean(0, 1); + double exposureTimeRatio = referenceExposureTime_ / exposureTime; + double aGainRatio = referenceAnalogueGain_ / aGain; + double dGainRatio = referenceDigitalGain_ / dGain; + double yRatio = currentY * (binSize_ / yHist.bins()) / referenceY_; + + double estimatedLux = exposureTimeRatio * aGainRatio * dGainRatio * + yRatio * referenceLux_; + + LOG(Lux, Debug) << "Estimated lux " << estimatedLux; + return estimatedLux; +} + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/lux.h b/src/ipa/libipa/lux.h new file mode 100644 index 00000000..93ca6479 --- /dev/null +++ b/src/ipa/libipa/lux.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2019, Raspberry Pi Ltd + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class that implements lux estimation + */ + +#pragma once + +#include <libcamera/base/utils.h> + +namespace libcamera { + +class YamlObject; + +namespace ipa { + +class Histogram; + +class Lux +{ +public: + Lux(unsigned int binSize); + + int parseTuningData(const YamlObject &tuningData); + double estimateLux(utils::Duration exposureTime, + double aGain, double dGain, + const Histogram &yHist) const; + +private: + unsigned int binSize_; + utils::Duration referenceExposureTime_; + double referenceAnalogueGain_; + double referenceDigitalGain_; + double referenceY_; + double referenceLux_; +}; + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build index 016b8e0e..12d8d15b 100644 --- a/src/ipa/libipa/meson.build +++ b/src/ipa/libipa/meson.build @@ -1,19 +1,35 @@ # SPDX-License-Identifier: CC0-1.0 libipa_headers = files([ + 'agc_mean_luminance.h', 'algorithm.h', 'camera_sensor_helper.h', + 'colours.h', + 'exposure_mode_helper.h', 'fc_queue.h', + 'fixedpoint.h', 'histogram.h', + 'interpolator.h', + 'lsc_polynomial.h', + 'lux.h', 'module.h', + 'pwl.h', ]) libipa_sources = files([ + 'agc_mean_luminance.cpp', 'algorithm.cpp', 'camera_sensor_helper.cpp', + 'colours.cpp', + 'exposure_mode_helper.cpp', 'fc_queue.cpp', + 'fixedpoint.cpp', 'histogram.cpp', + 'interpolator.cpp', + 'lsc_polynomial.cpp', + 'lux.cpp', 'module.cpp', + 'pwl.cpp', ]) libipa_includes = include_directories('..') @@ -21,3 +37,7 @@ libipa_includes = include_directories('..') libipa = static_library('ipa', [libipa_sources, libipa_headers], include_directories : ipa_includes, dependencies : libcamera_private) + +libipa_dep = declare_dependency(sources : libipa_headers, + include_directories : libipa_includes, + link_with : libipa) diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp index ee01f12a..64ca9141 100644 --- a/src/ipa/libipa/module.cpp +++ b/src/ipa/libipa/module.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Ideas On Board * - * module.cpp - IPA Module + * IPA Module */ #include "module.h" diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h index 4149a353..0fb51916 100644 --- a/src/ipa/libipa/module.h +++ b/src/ipa/libipa/module.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Ideas On Board * - * module.h - IPA module + * IPA module */ #pragma once diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp new file mode 100644 index 00000000..88fe2022 --- /dev/null +++ b/src/ipa/libipa/pwl.cpp @@ -0,0 +1,457 @@ +/* 
SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2019, Raspberry Pi Ltd + * Copyright (C) 2024, Ideas on Board Oy + * + * Piecewise linear functions + */ + +#include "pwl.h" + +#include <cmath> +#include <sstream> + +/** + * \file pwl.h + * \brief Piecewise linear functions + */ + +namespace libcamera { + +namespace ipa { + +/** + * \class Pwl + * \brief Describe a univariate piecewise linear function in two-dimensional + * real space + * + * A piecewise linear function is a univariate function that maps reals to + * reals, and it is composed of multiple straight-line segments. + * + * While a mathematical piecewise linear function would usually be defined by + * a list of linear functions and for which values of the domain they apply, + * this Pwl class is instead defined by a list of points at which these line + * segments intersect. These intersecting points are known as knots. + * + * https://en.wikipedia.org/wiki/Piecewise_linear_function + * + * A consequence of the Pwl class being defined by knots instead of linear + * functions is that the values of the piecewise linear function past the ends + * of the function are constants as opposed to linear functions. In a + * mathematical piecewise linear function that is defined by multiple linear + * functions, the ends of the function are also linear functions and hence grow + * to infinity (or negative infinity). However, since this Pwl class is defined + * by knots, the y-value of the leftmost and rightmost knots will hold for all + * x values to negative infinity and positive infinity, respectively. + */ + +/** + * \typedef Pwl::Point + * \brief Describe a point in two-dimensional real space + */ + +/** + * \class Pwl::Interval + * \brief Describe an interval in one-dimensional real space + */ + +/** + * \fn Pwl::Interval::Interval(double _start, double _end) + * \brief Construct an interval + * \param[in] _start Start of the interval + * \param[in] _end End of the interval + */ + +/** + * \fn Pwl::Interval::contains + * \brief Check if a given value falls within the interval + * \param[in] value Value to check + * \return True if the value falls within the interval, including its bounds, + * or false otherwise + */ + +/** + * \fn Pwl::Interval::clamp + * \brief Clamp a value such that it is within the interval + * \param[in] value Value to clamp + * \return The clamped value + */ + +/** + * \fn Pwl::Interval::length + * \brief Compute the length of the interval + * \return The length of the interval + */ + +/** + * \var Pwl::Interval::start + * \brief Start of the interval + */ + +/** + * \var Pwl::Interval::end + * \brief End of the interval + */ + +/** + * \brief Construct an empty piecewise linear function + */ +Pwl::Pwl() +{ +} + +/** + * \brief Construct a piecewise linear function from a list of 2D points + * \param[in] points Vector of points from which to construct the piecewise + * linear function + * + * \a points must be in ascending order of x-value. + */ +Pwl::Pwl(const std::vector<Point> &points) + : points_(points) +{ +} + +/** + * \copydoc Pwl::Pwl(const std::vector<Point> &points) + * + * The contents of the \a points vector is moved to the newly constructed Pwl + * instance. 
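+ *
+ * For illustration (the values are hypothetical), a small curve can equally
+ * be built incrementally and then evaluated:
+ *
+ * \code{.cpp}
+ * Pwl curve;
+ * curve.append(0.0, 0.0);
+ * curve.append(0.5, 0.8);
+ * curve.append(1.0, 1.0);
+ * double y = curve.eval(0.25);	// linearly interpolated: 0.4
+ * \endcode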
+ */ +Pwl::Pwl(std::vector<Point> &&points) + : points_(std::move(points)) +{ +} + +/** + * \brief Append a point to the end of the piecewise linear function + * \param[in] x x-coordinate of the point to add to the piecewise linear function + * \param[in] y y-coordinate of the point to add to the piecewise linear function + * \param[in] eps Epsilon for the minimum x distance between points (optional) + * + * The point's x-coordinate must be greater than the x-coordinate of the last + * (= greatest) point already in the piecewise linear function. + */ +void Pwl::append(double x, double y, const double eps) +{ + if (points_.empty() || points_.back().x() + eps < x) + points_.push_back(Point({ x, y })); +} + +/** + * \brief Prepend a point to the beginning of the piecewise linear function + * \param[in] x x-coordinate of the point to add to the piecewise linear function + * \param[in] y y-coordinate of the point to add to the piecewise linear function + * \param[in] eps Epsilon for the minimum x distance between points (optional) + * + * The point's x-coordinate must be less than the x-coordinate of the first + * (= smallest) point already in the piecewise linear function. + */ +void Pwl::prepend(double x, double y, const double eps) +{ + if (points_.empty() || points_.front().x() - eps > x) + points_.insert(points_.begin(), Point({ x, y })); +} + +/** + * \fn Pwl::empty() const + * \brief Check if the piecewise linear function is empty + * \return True if there are no points in the function, false otherwise + */ + +/** + * \fn Pwl::size() const + * \brief Retrieve the number of points in the piecewise linear function + * \return The number of points in the piecewise linear function + */ + +/** + * \brief Get the domain of the piecewise linear function + * \return An interval representing the domain + */ +Pwl::Interval Pwl::domain() const +{ + return Interval(points_[0].x(), points_[points_.size() - 1].x()); +} + +/** + * \brief Get the range of the piecewise linear function + * \return An interval representing the range + */ +Pwl::Interval Pwl::range() const +{ + double lo = points_[0].y(), hi = lo; + for (auto &p : points_) + lo = std::min(lo, p.y()), hi = std::max(hi, p.y()); + return Interval(lo, hi); +} + +/** + * \brief Evaluate the piecewise linear function + * \param[in] x The x value to input into the function + * \param[inout] span Initial guess for span + * \param[in] updateSpan Set to true to update span + * + * Evaluate Pwl, optionally supplying an initial guess for the + * "span". The "span" may be optionally be updated. If you want to know + * the "span" value but don't have an initial guess you can set it to + * -1. + * + * \return The result of evaluating the piecewise linear function at position \a x + */ +double Pwl::eval(double x, int *span, bool updateSpan) const +{ + int index = findSpan(x, span && *span != -1 + ? *span + : points_.size() / 2 - 1); + if (span && updateSpan) + *span = index; + return points_[index].y() + + (x - points_[index].x()) * (points_[index + 1].y() - points_[index].y()) / + (points_[index + 1].x() - points_[index].x()); +} + +int Pwl::findSpan(double x, int span) const +{ + /* + * Pwls are generally small, so linear search may well be faster than + * binary, though could review this if large Pwls start turning up. 
+ */ + int lastSpan = points_.size() - 2; + /* + * some algorithms may call us with span pointing directly at the last + * control point + */ + span = std::max(0, std::min(lastSpan, span)); + while (span < lastSpan && x >= points_[span + 1].x()) + span++; + while (span && x < points_[span].x()) + span--; + return span; +} + +/** + * \brief Compute the inverse function + * \param[in] eps Epsilon for the minimum x distance between points (optional) + * + * The output includes whether the resulting inverse function is a proper + * (true) inverse, or only a best effort (e.g. input was non-monotonic). + * + * \return A pair of the inverse piecewise linear function, and whether or not + * the result is a proper/true inverse + */ +std::pair<Pwl, bool> Pwl::inverse(const double eps) const +{ + bool appended = false, prepended = false, neither = false; + Pwl inverse; + + for (Point const &p : points_) { + if (inverse.empty()) { + inverse.append(p.y(), p.x(), eps); + } else if (std::abs(inverse.points_.back().x() - p.y()) <= eps || + std::abs(inverse.points_.front().x() - p.y()) <= eps) { + /* do nothing */; + } else if (p.y() > inverse.points_.back().x()) { + inverse.append(p.y(), p.x(), eps); + appended = true; + } else if (p.y() < inverse.points_.front().x()) { + inverse.prepend(p.y(), p.x(), eps); + prepended = true; + } else { + neither = true; + } + } + + /* + * This is not a proper inverse if we found ourselves putting points + * onto both ends of the inverse, or if there were points that couldn't + * go on either. + */ + bool trueInverse = !(neither || (appended && prepended)); + + return { inverse, trueInverse }; +} + +/** + * \brief Compose two piecewise linear functions together + * \param[in] other The "other" piecewise linear function + * \param[in] eps Epsilon for the minimum x distance between points (optional) + * + * The "this" function is done first, and "other" after. 
+ * + * \return The composed piecewise linear function + */ +Pwl Pwl::compose(Pwl const &other, const double eps) const +{ + double thisX = points_[0].x(), thisY = points_[0].y(); + int thisSpan = 0, otherSpan = other.findSpan(thisY, 0); + Pwl result({ Point({ thisX, other.eval(thisY, &otherSpan, false) }) }); + + while (thisSpan != (int)points_.size() - 1) { + double dx = points_[thisSpan + 1].x() - points_[thisSpan].x(), + dy = points_[thisSpan + 1].y() - points_[thisSpan].y(); + if (std::abs(dy) > eps && + otherSpan + 1 < (int)other.points_.size() && + points_[thisSpan + 1].y() >= other.points_[otherSpan + 1].x() + eps) { + /* + * next control point in result will be where this + * function's y reaches the next span in other + */ + thisX = points_[thisSpan].x() + + (other.points_[otherSpan + 1].x() - + points_[thisSpan].y()) * + dx / dy; + thisY = other.points_[++otherSpan].x(); + } else if (std::abs(dy) > eps && otherSpan > 0 && + points_[thisSpan + 1].y() <= + other.points_[otherSpan - 1].x() - eps) { + /* + * next control point in result will be where this + * function's y reaches the previous span in other + */ + thisX = points_[thisSpan].x() + + (other.points_[otherSpan + 1].x() - + points_[thisSpan].y()) * + dx / dy; + thisY = other.points_[--otherSpan].x(); + } else { + /* we stay in the same span in other */ + thisSpan++; + thisX = points_[thisSpan].x(), + thisY = points_[thisSpan].y(); + } + result.append(thisX, other.eval(thisY, &otherSpan, false), + eps); + } + return result; +} + +/** + * \brief Apply function to (x, y) values at every control point + * \param[in] f Function to be applied + */ +void Pwl::map(std::function<void(double x, double y)> f) const +{ + for (auto &pt : points_) + f(pt.x(), pt.y()); +} + +/** + * \brief Apply function to (x, y0, y1) values wherever either Pwl has a + * control point. + * \param[in] pwl0 First piecewise linear function + * \param[in] pwl1 Second piecewise linear function + * \param[in] f Function to be applied + * + * This applies the function \a f to every parameter (x, y0, y1), where x is + * the combined list of x-values from \a pwl0 and \a pwl1, y0 is the y-value + * for the given x in \a pwl0, and y1 is the y-value for the same x in \a pwl1. + */ +void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1, + std::function<void(double x, double y0, double y1)> f) +{ + int span0 = 0, span1 = 0; + double x = std::min(pwl0.points_[0].x(), pwl1.points_[0].x()); + f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); + + while (span0 < (int)pwl0.points_.size() - 1 || + span1 < (int)pwl1.points_.size() - 1) { + if (span0 == (int)pwl0.points_.size() - 1) + x = pwl1.points_[++span1].x(); + else if (span1 == (int)pwl1.points_.size() - 1) + x = pwl0.points_[++span0].x(); + else if (pwl0.points_[span0 + 1].x() > pwl1.points_[span1 + 1].x()) + x = pwl1.points_[++span1].x(); + else + x = pwl0.points_[++span0].x(); + f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); + } +} + +/** + * \brief Combine two Pwls + * \param[in] pwl0 First piecewise linear function + * \param[in] pwl1 Second piecewise linear function + * \param[in] f Function to be applied + * \param[in] eps Epsilon for the minimum x distance between points (optional) + * + * Create a new Pwl where the y values are given by running \a f wherever + * either pwl has a knot. 
+ * + * \return The combined pwl + */ +Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1, + std::function<double(double x, double y0, double y1)> f, + const double eps) +{ + Pwl result; + map2(pwl0, pwl1, [&](double x, double y0, double y1) { + result.append(x, f(x, y0, y1), eps); + }); + return result; +} + +/** + * \brief Multiply the piecewise linear function + * \param[in] d Scalar multiplier to multiply the function by + * \return This function, after it has been multiplied by \a d + */ +Pwl &Pwl::operator*=(double d) +{ + for (auto &pt : points_) + pt[1] *= d; + return *this; +} + +/** + * \brief Assemble and return a string describing the piecewise linear function + * \return A string describing the piecewise linear function + */ +std::string Pwl::toString() const +{ + std::stringstream ss; + ss << "Pwl { "; + for (auto &p : points_) + ss << "(" << p.x() << ", " << p.y() << ") "; + ss << "}"; + return ss.str(); +} + +} /* namespace ipa */ + +#ifndef __DOXYGEN__ +/* + * The YAML data shall be a list of numerical values with an even number of + * elements. They are parsed in pairs into x and y points in the piecewise + * linear function, and added in order. x must be monotonically increasing. + */ +template<> +std::optional<ipa::Pwl> +YamlObject::Getter<ipa::Pwl>::get(const YamlObject &obj) const +{ + if (!obj.size() || obj.size() % 2) + return std::nullopt; + + ipa::Pwl pwl; + + const auto &list = obj.asList(); + + for (auto it = list.begin(); it != list.end(); it++) { + auto x = it->get<double>(); + if (!x) + return std::nullopt; + auto y = (++it)->get<double>(); + if (!y) + return std::nullopt; + + pwl.append(*x, *y); + } + + if (pwl.size() != obj.size() / 2) + return std::nullopt; + + return pwl; +} +#endif /* __DOXYGEN__ */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h new file mode 100644 index 00000000..8fdc7053 --- /dev/null +++ b/src/ipa/libipa/pwl.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2019, Raspberry Pi Ltd + * + * Piecewise linear functions interface + */ +#pragma once + +#include <algorithm> +#include <functional> +#include <string> +#include <utility> +#include <vector> + +#include "libcamera/internal/vector.h" + +namespace libcamera { + +namespace ipa { + +class Pwl +{ +public: + using Point = Vector<double, 2>; + + struct Interval { + Interval(double _start, double _end) + : start(_start), end(_end) {} + + bool contains(double value) + { + return value >= start && value <= end; + } + + double clamp(double value) + { + return std::clamp(value, start, end); + } + + double length() const { return end - start; } + + double start, end; + }; + + Pwl(); + Pwl(const std::vector<Point> &points); + Pwl(std::vector<Point> &&points); + + void append(double x, double y, double eps = 1e-6); + + bool empty() const { return points_.empty(); } + size_t size() const { return points_.size(); } + + Interval domain() const; + Interval range() const; + + double eval(double x, int *span = nullptr, + bool updateSpan = true) const; + + std::pair<Pwl, bool> inverse(double eps = 1e-6) const; + Pwl compose(const Pwl &other, double eps = 1e-6) const; + + void map(std::function<void(double x, double y)> f) const; + + static Pwl + combine(const Pwl &pwl0, const Pwl &pwl1, + std::function<double(double x, double y0, double y1)> f, + double eps = 1e-6); + + Pwl &operator*=(double d); + + std::string toString() const; + +private: + static void map2(const Pwl &pwl0, const Pwl &pwl1, + std::function<void(double x, 
double y0, double y1)> f);
+	void prepend(double x, double y, double eps = 1e-6);
+	int findSpan(double x, int span) const;
+
+	std::vector<Point> points_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */