Diffstat (limited to 'src/ipa/rpi/controller')
25 files changed, 350 insertions, 255 deletions
diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h index 1132de7e..fdaa10e6 100644 --- a/src/ipa/rpi/controller/agc_algorithm.h +++ b/src/ipa/rpi/controller/agc_algorithm.h @@ -23,15 +23,19 @@ public: virtual std::vector<double> const &getWeights() const = 0; virtual void setEv(unsigned int channel, double ev) = 0; virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0; - virtual void setFixedShutter(unsigned int channel, - libcamera::utils::Duration fixedShutter) = 0; - virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0; + virtual void setFixedExposureTime(unsigned int channel, + libcamera::utils::Duration fixedExposureTime) = 0; + virtual void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) = 0; virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0; virtual void setMeteringMode(std::string const &meteringModeName) = 0; virtual void setExposureMode(std::string const &exposureModeName) = 0; virtual void setConstraintMode(std::string const &contraintModeName) = 0; - virtual void enableAuto() = 0; - virtual void disableAuto() = 0; + virtual void enableAutoExposure() = 0; + virtual void disableAutoExposure() = 0; + virtual bool autoExposureEnabled() const = 0; + virtual void enableAutoGain() = 0; + virtual void disableAutoGain() = 0; + virtual bool autoGainEnabled() const = 0; virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0; }; diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h index c7c87b83..9308b156 100644 --- a/src/ipa/rpi/controller/agc_status.h +++ b/src/ipa/rpi/controller/agc_status.h @@ -28,7 +28,7 @@ struct AgcStatus { libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */ - libcamera::utils::Duration shutterTime; + libcamera::utils::Duration exposureTime; double analogueGain; std::string exposureMode; std::string constraintMode; @@ -36,7 +36,7 @@ struct AgcStatus { double ev; libcamera::utils::Duration flickerPeriod; int floatingRegionEnable; - libcamera::utils::Duration fixedShutter; + libcamera::utils::Duration fixedExposureTime; double fixedAnalogueGain; unsigned int channel; HdrStatus hdr; diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h index 1779b050..d941ed4e 100644 --- a/src/ipa/rpi/controller/awb_algorithm.h +++ b/src/ipa/rpi/controller/awb_algorithm.h @@ -19,6 +19,7 @@ public: virtual void initialValues(double &gainR, double &gainB) = 0; virtual void setMode(std::string const &modeName) = 0; virtual void setManualGains(double manualR, double manualB) = 0; + virtual void setColourTemperature(double temperatureK) = 0; virtual void enableAuto() = 0; virtual void disableAuto() = 0; }; diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h index 4fdb5b85..61162b32 100644 --- a/src/ipa/rpi/controller/camera_mode.h +++ b/src/ipa/rpi/controller/camera_mode.h @@ -50,9 +50,9 @@ struct CameraMode { double sensitivity; /* pixel clock rate */ uint64_t pixelRate; - /* Mode specific shutter speed limits */ - libcamera::utils::Duration minShutter; - libcamera::utils::Duration maxShutter; + /* Mode specific exposure time limits */ + libcamera::utils::Duration minExposureTime; + libcamera::utils::Duration maxExposureTime; /* Mode specific analogue gain limits */ 
double minAnalogueGain; double maxAnalogueGain; diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp index e0131018..651fff63 100644 --- a/src/ipa/rpi/controller/controller.cpp +++ b/src/ipa/rpi/controller/controller.cpp @@ -39,6 +39,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap .pipelineWidth = 13, .statsInline = false, .minPixelProcessingTime = 0s, + .dataBufferStrided = true, } }, { @@ -71,6 +72,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap * frames wider than ~16,000 pixels. */ .minPixelProcessingTime = 1.0us / 380, + .dataBufferStrided = false, } }, }; diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h index eff520bd..fdb46557 100644 --- a/src/ipa/rpi/controller/controller.h +++ b/src/ipa/rpi/controller/controller.h @@ -49,6 +49,7 @@ public: unsigned int pipelineWidth; bool statsInline; libcamera::utils::Duration minPixelProcessingTime; + bool dataBufferStrided; }; Controller(); diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp index 68100137..1695764d 100644 --- a/src/ipa/rpi/controller/device_status.cpp +++ b/src/ipa/rpi/controller/device_status.cpp @@ -10,7 +10,7 @@ using namespace libcamera; /* for the Duration operator<< overload */ std::ostream &operator<<(std::ostream &out, const DeviceStatus &d) { - out << "Exposure: " << d.shutterSpeed + out << "Exposure time: " << d.exposureTime << " Frame length: " << d.frameLength << " Line length: " << d.lineLength << " Gain: " << d.analogueGain; diff --git a/src/ipa/rpi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h index 518f15b5..b1792035 100644 --- a/src/ipa/rpi/controller/device_status.h +++ b/src/ipa/rpi/controller/device_status.h @@ -12,21 +12,21 @@ #include <libcamera/base/utils.h> /* - * Definition of "device metadata" which stores things like shutter time and + * Definition of "device metadata" which stores things like exposure time and * analogue gain that downstream control algorithms will want to know. 
*/ struct DeviceStatus { DeviceStatus() - : shutterSpeed(std::chrono::seconds(0)), frameLength(0), + : exposureTime(std::chrono::seconds(0)), frameLength(0), lineLength(std::chrono::seconds(0)), analogueGain(0.0) { } friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d); - /* time shutter is open */ - libcamera::utils::Duration shutterSpeed; + /* time the image is exposed */ + libcamera::utils::Duration exposureTime; /* frame length given in number of lines */ uint32_t frameLength; /* line length for the current frame */ diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp index ba5b25dd..13089839 100644 --- a/src/ipa/rpi/controller/histogram.cpp +++ b/src/ipa/rpi/controller/histogram.cpp @@ -4,7 +4,7 @@ * * histogram calculations */ -#include <math.h> +#include <cmath> #include <stdio.h> #include "histogram.h" @@ -49,9 +49,9 @@ double Histogram::interBinMean(double binLo, double binHi) const { assert(binHi >= binLo); double sumBinFreq = 0, cumulFreq = 0; - for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi); + for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi); binLo = binNext, binNext += 1.0) { - int bin = floor(binLo); + int bin = std::floor(binLo); double freq = (cumulative_[bin + 1] - cumulative_[bin]) * (std::min(binNext, binHi) - binLo); sumBinFreq += bin * freq; diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h index b4650d25..77d3b074 100644 --- a/src/ipa/rpi/controller/metadata.h +++ b/src/ipa/rpi/controller/metadata.h @@ -12,6 +12,7 @@ #include <map> #include <mutex> #include <string> +#include <utility> #include <libcamera/base/thread_annotations.h> @@ -36,10 +37,10 @@ public: } template<typename T> - void set(std::string const &tag, T const &value) + void set(std::string const &tag, T &&value) { std::scoped_lock lock(mutex_); - data_[tag] = value; + data_[tag] = std::forward<T>(value); } template<typename T> @@ -90,6 +91,12 @@ public: data_.insert(other.data_.begin(), other.data_.end()); } + void erase(std::string const &tag) + { + std::scoped_lock lock(mutex_); + eraseLocked(tag); + } + template<typename T> T *getLocked(std::string const &tag) { @@ -104,10 +111,18 @@ public: } template<typename T> - void setLocked(std::string const &tag, T const &value) + void setLocked(std::string const &tag, T &&value) { /* Use this only if you're holding the lock yourself. */ - data_[tag] = value; + data_[tag] = std::forward<T>(value); + } + + void eraseLocked(std::string const &tag) + { + auto it = data_.find(tag); + if (it == data_.end()) + return; + data_.erase(it); } /* diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp index 5ca76dd9..2157eb94 100644 --- a/src/ipa/rpi/controller/rpi/af.cpp +++ b/src/ipa/rpi/controller/rpi/af.cpp @@ -7,8 +7,8 @@ #include "af.h" +#include <cmath> #include <iomanip> -#include <math.h> #include <stdlib.h> #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp index fcf7aec9..02bfdb4a 100644 --- a/src/ipa/rpi/controller/rpi/agc.cpp +++ b/src/ipa/rpi/controller/rpi/agc.cpp @@ -74,22 +74,62 @@ int Agc::checkChannel(unsigned int channelIndex) const return 0; } -void Agc::disableAuto() +void Agc::disableAutoExposure() { - LOG(RPiAgc, Debug) << "disableAuto"; + LOG(RPiAgc, Debug) << "disableAutoExposure"; /* All channels are enabled/disabled together. 
*/ for (auto &data : channelData_) - data.channel.disableAuto(); + data.channel.disableAutoExposure(); } -void Agc::enableAuto() +void Agc::enableAutoExposure() { - LOG(RPiAgc, Debug) << "enableAuto"; + LOG(RPiAgc, Debug) << "enableAutoExposure"; /* All channels are enabled/disabled together. */ for (auto &data : channelData_) - data.channel.enableAuto(); + data.channel.enableAutoExposure(); +} + +bool Agc::autoExposureEnabled() const +{ + LOG(RPiAgc, Debug) << "autoExposureEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoExposureEnabled(); +} + +void Agc::disableAutoGain() +{ + LOG(RPiAgc, Debug) << "disableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.disableAutoGain(); +} + +void Agc::enableAutoGain() +{ + LOG(RPiAgc, Debug) << "enableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.enableAutoGain(); +} + +bool Agc::autoGainEnabled() const +{ + LOG(RPiAgc, Debug) << "autoGainEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoGainEnabled(); } unsigned int Agc::getConvergenceFrames() const @@ -127,21 +167,21 @@ void Agc::setFlickerPeriod(Duration flickerPeriod) data.channel.setFlickerPeriod(flickerPeriod); } -void Agc::setMaxShutter(Duration maxShutter) +void Agc::setMaxExposureTime(Duration maxExposureTime) { /* Frame durations will be the same across all channels too. */ for (auto &data : channelData_) - data.channel.setMaxShutter(maxShutter); + data.channel.setMaxExposureTime(maxExposureTime); } -void Agc::setFixedShutter(unsigned int channelIndex, Duration fixedShutter) +void Agc::setFixedExposureTime(unsigned int channelIndex, Duration fixedExposureTime) { if (checkChannel(channelIndex)) return; - LOG(RPiAgc, Debug) << "setFixedShutter " << fixedShutter + LOG(RPiAgc, Debug) << "setFixedExposureTime " << fixedExposureTime << " for channel " << channelIndex; - channelData_[channelIndex].channel.setFixedShutter(fixedShutter); + channelData_[channelIndex].channel.setFixedExposureTime(fixedExposureTime); } void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h index 5d056f02..c3a940bf 100644 --- a/src/ipa/rpi/controller/rpi/agc.h +++ b/src/ipa/rpi/controller/rpi/agc.h @@ -32,16 +32,20 @@ public: std::vector<double> const &getWeights() const override; void setEv(unsigned int channel, double ev) override; void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override; - void setMaxShutter(libcamera::utils::Duration maxShutter) override; - void setFixedShutter(unsigned int channelIndex, - libcamera::utils::Duration fixedShutter) override; + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) override; + void setFixedExposureTime(unsigned int channelIndex, + libcamera::utils::Duration fixedExposureTime) override; void setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) override; void setMeteringMode(std::string const &meteringModeName) override; void setExposureMode(std::string const &exposureModeName) override; void setConstraintMode(std::string const &contraintModeName) override; - void enableAuto() override; - void 
disableAuto() override; + void enableAutoExposure() override; + void disableAutoExposure() override; + bool autoExposureEnabled() const override; + void enableAutoGain() override; + void disableAutoGain() override; + bool autoGainEnabled() const override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; void prepare(Metadata *imageMetadata) override; void process(StatisticsPtr &stats, Metadata *imageMetadata) override; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp index cf2565a8..a5562760 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.cpp +++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp @@ -12,6 +12,10 @@ #include <libcamera/base/log.h> +#include "libcamera/internal/vector.h" + +#include "libipa/colours.h" + #include "../awb_status.h" #include "../device_status.h" #include "../histogram.h" @@ -65,7 +69,7 @@ int AgcExposureMode::read(const libcamera::YamlObject ¶ms) auto value = params["shutter"].getList<double>(); if (!value) return -EINVAL; - std::transform(value->begin(), value->end(), std::back_inserter(shutter), + std::transform(value->begin(), value->end(), std::back_inserter(exposureTime), [](double v) { return v * 1us; }); value = params["gain"].getList<double>(); @@ -73,13 +77,13 @@ int AgcExposureMode::read(const libcamera::YamlObject ¶ms) return -EINVAL; gain = std::move(*value); - if (shutter.size() < 2 || gain.size() < 2) { + if (exposureTime.size() < 2 || gain.size() < 2) { LOG(RPiAgc, Error) << "AgcExposureMode: must have at least two entries in exposure profile"; return -EINVAL; } - if (shutter.size() != gain.size()) { + if (exposureTime.size() != gain.size()) { LOG(RPiAgc, Error) << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile"; return -EINVAL; @@ -260,7 +264,7 @@ int AgcConfig::read(const libcamera::YamlObject ¶ms) } AgcChannel::ExposureValues::ExposureValues() - : shutter(0s), analogueGain(0), + : exposureTime(0s), analogueGain(0), totalExposure(0s), totalExposureNoDG(0s) { } @@ -269,7 +273,7 @@ AgcChannel::AgcChannel() : meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr), frameCount_(0), lockCount_(0), lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s), - maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0) + maxExposureTime_(0s), fixedExposureTime_(0s), fixedAnalogueGain_(0.0) { /* Set AWB default values in case early frames have no updates in metadata. */ awb_.gainR = 1.0; @@ -310,31 +314,49 @@ int AgcChannel::read(const libcamera::YamlObject ¶ms, exposureMode_ = &config_.exposureModes[exposureModeName_]; constraintModeName_ = config_.defaultConstraintMode; constraintMode_ = &config_.constraintModes[constraintModeName_]; - /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */ - status_.shutterTime = config_.defaultExposureTime; + /* Set up the "last exposure time/gain" values, in case AGC starts "disabled". 
*/ + status_.exposureTime = config_.defaultExposureTime; status_.analogueGain = config_.defaultAnalogueGain; return 0; } -void AgcChannel::disableAuto() +void AgcChannel::disableAutoExposure() +{ + fixedExposureTime_ = status_.exposureTime; +} + +void AgcChannel::enableAutoExposure() +{ + fixedExposureTime_ = 0s; +} + +bool AgcChannel::autoExposureEnabled() const +{ + return fixedExposureTime_ == 0s; +} + +void AgcChannel::disableAutoGain() { - fixedShutter_ = status_.shutterTime; fixedAnalogueGain_ = status_.analogueGain; } -void AgcChannel::enableAuto() +void AgcChannel::enableAutoGain() { - fixedShutter_ = 0s; fixedAnalogueGain_ = 0; } +bool AgcChannel::autoGainEnabled() const +{ + return fixedAnalogueGain_ == 0; +} + unsigned int AgcChannel::getConvergenceFrames() const { /* - * If shutter and gain have been explicitly set, there is no + * If exposure time and gain have been explicitly set, there is no * convergence to happen, so no need to drop any frames - return zero. */ - if (fixedShutter_ && fixedAnalogueGain_) + if (fixedExposureTime_ && fixedAnalogueGain_) return 0; else return config_.convergenceFrames; @@ -362,16 +384,16 @@ void AgcChannel::setFlickerPeriod(Duration flickerPeriod) flickerPeriod_ = flickerPeriod; } -void AgcChannel::setMaxShutter(Duration maxShutter) +void AgcChannel::setMaxExposureTime(Duration maxExposureTime) { - maxShutter_ = maxShutter; + maxExposureTime_ = maxExposureTime; } -void AgcChannel::setFixedShutter(Duration fixedShutter) +void AgcChannel::setFixedExposureTime(Duration fixedExposureTime) { - fixedShutter_ = fixedShutter; + fixedExposureTime_ = fixedExposureTime; /* Set this in case someone calls disableAuto() straight after. */ - status_.shutterTime = limitShutter(fixedShutter_); + status_.exposureTime = limitExposureTime(fixedExposureTime_); } void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain) @@ -411,22 +433,22 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, double lastSensitivity = mode_.sensitivity; mode_ = cameraMode; - Duration fixedShutter = limitShutter(fixedShutter_); - if (fixedShutter && fixedAnalogueGain_) { + Duration fixedExposureTime = limitExposureTime(fixedExposureTime_); + if (fixedExposureTime && fixedAnalogueGain_) { /* We're going to reset the algorithm here with these fixed values. */ fetchAwbStatus(metadata); double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); /* This is the equivalent of computeTargetExposure and applyDigitalGain. */ - target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_; + target_.totalExposureNoDG = fixedExposureTime_ * fixedAnalogueGain_; target_.totalExposure = target_.totalExposureNoDG / minColourGain; /* Equivalent of filterExposure. This resets any "history". */ filtered_ = target_; /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter; + filtered_.exposureTime = fixedExposureTime; filtered_.analogueGain = fixedAnalogueGain_; } else if (status_.totalExposureValue) { /* @@ -448,14 +470,15 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, divideUpExposure(); } else { /* - * We come through here on startup, when at least one of the shutter - * or gain has not been fixed. We must still write those values out so - * that they will be applied immediately. We supply some arbitrary defaults - * for any that weren't set. + * We come through here on startup, when at least one of the + * exposure time or gain has not been fixed. 
We must still + * write those values out so that they will be applied + * immediately. We supply some arbitrary defaults for any that + * weren't set. */ /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime; + filtered_.exposureTime = fixedExposureTime ? fixedExposureTime : config_.defaultExposureTime; filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain; } @@ -481,7 +504,7 @@ void AgcChannel::prepare(Metadata *imageMetadata) /* Process has run, so we have meaningful values. */ DeviceStatus deviceStatus; if (imageMetadata->get("device.status", deviceStatus) == 0) { - Duration actualExposure = deviceStatus.shutterSpeed * + Duration actualExposure = deviceStatus.exposureTime * deviceStatus.analogueGain; if (actualExposure) { double digitalGain = totalExposureValue / actualExposure; @@ -535,7 +558,7 @@ void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, */ bool desaturate = applyDigitalGain(gain, targetY, channelBound); /* - * The last thing is to divide up the exposure value into a shutter time + * The last thing is to divide up the exposure value into a exposure time * and analogue gain, according to the current exposure mode. */ divideUpExposure(); @@ -551,7 +574,7 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus) const double resetMargin = 1.5; /* Add 200us to the exposure time error to allow for line quantisation. */ - Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us; + Duration exposureError = lastDeviceStatus_.exposureTime * errorFactor + 200us; double gainError = lastDeviceStatus_.analogueGain * errorFactor; Duration targetError = lastTargetExposure_ * errorFactor; @@ -560,15 +583,15 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus) * the values we keep requesting may be unachievable. For this reason * we only insist that we're close to values in the past few frames. */ - if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError && - deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError && + if (deviceStatus.exposureTime > lastDeviceStatus_.exposureTime - exposureError && + deviceStatus.exposureTime < lastDeviceStatus_.exposureTime + exposureError && deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError && deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError && status_.targetExposureValue > lastTargetExposure_ - targetError && status_.targetExposureValue < lastTargetExposure_ + targetError) lockCount_ = std::min(lockCount_ + 1, maxLockCount); - else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError || - deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError || + else if (deviceStatus.exposureTime < lastDeviceStatus_.exposureTime - resetMargin * exposureError || + deviceStatus.exposureTime > lastDeviceStatus_.exposureTime + resetMargin * exposureError || deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError || deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError || status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError || @@ -586,11 +609,11 @@ void AgcChannel::housekeepConfig() { /* First fetch all the up-to-date settings, so no one else has to do it. 
*/ status_.ev = ev_; - status_.fixedShutter = limitShutter(fixedShutter_); + status_.fixedExposureTime = limitExposureTime(fixedExposureTime_); status_.fixedAnalogueGain = fixedAnalogueGain_; status_.flickerPeriod = flickerPeriod_; - LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter " - << status_.fixedShutter << " fixedAnalogueGain " + LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedExposureTime " + << status_.fixedExposureTime << " fixedAnalogueGain " << status_.fixedAnalogueGain; /* * Make sure the "mode" pointers point to the up-to-date things, if @@ -634,10 +657,10 @@ void AgcChannel::housekeepConfig() void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus) { - current_.shutter = deviceStatus.shutterSpeed; + current_.exposureTime = deviceStatus.exposureTime; current_.analogueGain = deviceStatus.analogueGain; current_.totalExposure = 0s; /* this value is unused */ - current_.totalExposureNoDG = current_.shutter * current_.analogueGain; + current_.totalExposureNoDG = current_.exposureTime * current_.analogueGain; } void AgcChannel::fetchAwbStatus(Metadata *imageMetadata) @@ -678,12 +701,13 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, * Note that the weights are applied by the IPA to the statistics directly, * before they are given to us here. */ - double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0; + RGB<double> sum{ 0.0 }; + double pixelSum = 0; for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) { auto ®ion = stats->agcRegions.get(i); - rSum += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); - gSum += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); - bSum += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); + sum.r() += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); + sum.g() += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); + sum.b() += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); pixelSum += region.counted; } if (pixelSum == 0.0) { @@ -691,14 +715,11 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, return 0; } - double ySum; /* Factor in the AWB correction if needed. */ - if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) { - ySum = rSum * awb.gainR * .299 + - gSum * awb.gainG * .587 + - bSum * awb.gainB * .114; - } else - ySum = rSum * .299 + gSum * .587 + bSum * .114; + if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) + sum *= RGB<double>{ { awb.gainR, awb.gainR, awb.gainB } }; + + double ySum = ipa::rec601LuminanceFromRGB(sum); return ySum / pixelSum / (1 << 16); } @@ -775,17 +796,17 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, void AgcChannel::computeTargetExposure(double gain) { - if (status_.fixedShutter && status_.fixedAnalogueGain) { + if (status_.fixedExposureTime && status_.fixedAnalogueGain) { /* - * When ag and shutter are both fixed, we need to drive the - * total exposure so that we end up with a digital gain of at least - * 1/minColourGain. Otherwise we'd desaturate channels causing - * white to go cyan or magenta. + * When analogue gain and exposure time are both fixed, we need + * to drive the total exposure so that we end up with a digital + * gain of at least 1/minColourGain. Otherwise we'd desaturate + * channels causing white to go cyan or magenta. 
*/ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); target_.totalExposure = - status_.fixedShutter * status_.fixedAnalogueGain / minColourGain; + status_.fixedExposureTime * status_.fixedAnalogueGain / minColourGain; } else { /* * The statistics reflect the image without digital gain, so the final @@ -793,12 +814,12 @@ void AgcChannel::computeTargetExposure(double gain) */ target_.totalExposure = current_.totalExposureNoDG * gain; /* The final target exposure is also limited to what the exposure mode allows. */ - Duration maxShutter = status_.fixedShutter - ? status_.fixedShutter - : exposureMode_->shutter.back(); - maxShutter = limitShutter(maxShutter); + Duration maxExposureTime = status_.fixedExposureTime + ? status_.fixedExposureTime + : exposureMode_->exposureTime.back(); + maxExposureTime = limitExposureTime(maxExposureTime); Duration maxTotalExposure = - maxShutter * + maxExposureTime * (status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain : exposureMode_->gain.back()); @@ -882,12 +903,16 @@ void AgcChannel::filterExposure() double stableRegion = config_.stableRegion; /* - * AGC adapts instantly if both shutter and gain are directly specified - * or we're in the startup phase. + * AGC adapts instantly if both exposure time and gain are directly + * specified or we're in the startup phase. Also disable the stable + * region, because we want to reflect any user exposure/gain updates, + * however small. */ - if ((status_.fixedShutter && status_.fixedAnalogueGain) || - frameCount_ <= config_.startupFrames) + if ((status_.fixedExposureTime && status_.fixedAnalogueGain) || + frameCount_ <= config_.startupFrames) { speed = 1.0; + stableRegion = 0.0; + } if (!filtered_.totalExposure) { filtered_.totalExposure = target_.totalExposure; } else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure && @@ -911,34 +936,34 @@ void AgcChannel::filterExposure() void AgcChannel::divideUpExposure() { /* - * Sending the fixed shutter/gain cases through the same code may seem - * unnecessary, but it will make more sense when extend this to cover - * variable aperture. + * Sending the fixed exposure time/gain cases through the same code may + * seem unnecessary, but it will make more sense when extend this to + * cover variable aperture. */ Duration exposureValue = filtered_.totalExposureNoDG; - Duration shutterTime; + Duration exposureTime; double analogueGain; - shutterTime = status_.fixedShutter ? status_.fixedShutter - : exposureMode_->shutter[0]; - shutterTime = limitShutter(shutterTime); + exposureTime = status_.fixedExposureTime ? status_.fixedExposureTime + : exposureMode_->exposureTime[0]; + exposureTime = limitExposureTime(exposureTime); analogueGain = status_.fixedAnalogueGain != 0.0 ? 
status_.fixedAnalogueGain : exposureMode_->gain[0]; analogueGain = limitGain(analogueGain); - if (shutterTime * analogueGain < exposureValue) { + if (exposureTime * analogueGain < exposureValue) { for (unsigned int stage = 1; stage < exposureMode_->gain.size(); stage++) { - if (!status_.fixedShutter) { - Duration stageShutter = - limitShutter(exposureMode_->shutter[stage]); - if (stageShutter * analogueGain >= exposureValue) { - shutterTime = exposureValue / analogueGain; + if (!status_.fixedExposureTime) { + Duration stageExposureTime = + limitExposureTime(exposureMode_->exposureTime[stage]); + if (stageExposureTime * analogueGain >= exposureValue) { + exposureTime = exposureValue / analogueGain; break; } - shutterTime = stageShutter; + exposureTime = stageExposureTime; } if (status_.fixedAnalogueGain == 0.0) { - if (exposureMode_->gain[stage] * shutterTime >= exposureValue) { - analogueGain = exposureValue / shutterTime; + if (exposureMode_->gain[stage] * exposureTime >= exposureValue) { + analogueGain = exposureValue / exposureTime; break; } analogueGain = exposureMode_->gain[stage]; @@ -946,18 +971,19 @@ void AgcChannel::divideUpExposure() } } } - LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and " - << analogueGain; + LOG(RPiAgc, Debug) + << "Divided up exposure time and gain are " << exposureTime + << " and " << analogueGain; /* - * Finally adjust shutter time for flicker avoidance (require both - * shutter and gain not to be fixed). + * Finally adjust exposure time for flicker avoidance (require both + * exposure time and gain not to be fixed). */ - if (!status_.fixedShutter && !status_.fixedAnalogueGain && + if (!status_.fixedExposureTime && !status_.fixedAnalogueGain && status_.flickerPeriod) { - int flickerPeriods = shutterTime / status_.flickerPeriod; + int flickerPeriods = exposureTime / status_.flickerPeriod; if (flickerPeriods) { - Duration newShutterTime = flickerPeriods * status_.flickerPeriod; - analogueGain *= shutterTime / newShutterTime; + Duration newExposureTime = flickerPeriods * status_.flickerPeriod; + analogueGain *= exposureTime / newExposureTime; /* * We should still not allow the ag to go over the * largest value in the exposure mode. Note that this @@ -966,12 +992,12 @@ void AgcChannel::divideUpExposure() */ analogueGain = std::min(analogueGain, exposureMode_->gain.back()); analogueGain = limitGain(analogueGain); - shutterTime = newShutterTime; + exposureTime = newExposureTime; } - LOG(RPiAgc, Debug) << "After flicker avoidance, shutter " - << shutterTime << " gain " << analogueGain; + LOG(RPiAgc, Debug) << "After flicker avoidance, exposure time " + << exposureTime << " gain " << analogueGain; } - filtered_.shutter = shutterTime; + filtered_.exposureTime = exposureTime; filtered_.analogueGain = analogueGain; } @@ -979,7 +1005,7 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) { status_.totalExposureValue = filtered_.totalExposure; status_.targetExposureValue = desaturate ? 
0s : target_.totalExposure; - status_.shutterTime = filtered_.shutter; + status_.exposureTime = filtered_.exposureTime; status_.analogueGain = filtered_.analogueGain; /* * Write to metadata as well, in case anyone wants to update the camera @@ -988,32 +1014,32 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) imageMetadata->set("agc.status", status_); LOG(RPiAgc, Debug) << "Output written, total exposure requested is " << filtered_.totalExposure; - LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter + LOG(RPiAgc, Debug) << "Camera exposure update: exposure time " << filtered_.exposureTime << " analogue gain " << filtered_.analogueGain; } -Duration AgcChannel::limitShutter(Duration shutter) +Duration AgcChannel::limitExposureTime(Duration exposureTime) { /* - * shutter == 0 is a special case for fixed shutter values, and must pass - * through unchanged + * exposureTime == 0 is a special case for fixed exposure time values, + * and must pass through unchanged. */ - if (!shutter) - return shutter; + if (!exposureTime) + return exposureTime; - shutter = std::clamp(shutter, mode_.minShutter, maxShutter_); - return shutter; + exposureTime = std::clamp(exposureTime, mode_.minExposureTime, maxExposureTime_); + return exposureTime; } double AgcChannel::limitGain(double gain) const { /* - * Only limit the lower bounds of the gain value to what the sensor limits. - * The upper bound on analogue gain will be made up with additional digital - * gain applied by the ISP. + * Only limit the lower bounds of the gain value to what the sensor + * limits. The upper bound on analogue gain will be made up with + * additional digital gain applied by the ISP. * - * gain == 0.0 is a special case for fixed shutter values, and must pass - * through unchanged + * gain == 0.0 is a special case for fixed exposure time values, and + * must pass through unchanged. 
*/ if (!gain) return gain; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h index 58368889..fa697e6f 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.h +++ b/src/ipa/rpi/controller/rpi/agc_channel.h @@ -30,7 +30,7 @@ struct AgcMeteringMode { }; struct AgcExposureMode { - std::vector<libcamera::utils::Duration> shutter; + std::vector<libcamera::utils::Duration> exposureTime; std::vector<double> gain; int read(const libcamera::YamlObject ¶ms); }; @@ -90,14 +90,18 @@ public: std::vector<double> const &getWeights() const; void setEv(double ev); void setFlickerPeriod(libcamera::utils::Duration flickerPeriod); - void setMaxShutter(libcamera::utils::Duration maxShutter); - void setFixedShutter(libcamera::utils::Duration fixedShutter); + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime); + void setFixedExposureTime(libcamera::utils::Duration fixedExposureTime); void setFixedAnalogueGain(double fixedAnalogueGain); void setMeteringMode(std::string const &meteringModeName); void setExposureMode(std::string const &exposureModeName); void setConstraintMode(std::string const &contraintModeName); - void enableAuto(); - void disableAuto(); + void enableAutoExposure(); + void disableAutoExposure(); + bool autoExposureEnabled() const; + void enableAutoGain(); + void disableAutoGain(); + bool autoGainEnabled() const; void switchMode(CameraMode const &cameraMode, Metadata *metadata); void prepare(Metadata *imageMetadata); void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata, @@ -117,7 +121,7 @@ private: bool applyDigitalGain(double gain, double targetY, bool channelBound); void divideUpExposure(); void writeAndFinish(Metadata *imageMetadata, bool desaturate); - libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter); + libcamera::utils::Duration limitExposureTime(libcamera::utils::Duration exposureTime); double limitGain(double gain) const; AgcMeteringMode *meteringMode_; AgcExposureMode *exposureMode_; @@ -128,7 +132,7 @@ private: struct ExposureValues { ExposureValues(); - libcamera::utils::Duration shutter; + libcamera::utils::Duration exposureTime; double analogueGain; libcamera::utils::Duration totalExposure; libcamera::utils::Duration totalExposureNoDG; /* without digital gain */ @@ -146,8 +150,8 @@ private: std::string constraintModeName_; double ev_; libcamera::utils::Duration flickerPeriod_; - libcamera::utils::Duration maxShutter_; - libcamera::utils::Duration fixedShutter_; + libcamera::utils::Duration maxExposureTime_; + libcamera::utils::Duration fixedExposureTime_; double fixedAnalogueGain_; }; diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp index 161fd455..21edb819 100644 --- a/src/ipa/rpi/controller/rpi/alsc.cpp +++ b/src/ipa/rpi/controller/rpi/alsc.cpp @@ -6,8 +6,8 @@ */ #include <algorithm> +#include <cmath> #include <functional> -#include <math.h> #include <numeric> #include <vector> @@ -252,12 +252,12 @@ static bool compareModes(CameraMode const &cm0, CameraMode const &cm1) */ if (cm0.transform != cm1.transform) return true; - int leftDiff = abs(cm0.cropX - cm1.cropX); - int topDiff = abs(cm0.cropY - cm1.cropY); - int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width - - cm1.cropX - cm1.scaleX * cm1.width); - int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height - - cm1.cropY - cm1.scaleY * cm1.height); + int leftDiff = std::abs(cm0.cropX - cm1.cropX); + int topDiff = std::abs(cm0.cropY - cm1.cropY); + int rightDiff = 
std::abs(cm0.cropX + cm0.scaleX * cm0.width - + cm1.cropX - cm1.scaleX * cm1.width); + int bottomDiff = std::abs(cm0.cropY + cm0.scaleY * cm0.height - + cm1.cropY - cm1.scaleY * cm1.height); /* * These thresholds are a rather arbitrary amount chosen to trigger * when carrying on with the previously calculated tables might be @@ -732,7 +732,7 @@ static double gaussSeidel2Sor(const SparseArray<double> &M, double omega, double maxDiff = 0; for (i = 0; i < XY; i++) { lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega; - if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff)) + if (std::abs(lambda[i] - oldLambda[i]) > std::abs(maxDiff)) maxDiff = lambda[i] - oldLambda[i]; } return maxDiff; @@ -764,7 +764,7 @@ static void runMatrixIterations(const Array2D<double> &C, constructM(C, W, M); double lastMaxDiff = std::numeric_limits<double>::max(); for (unsigned int i = 0; i < nIter; i++) { - double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); + double maxDiff = std::abs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); if (maxDiff < threshold) { LOG(RPiAlsc, Debug) << "Stop after " << i + 1 << " iterations"; diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp index f45525bc..8479ae40 100644 --- a/src/ipa/rpi/controller/rpi/awb.cpp +++ b/src/ipa/rpi/controller/rpi/awb.cpp @@ -6,6 +6,7 @@ */ #include <assert.h> +#include <cmath> #include <functional> #include <libcamera/base/log.h> @@ -20,6 +21,8 @@ using namespace libcamera; LOG_DEFINE_CATEGORY(RPiAwb) +constexpr double kDefaultCT = 4500.0; + #define NAME "rpi.awb" /* @@ -167,6 +170,14 @@ int AwbConfig::read(const libcamera::YamlObject ¶ms) whitepointB = params["whitepoint_b"].get<double>(0.0); if (bayes == false) sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */ + /* + * The biasProportion parameter adds a small proportion of the counted + * pixles to a region biased to the biasCT colour temperature. + * + * A typical value for biasProportion would be between 0.05 to 0.1. 
+ */ + biasProportion = params["bias_proportion"].get<double>(0.0); + biasCT = params["bias_ct"].get<double>(kDefaultCT); return 0; } @@ -214,7 +225,7 @@ void Awb::initialise() syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK); } else { /* random values just to stop the world blowing up */ - syncResults_.temperatureK = 4500; + syncResults_.temperatureK = kDefaultCT; syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0; } prevSyncResults_ = syncResults_; @@ -282,6 +293,24 @@ void Awb::setManualGains(double manualR, double manualB) } } +void Awb::setColourTemperature(double temperatureK) +{ + if (!config_.bayes) { + LOG(RPiAwb, Warning) << "AWB uncalibrated - cannot set colour temperature"; + return; + } + + temperatureK = config_.ctR.domain().clamp(temperatureK); + manualR_ = 1 / config_.ctR.eval(temperatureK); + manualB_ = 1 / config_.ctB.eval(temperatureK); + + syncResults_.temperatureK = temperatureK; + syncResults_.gainR = manualR_; + syncResults_.gainG = 1.0; + syncResults_.gainB = manualB_; + prevSyncResults_ = syncResults_; +} + void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode, Metadata *metadata) { @@ -407,7 +436,8 @@ void Awb::asyncFunc() static void generateStats(std::vector<Awb::RGB> &zones, StatisticsPtr &stats, double minPixels, - double minG, Metadata &globalMetadata) + double minG, Metadata &globalMetadata, + double biasProportion, double biasCtR, double biasCtB) { std::scoped_lock<RPiController::Metadata> l(globalMetadata); @@ -420,6 +450,14 @@ static void generateStats(std::vector<Awb::RGB> &zones, continue; zone.R = region.val.rSum / region.counted; zone.B = region.val.bSum / region.counted; + /* + * Add some bias samples to allow the search to tend to a + * bias CT in failure cases. + */ + const unsigned int proportion = biasProportion * region.counted; + zone.R += proportion * biasCtR; + zone.B += proportion * biasCtB; + zone.G += proportion * 1.0; /* Factor in the ALSC applied colour shading correction if required. */ const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status"); if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) { @@ -439,8 +477,11 @@ void Awb::prepareStats() * LSC has already been applied to the stats in this pipeline, so stop * any LSC compensation. We also ignore config_.fast in this version. */ + const double biasCtR = config_.bayes ? config_.ctR.eval(config_.biasCT) : 0; + const double biasCtB = config_.bayes ? config_.ctB.eval(config_.biasCT) : 0; generateStats(zones_, statistics_, config_.minPixels, - config_.minG, getGlobalMetadata()); + config_.minG, getGlobalMetadata(), + config_.biasProportion, biasCtR, biasCtB); /* * apply sensitivities, so values appear to come from our "canonical" * sensor. @@ -505,7 +546,7 @@ static double interpolateQuadatric(ipa::Pwl::Point const &a, ipa::Pwl::Point con const double eps = 1e-3; ipa::Pwl::Point ca = c - a, ba = b - a; double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x()); - if (abs(denominator) > eps) { + if (std::abs(denominator) > eps) { double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x(); double result = numerator / denominator + a.x(); return std::max(a.x(), std::min(c.x(), result)); @@ -716,7 +757,11 @@ void Awb::awbGrey() sumR += *ri, sumB += *bi; double gainR = sumR.G / (sumR.R + 1), gainB = sumB.G / (sumB.B + 1); - asyncResults_.temperatureK = 4500; /* don't know what it is */ + /* + * The grey world model can't estimate the colour temperature, use a + * default value. 
+ */ + asyncResults_.temperatureK = kDefaultCT; asyncResults_.gainR = gainR; asyncResults_.gainG = 1.0; asyncResults_.gainB = gainB; diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h index ab30f4fa..86640f8f 100644 --- a/src/ipa/rpi/controller/rpi/awb.h +++ b/src/ipa/rpi/controller/rpi/awb.h @@ -87,6 +87,10 @@ struct AwbConfig { double whitepointR; double whitepointB; bool bayes; /* use Bayesian algorithm */ + /* proportion of counted samples to add for the search bias */ + double biasProportion; + /* CT target for the search bias */ + double biasCT; }; class Awb : public AwbAlgorithm @@ -101,6 +105,7 @@ public: void initialValues(double &gainR, double &gainB) override; void setMode(std::string const &name) override; void setManualGains(double manualR, double manualB) override; + void setColourTemperature(double temperatureK) override; void enableAuto() override; void disableAuto() override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp index ea991df9..4c968f14 100644 --- a/src/ipa/rpi/controller/rpi/black_level.cpp +++ b/src/ipa/rpi/controller/rpi/black_level.cpp @@ -5,7 +5,6 @@ * black level control algorithm */ -#include <math.h> #include <stdint.h> #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp index aefa580c..8607f152 100644 --- a/src/ipa/rpi/controller/rpi/ccm.cpp +++ b/src/ipa/rpi/controller/rpi/ccm.cpp @@ -29,34 +29,7 @@ LOG_DEFINE_CATEGORY(RPiCcm) #define NAME "rpi.ccm" -Matrix::Matrix() -{ - memset(m, 0, sizeof(m)); -} -Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8) -{ - m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4, - m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8; -} -int Matrix::read(const libcamera::YamlObject ¶ms) -{ - double *ptr = (double *)m; - - if (params.size() != 9) { - LOG(RPiCcm, Error) << "Wrong number of values in CCM"; - return -EINVAL; - } - - for (const auto ¶m : params.asList()) { - auto value = param.get<double>(); - if (!value) - return -EINVAL; - *ptr++ = *value; - } - - return 0; -} +using Matrix3x3 = Matrix<double, 3, 3>; Ccm::Ccm(Controller *controller) : CcmAlgorithm(controller), saturation_(1.0) {} @@ -68,8 +41,6 @@ char const *Ccm::name() const int Ccm::read(const libcamera::YamlObject ¶ms) { - int ret; - if (params.contains("saturation")) { config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{}); if (config_.saturation.empty()) @@ -83,9 +54,12 @@ int Ccm::read(const libcamera::YamlObject ¶ms) CtCcm ctCcm; ctCcm.ct = *value; - ret = ctCcm.ccm.read(p["ccm"]); - if (ret) - return ret; + + auto ccm = p["ccm"].get<Matrix3x3>(); + if (!ccm) + return -EINVAL; + + ctCcm.ccm = *ccm; if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) { LOG(RPiCcm, Error) @@ -125,7 +99,7 @@ bool getLocked(Metadata *metadata, std::string const &tag, T &value) return true; } -Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) +Matrix3x3 calculateCcm(std::vector<CtCcm> const &ccms, double ct) { if (ct <= ccms.front().ct) return ccms.front().ccm; @@ -141,13 +115,20 @@ Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) } } -Matrix applySaturation(Matrix const &ccm, double saturation) +Matrix3x3 applySaturation(Matrix3x3 const &ccm, double saturation) { - Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 
0.500, 0.500, -0.419, - -0.081); - Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771, - 0.000); - Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation); + static const Matrix3x3 RGB2Y({ 0.299, 0.587, 0.114, + -0.169, -0.331, 0.500, + 0.500, -0.419, -0.081 }); + + static const Matrix3x3 Y2RGB({ 1.000, 0.000, 1.402, + 1.000, -0.345, -0.714, + 1.000, 1.771, 0.000 }); + + Matrix3x3 S({ 1, 0, 0, + 0, saturation, 0, + 0, 0, saturation }); + return Y2RGB * S * RGB2Y * ccm; } @@ -170,7 +151,7 @@ void Ccm::prepare(Metadata *imageMetadata) LOG(RPiCcm, Warning) << "no colour temperature found"; if (!luxOk) LOG(RPiCcm, Warning) << "no lux value found"; - Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK); + Matrix3x3 ccm = calculateCcm(config_.ccms, awb.temperatureK); double saturation = saturation_; struct CcmStatus ccmStatus; ccmStatus.saturation = saturation; @@ -181,7 +162,7 @@ void Ccm::prepare(Metadata *imageMetadata) for (int j = 0; j < 3; j++) for (int i = 0; i < 3; i++) ccmStatus.matrix[j * 3 + i] = - std::max(-8.0, std::min(7.9999, ccm.m[j][i])); + std::max(-8.0, std::min(7.9999, ccm[j][i])); LOG(RPiCcm, Debug) << "colour temperature " << awb.temperatureK << "K"; LOG(RPiCcm, Debug) diff --git a/src/ipa/rpi/controller/rpi/ccm.h b/src/ipa/rpi/controller/rpi/ccm.h index 4e5b33fe..c05dbb17 100644 --- a/src/ipa/rpi/controller/rpi/ccm.h +++ b/src/ipa/rpi/controller/rpi/ccm.h @@ -8,6 +8,7 @@ #include <vector> +#include "libcamera/internal/matrix.h" #include <libipa/pwl.h> #include "../ccm_algorithm.h" @@ -16,41 +17,9 @@ namespace RPiController { /* Algorithm to calculate colour matrix. Should be placed after AWB. */ -struct Matrix { - Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8); - Matrix(); - double m[3][3]; - int read(const libcamera::YamlObject ¶ms); -}; -static inline Matrix operator*(double d, Matrix const &m) -{ - return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d, - m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d, - m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d); -} -static inline Matrix operator*(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][0] * m2.m[0][j] + - m1.m[i][1] * m2.m[1][j] + - m1.m[i][2] * m2.m[2][j]; - return m; -} -static inline Matrix operator+(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][j] + m2.m[i][j]; - return m; -} - struct CtCcm { double ct; - Matrix ccm; + libcamera::Matrix<double, 3, 3> ccm; }; struct CcmConfig { diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp index 7b31faab..27b89a8f 100644 --- a/src/ipa/rpi/controller/rpi/lux.cpp +++ b/src/ipa/rpi/controller/rpi/lux.cpp @@ -4,7 +4,6 @@ * * Lux control algorithm */ -#include <math.h> #include <libcamera/base/log.h> @@ -41,7 +40,7 @@ int Lux::read(const libcamera::YamlObject ¶ms) auto value = params["reference_shutter_speed"].get<double>(); if (!value) return -EINVAL; - referenceShutterSpeed_ = *value * 1.0us; + referenceExposureTime_ = *value * 1.0us; value = params["reference_gain"].get<double>(); if (!value) @@ -83,11 +82,11 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata) double currentAperture = deviceStatus.aperture.value_or(currentAperture_); double currentY = stats->yHist.interQuantileMean(0, 1); double gainRatio = referenceGain_ / currentGain; - double shutterSpeedRatio = - 
referenceShutterSpeed_ / deviceStatus.shutterSpeed; + double exposureTimeRatio = + referenceExposureTime_ / deviceStatus.exposureTime; double apertureRatio = referenceAperture_ / currentAperture; double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_; - double estimatedLux = shutterSpeedRatio * gainRatio * + double estimatedLux = exposureTimeRatio * gainRatio * apertureRatio * apertureRatio * yRatio * referenceLux_; LuxStatus status; diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h index 89f441fc..da007fe9 100644 --- a/src/ipa/rpi/controller/rpi/lux.h +++ b/src/ipa/rpi/controller/rpi/lux.h @@ -32,7 +32,7 @@ private: * These values define the conditions of the reference image, against * which we compare the new image. */ - libcamera::utils::Duration referenceShutterSpeed_; + libcamera::utils::Duration referenceExposureTime_; double referenceGain_; double referenceAperture_; /* units of 1/f */ double referenceY_; /* out of 65536 */ diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp index 3f1c62cf..145175fb 100644 --- a/src/ipa/rpi/controller/rpi/noise.cpp +++ b/src/ipa/rpi/controller/rpi/noise.cpp @@ -5,7 +5,7 @@ * Noise control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -69,7 +69,7 @@ void Noise::prepare(Metadata *imageMetadata) * make some adjustments based on the camera mode (such as * binning), if we knew how to discover it... */ - double factor = sqrt(deviceStatus.analogueGain) / modeFactor_; + double factor = std::sqrt(deviceStatus.analogueGain) / modeFactor_; struct NoiseStatus status; status.noiseConstant = referenceConstant_ * factor; status.noiseSlope = referenceSlope_ * factor; diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp index 39537f4a..1d143ff5 100644 --- a/src/ipa/rpi/controller/rpi/sharpen.cpp +++ b/src/ipa/rpi/controller/rpi/sharpen.cpp @@ -5,7 +5,7 @@ * sharpening control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -68,7 +68,7 @@ void Sharpen::prepare(Metadata *imageMetadata) * we adjust the limit and threshold less aggressively. Using a sqrt * function is an arbitrary but gentle way of accomplishing this. */ - double userStrengthSqrt = sqrt(userStrength_); + double userStrengthSqrt = std::sqrt(userStrength_); struct SharpenStatus status; /* * Binned modes seem to need the sharpening toned down with this |
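
The agc_algorithm.h hunk above splits the old enableAuto()/disableAuto() pair into independent auto-exposure and auto-gain controls, each with a matching query method, so a caller can fix one quantity while the other keeps adapting. A minimal sketch of a caller using the split interface; the helper function and its calling context are assumptions, only the AgcAlgorithm methods come from the patch:

#include "agc_algorithm.h"

/*
 * Hypothetical helper: freeze the analogue gain at its current value while
 * leaving the exposure time under automatic control.
 */
void lockGainOnly(RPiController::AgcAlgorithm &agc)
{
        if (agc.autoGainEnabled())
                agc.disableAutoGain();          /* gain held at its last value */

        if (!agc.autoExposureEnabled())
                agc.enableAutoExposure();       /* exposure time keeps adapting */
}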
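
metadata.h now takes the value in set() and setLocked() by forwarding reference and std::forward()s it into the map, and gains erase()/eraseLocked(). A short sketch of what that changes for callers; the tag name and value type are purely illustrative:

#include <vector>

#include "metadata.h"

void metadataExample(RPiController::Metadata &metadata)
{
        std::vector<double> table(64, 1.0);

        /* Lvalue argument: T deduces to an lvalue reference, stored by copy. */
        metadata.set("example.table", table);

        /* Rvalue argument: forwarded, so the temporary is moved rather than copied. */
        metadata.set("example.table", std::vector<double>(64, 2.0));

        /* The new erase() removes the tag again, taking the lock internally. */
        metadata.erase("example.table");
}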
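
In agc_channel.cpp, computeInitialY() now accumulates the region sums into an RGB<double> vector and converts them to luma with ipa::rec601LuminanceFromRGB() instead of open-coding the 0.299/0.587/0.114 weights (note that the pre-white-balance branch of the hunk scales the green sum by awb.gainR, where the code it replaces used awb.gainG). A standalone sketch of the Rec. 601 weighting with made-up sums; the helper below is not the libipa function itself:

/* Rec. 601 luma from (optionally white-balanced) channel sums. */
double rec601Luminance(double r, double g, double b)
{
        return 0.299 * r + 0.587 * g + 0.114 * b;
}

/* e.g. rec601Luminance(1000.0, 2000.0, 500.0) == 299.0 + 1174.0 + 57.0 == 1530.0 */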
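
divideUpExposure()'s flicker-avoidance step is unchanged by the rename: the exposure time is rounded down to a whole number of flicker periods and the analogue gain is scaled up so the total exposure is preserved. A worked example mirroring that hunk; the numeric values are assumptions:

#include <chrono>

#include <libcamera/base/utils.h>

using libcamera::utils::Duration;
using namespace std::chrono_literals;

/* Worked example of the flicker-avoidance step in divideUpExposure(). */
Duration flickerExample(double &analogueGain)
{
        Duration exposureTime = 12000us;   /* requested by the exposure profile */
        Duration flickerPeriod = 10000us;  /* 100 Hz mains flicker */
        analogueGain = 2.0;

        int flickerPeriods = exposureTime / flickerPeriod;  /* 1.2 -> 1 whole period */
        if (flickerPeriods) {
                Duration newExposureTime = flickerPeriods * flickerPeriod;  /* 10 ms */
                analogueGain *= exposureTime / newExposureTime;  /* 2.0 * 1.2 == 2.4 */
                exposureTime = newExposureTime;
        }

        /* Total exposure (time x gain) is preserved: 24 ms-equivalent either way. */
        return exposureTime;
}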
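
The new Awb::setColourTemperature() clamps the requested temperature to the calibrated range and derives manual red/blue gains as the reciprocals of the ctR/ctB curve values, with green fixed at 1.0. A sketch of that mapping; the curve values and domain below are invented, not taken from any real tuning:

#include <algorithm>

struct ColourGains {
        double r, g, b;
};

/*
 * Invented stand-ins: suppose the tuning's ctR/ctB curves give r/g == 0.62
 * and b/g == 0.55 for a grey patch at the requested temperature.
 */
ColourGains gainsForColourTemperature(double temperatureK)
{
        /* Clamp to the calibrated domain, as the hunk does (range invented). */
        temperatureK = std::clamp(temperatureK, 2500.0, 8000.0);

        double ctR = 0.62;  /* stands in for config_.ctR.eval(temperatureK) */
        double ctB = 0.55;  /* stands in for config_.ctB.eval(temperatureK) */

        return { 1.0 / ctR, 1.0, 1.0 / ctB };  /* ~{ 1.61, 1.00, 1.82 } */
}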
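
ccm.cpp drops the local Matrix struct in favour of the shared libcamera::Matrix class, and applySaturation() composes Y2RGB * S * RGB2Y with the CCM. A standalone sanity check of that composition (the function below is an assumption, not part of the patch): with saturation == 0 the chroma rows are zeroed, and since the first column of Y2RGB is all ones, every row of the product collapses to the Rec. 601 luma weights, i.e. a fully desaturated output.

#include <cassert>
#include <cmath>

#include "libcamera/internal/matrix.h"

using Matrix3x3 = libcamera::Matrix<double, 3, 3>;

void checkDesaturation()
{
        const Matrix3x3 rgb2y({ 0.299, 0.587, 0.114,
                                -0.169, -0.331, 0.500,
                                0.500, -0.419, -0.081 });
        const Matrix3x3 y2rgb({ 1.000, 0.000, 1.402,
                                1.000, -0.345, -0.714,
                                1.000, 1.771, 0.000 });
        const Matrix3x3 s({ 1, 0, 0,
                            0, 0, 0,    /* saturation == 0 */
                            0, 0, 0 });

        Matrix3x3 grey = y2rgb * s * rgb2y;

        /* Every row is now { 0.299, 0.587, 0.114 }. */
        for (unsigned int j = 0; j < 3; j++)
                assert(std::abs(grey[j][1] - 0.587) < 1e-9);
}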
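
The lux estimate in lux.cpp keeps the same form after the rename: the reference lux is scaled by the ratios of reference to current exposure time, gain and aperture (squared), and by the ratio of measured to reference Y. A worked example with invented values (aperture ratio taken as 1, Y already normalised):

double estimateLuxExample()
{
        /* Invented reference tuning values. */
        double referenceLux = 1000.0;
        double referenceExposureTime = 10.0;    /* ms */
        double referenceGain = 1.0;
        double referenceY = 12000.0;            /* out of 65536 */

        /* Invented current-frame values. */
        double currentExposureTime = 20.0;      /* ms */
        double currentGain = 2.0;
        double currentY = 12000.0;              /* same normalised brightness */

        /*
         * Twice the exposure time and twice the gain for the same measured Y
         * means the scene is a quarter as bright: 250 lux.
         */
        return (referenceExposureTime / currentExposureTime) *  /* 0.5 */
               (referenceGain / currentGain) *                  /* 0.5 */
               (currentY / referenceY) *                        /* 1.0 */
               referenceLux;
}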