Diffstat (limited to 'src/ipa/rpi/controller')
78 files changed, 655 insertions, 848 deletions
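
A large part of this change renames "shutter" to "exposure time" throughout the AGC code and splits the single enableAuto()/disableAuto() pair in the AGC interface into independent auto-exposure and auto-gain controls (see agc_algorithm.h, agc.cpp and agc_channel.cpp in the diff below). The sketch that follows is a minimal standalone model of the new per-channel semantics only — a zero fixed exposure time means auto exposure is enabled, a zero fixed analogue gain means auto gain is enabled. It is not the libcamera classes themselves: in the real AgcChannel, disableAutoExposure() latches status_.exposureTime (the last value the algorithm produced) rather than taking a parameter, which is why AgcChannel::read() seeds status_ with the configured defaults.

#include <chrono>
#include <iostream>

using namespace std::chrono_literals;

/*
 * Minimal stand-in for one AGC channel, modelling only the enable/disable/query
 * semantics added in this diff: a zero fixed value means "auto" for that quantity.
 */
class AgcChannelModel
{
public:
	void disableAutoExposure(std::chrono::microseconds current) { fixedExposureTime_ = current; }
	void enableAutoExposure() { fixedExposureTime_ = 0us; }
	bool autoExposureEnabled() const { return fixedExposureTime_ == 0us; }

	void disableAutoGain(double currentGain) { fixedAnalogueGain_ = currentGain; }
	void enableAutoGain() { fixedAnalogueGain_ = 0.0; }
	bool autoGainEnabled() const { return fixedAnalogueGain_ == 0.0; }

private:
	std::chrono::microseconds fixedExposureTime_ = 0us;
	double fixedAnalogueGain_ = 0.0;
};

int main()
{
	AgcChannelModel channel;

	/* Freeze the exposure time at its current value but leave the gain on auto. */
	channel.disableAutoExposure(10000us);

	std::cout << std::boolalpha
		  << "auto exposure: " << channel.autoExposureEnabled()
		  << ", auto gain: " << channel.autoGainEnabled() << "\n";
	return 0;
}

The query methods in the real Agc front end only check the first channel because, as the comments added in agc.cpp note, all channels are enabled and disabled together.
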
diff --git a/src/ipa/rpi/controller/af_status.h b/src/ipa/rpi/controller/af_status.h index 92c08812..c1487cc4 100644 --- a/src/ipa/rpi/controller/af_status.h +++ b/src/ipa/rpi/controller/af_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Raspberry Pi Ltd * - * af_status.h - AF control algorithm status + * AF control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h index 534e38e2..fdaa10e6 100644 --- a/src/ipa/rpi/controller/agc_algorithm.h +++ b/src/ipa/rpi/controller/agc_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * agc_algorithm.h - AGC/AEC control algorithm interface + * AGC/AEC control algorithm interface */ #pragma once @@ -23,15 +23,19 @@ public: virtual std::vector<double> const &getWeights() const = 0; virtual void setEv(unsigned int channel, double ev) = 0; virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0; - virtual void setFixedShutter(unsigned int channel, - libcamera::utils::Duration fixedShutter) = 0; - virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0; + virtual void setFixedExposureTime(unsigned int channel, + libcamera::utils::Duration fixedExposureTime) = 0; + virtual void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) = 0; virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0; virtual void setMeteringMode(std::string const &meteringModeName) = 0; virtual void setExposureMode(std::string const &exposureModeName) = 0; virtual void setConstraintMode(std::string const &contraintModeName) = 0; - virtual void enableAuto() = 0; - virtual void disableAuto() = 0; + virtual void enableAutoExposure() = 0; + virtual void disableAutoExposure() = 0; + virtual bool autoExposureEnabled() const = 0; + virtual void enableAutoGain() = 0; + virtual void disableAutoGain() = 0; + virtual bool autoGainEnabled() const = 0; virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0; }; diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h index 68f89958..9308b156 100644 --- a/src/ipa/rpi/controller/agc_status.h +++ b/src/ipa/rpi/controller/agc_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * agc_status.h - AGC/AEC control algorithm status + * AGC/AEC control algorithm status */ #pragma once @@ -28,7 +28,7 @@ struct AgcStatus { libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */ - libcamera::utils::Duration shutterTime; + libcamera::utils::Duration exposureTime; double analogueGain; std::string exposureMode; std::string constraintMode; @@ -36,7 +36,7 @@ struct AgcStatus { double ev; libcamera::utils::Duration flickerPeriod; int floatingRegionEnable; - libcamera::utils::Duration fixedShutter; + libcamera::utils::Duration fixedExposureTime; double fixedAnalogueGain; unsigned int channel; HdrStatus hdr; diff --git a/src/ipa/rpi/controller/algorithm.cpp b/src/ipa/rpi/controller/algorithm.cpp index a957fde5..beed47a1 100644 --- a/src/ipa/rpi/controller/algorithm.cpp +++ b/src/ipa/rpi/controller/algorithm.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * algorithm.cpp - ISP control algorithms + * ISP control algorithms */ #include "algorithm.h" diff --git a/src/ipa/rpi/controller/algorithm.h b/src/ipa/rpi/controller/algorithm.h index 4aa814eb..1971bfdc 
100644 --- a/src/ipa/rpi/controller/algorithm.h +++ b/src/ipa/rpi/controller/algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * algorithm.h - ISP control algorithm interface + * ISP control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/alsc_status.h b/src/ipa/rpi/controller/alsc_status.h index 49a9f4a0..329e8a37 100644 --- a/src/ipa/rpi/controller/alsc_status.h +++ b/src/ipa/rpi/controller/alsc_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * alsc_status.h - ALSC (auto lens shading correction) control algorithm status + * ALSC (auto lens shading correction) control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h index 6009bdac..d941ed4e 100644 --- a/src/ipa/rpi/controller/awb_algorithm.h +++ b/src/ipa/rpi/controller/awb_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * awb_algorithm.h - AWB control algorithm interface + * AWB control algorithm interface */ #pragma once @@ -19,6 +19,7 @@ public: virtual void initialValues(double &gainR, double &gainB) = 0; virtual void setMode(std::string const &modeName) = 0; virtual void setManualGains(double manualR, double manualB) = 0; + virtual void setColourTemperature(double temperatureK) = 0; virtual void enableAuto() = 0; virtual void disableAuto() = 0; }; diff --git a/src/ipa/rpi/controller/awb_status.h b/src/ipa/rpi/controller/awb_status.h index dd5a79e3..125df1a0 100644 --- a/src/ipa/rpi/controller/awb_status.h +++ b/src/ipa/rpi/controller/awb_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * awb_status.h - AWB control algorithm status + * AWB control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/black_level_algorithm.h b/src/ipa/rpi/controller/black_level_algorithm.h index c2cff2f5..ce044e59 100644 --- a/src/ipa/rpi/controller/black_level_algorithm.h +++ b/src/ipa/rpi/controller/black_level_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023, Raspberry Pi Ltd * - * black_level_algorithm.h - black level control algorithm interface + * black level control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/black_level_status.h b/src/ipa/rpi/controller/black_level_status.h index fd5e4ccb..57a0705a 100644 --- a/src/ipa/rpi/controller/black_level_status.h +++ b/src/ipa/rpi/controller/black_level_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * black_level_status.h - black level control algorithm status + * black level control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/cac_status.h b/src/ipa/rpi/controller/cac_status.h index 475d4c5c..adffce41 100644 --- a/src/ipa/rpi/controller/cac_status.h +++ b/src/ipa/rpi/controller/cac_status.h @@ -6,8 +6,6 @@ */ #pragma once -#include "pwl.h" - struct CacStatus { std::vector<double> lutRx; std::vector<double> lutRy; diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h index 63b11778..61162b32 100644 --- a/src/ipa/rpi/controller/camera_mode.h +++ b/src/ipa/rpi/controller/camera_mode.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019-2020, Raspberry Pi Ltd * - * camera_mode.h - description of a particular operating mode of a sensor + * description of a particular operating mode of a sensor */ #pragma once @@ -50,9 +50,9 @@ struct CameraMode { double sensitivity; /* pixel clock rate */ uint64_t pixelRate; - /* Mode specific shutter speed limits */ - libcamera::utils::Duration minShutter; - 
libcamera::utils::Duration maxShutter; + /* Mode specific exposure time limits */ + libcamera::utils::Duration minExposureTime; + libcamera::utils::Duration maxExposureTime; /* Mode specific analogue gain limits */ double minAnalogueGain; double maxAnalogueGain; diff --git a/src/ipa/rpi/controller/ccm_algorithm.h b/src/ipa/rpi/controller/ccm_algorithm.h index e2c4d771..6678ba75 100644 --- a/src/ipa/rpi/controller/ccm_algorithm.h +++ b/src/ipa/rpi/controller/ccm_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * ccm_algorithm.h - CCM (colour correction matrix) control algorithm interface + * CCM (colour correction matrix) control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/ccm_status.h b/src/ipa/rpi/controller/ccm_status.h index 5e28ee7c..c81bcd42 100644 --- a/src/ipa/rpi/controller/ccm_status.h +++ b/src/ipa/rpi/controller/ccm_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * ccm_status.h - CCM (colour correction matrix) control algorithm status + * CCM (colour correction matrix) control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/contrast_algorithm.h b/src/ipa/rpi/controller/contrast_algorithm.h index 895b36b0..2e983350 100644 --- a/src/ipa/rpi/controller/contrast_algorithm.h +++ b/src/ipa/rpi/controller/contrast_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * contrast_algorithm.h - contrast (gamma) control algorithm interface + * contrast (gamma) control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/contrast_status.h b/src/ipa/rpi/controller/contrast_status.h index fb9fe4ba..1f175872 100644 --- a/src/ipa/rpi/controller/contrast_status.h +++ b/src/ipa/rpi/controller/contrast_status.h @@ -2,11 +2,11 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * contrast_status.h - contrast (gamma) control algorithm status + * contrast (gamma) control algorithm status */ #pragma once -#include "pwl.h" +#include "libipa/pwl.h" /* * The "contrast" algorithm creates a gamma curve, optionally doing a little bit @@ -14,7 +14,7 @@ */ struct ContrastStatus { - RPiController::Pwl gammaCurve; + libcamera::ipa::Pwl gammaCurve; double brightness; double contrast; }; diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp index 5ca98b98..651fff63 100644 --- a/src/ipa/rpi/controller/controller.cpp +++ b/src/ipa/rpi/controller/controller.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * controller.cpp - ISP controller + * ISP controller */ #include <assert.h> @@ -39,6 +39,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap .pipelineWidth = 13, .statsInline = false, .minPixelProcessingTime = 0s, + .dataBufferStrided = true, } }, { @@ -71,6 +72,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap * frames wider than ~16,000 pixels. 
*/ .minPixelProcessingTime = 1.0us / 380, + .dataBufferStrided = false, } }, }; diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h index 170aea74..fdb46557 100644 --- a/src/ipa/rpi/controller/controller.h +++ b/src/ipa/rpi/controller/controller.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * controller.h - ISP controller interface + * ISP controller interface */ #pragma once @@ -49,6 +49,7 @@ public: unsigned int pipelineWidth; bool statsInline; libcamera::utils::Duration minPixelProcessingTime; + bool dataBufferStrided; }; Controller(); diff --git a/src/ipa/rpi/controller/denoise_algorithm.h b/src/ipa/rpi/controller/denoise_algorithm.h index 444cbc25..b9a2a33c 100644 --- a/src/ipa/rpi/controller/denoise_algorithm.h +++ b/src/ipa/rpi/controller/denoise_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2021, Raspberry Pi Ltd * - * denoise.h - Denoise control algorithm interface + * Denoise control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/denoise_status.h b/src/ipa/rpi/controller/denoise_status.h index 4d2bd291..eead6086 100644 --- a/src/ipa/rpi/controller/denoise_status.h +++ b/src/ipa/rpi/controller/denoise_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019-2021, Raspberry Pi Ltd * - * denoise_status.h - Denoise control algorithm status + * Denoise control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp index c907efdd..1695764d 100644 --- a/src/ipa/rpi/controller/device_status.cpp +++ b/src/ipa/rpi/controller/device_status.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2021, Raspberry Pi Ltd * - * device_status.cpp - device (image sensor) status + * device (image sensor) status */ #include "device_status.h" @@ -10,7 +10,7 @@ using namespace libcamera; /* for the Duration operator<< overload */ std::ostream &operator<<(std::ostream &out, const DeviceStatus &d) { - out << "Exposure: " << d.shutterSpeed + out << "Exposure time: " << d.exposureTime << " Frame length: " << d.frameLength << " Line length: " << d.lineLength << " Gain: " << d.analogueGain; diff --git a/src/ipa/rpi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h index c45db749..b1792035 100644 --- a/src/ipa/rpi/controller/device_status.h +++ b/src/ipa/rpi/controller/device_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019-2021, Raspberry Pi Ltd * - * device_status.h - device (image sensor) status + * device (image sensor) status */ #pragma once @@ -12,21 +12,21 @@ #include <libcamera/base/utils.h> /* - * Definition of "device metadata" which stores things like shutter time and + * Definition of "device metadata" which stores things like exposure time and * analogue gain that downstream control algorithms will want to know. 
*/ struct DeviceStatus { DeviceStatus() - : shutterSpeed(std::chrono::seconds(0)), frameLength(0), + : exposureTime(std::chrono::seconds(0)), frameLength(0), lineLength(std::chrono::seconds(0)), analogueGain(0.0) { } friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d); - /* time shutter is open */ - libcamera::utils::Duration shutterSpeed; + /* time the image is exposed */ + libcamera::utils::Duration exposureTime; /* frame length given in number of lines */ uint32_t frameLength; /* line length for the current frame */ diff --git a/src/ipa/rpi/controller/dpc_status.h b/src/ipa/rpi/controller/dpc_status.h index 46d0cf34..9f30d5d9 100644 --- a/src/ipa/rpi/controller/dpc_status.h +++ b/src/ipa/rpi/controller/dpc_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * dpc_status.h - DPC (defective pixel correction) control algorithm status + * DPC (defective pixel correction) control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/geq_status.h b/src/ipa/rpi/controller/geq_status.h index 2d749fc9..cb107a48 100644 --- a/src/ipa/rpi/controller/geq_status.h +++ b/src/ipa/rpi/controller/geq_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * geq_status.h - GEQ (green equalisation) control algorithm status + * GEQ (green equalisation) control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/hdr_algorithm.h b/src/ipa/rpi/controller/hdr_algorithm.h index f622e099..b889d8fd 100644 --- a/src/ipa/rpi/controller/hdr_algorithm.h +++ b/src/ipa/rpi/controller/hdr_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023, Raspberry Pi Ltd * - * hdr_algorithm.h - HDR control algorithm interface + * HDR control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/hdr_status.h b/src/ipa/rpi/controller/hdr_status.h index 24b1a935..a4955778 100644 --- a/src/ipa/rpi/controller/hdr_status.h +++ b/src/ipa/rpi/controller/hdr_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023 Raspberry Pi Ltd * - * hdr_status.h - HDR control algorithm status + * HDR control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp index 78116141..13089839 100644 --- a/src/ipa/rpi/controller/histogram.cpp +++ b/src/ipa/rpi/controller/histogram.cpp @@ -2,9 +2,9 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * histogram.cpp - histogram calculations + * histogram calculations */ -#include <math.h> +#include <cmath> #include <stdio.h> #include "histogram.h" @@ -49,9 +49,9 @@ double Histogram::interBinMean(double binLo, double binHi) const { assert(binHi >= binLo); double sumBinFreq = 0, cumulFreq = 0; - for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi); + for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi); binLo = binNext, binNext += 1.0) { - int bin = floor(binLo); + int bin = std::floor(binLo); double freq = (cumulative_[bin + 1] - cumulative_[bin]) * (std::min(binNext, binHi) - binLo); sumBinFreq += bin * freq; diff --git a/src/ipa/rpi/controller/histogram.h b/src/ipa/rpi/controller/histogram.h index e2c5509b..ab4e5e31 100644 --- a/src/ipa/rpi/controller/histogram.h +++ b/src/ipa/rpi/controller/histogram.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * histogram.h - histogram calculation interface + * histogram calculation interface */ #pragma once diff --git a/src/ipa/rpi/controller/lux_status.h b/src/ipa/rpi/controller/lux_status.h index 5eb9faac..d8729f43 100644 --- a/src/ipa/rpi/controller/lux_status.h 
+++ b/src/ipa/rpi/controller/lux_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * lux_status.h - Lux control algorithm status + * Lux control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/meson.build b/src/ipa/rpi/controller/meson.build index 32a4d31c..74b74888 100644 --- a/src/ipa/rpi/controller/meson.build +++ b/src/ipa/rpi/controller/meson.build @@ -5,7 +5,6 @@ rpi_ipa_controller_sources = files([ 'controller.cpp', 'device_status.cpp', 'histogram.cpp', - 'pwl.cpp', 'rpi/af.cpp', 'rpi/agc.cpp', 'rpi/agc_channel.cpp', @@ -32,4 +31,5 @@ rpi_ipa_controller_deps = [ ] rpi_ipa_controller_lib = static_library('rpi_ipa_controller', rpi_ipa_controller_sources, + include_directories : libipa_includes, dependencies : rpi_ipa_controller_deps) diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h index a232dcb1..77d3b074 100644 --- a/src/ipa/rpi/controller/metadata.h +++ b/src/ipa/rpi/controller/metadata.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019-2021, Raspberry Pi Ltd * - * metadata.h - general metadata class + * general metadata class */ #pragma once @@ -12,6 +12,7 @@ #include <map> #include <mutex> #include <string> +#include <utility> #include <libcamera/base/thread_annotations.h> @@ -36,10 +37,10 @@ public: } template<typename T> - void set(std::string const &tag, T const &value) + void set(std::string const &tag, T &&value) { std::scoped_lock lock(mutex_); - data_[tag] = value; + data_[tag] = std::forward<T>(value); } template<typename T> @@ -90,6 +91,12 @@ public: data_.insert(other.data_.begin(), other.data_.end()); } + void erase(std::string const &tag) + { + std::scoped_lock lock(mutex_); + eraseLocked(tag); + } + template<typename T> T *getLocked(std::string const &tag) { @@ -104,10 +111,18 @@ public: } template<typename T> - void setLocked(std::string const &tag, T const &value) + void setLocked(std::string const &tag, T &&value) { /* Use this only if you're holding the lock yourself. 
*/ - data_[tag] = value; + data_[tag] = std::forward<T>(value); + } + + void eraseLocked(std::string const &tag) + { + auto it = data_.find(tag); + if (it == data_.end()) + return; + data_.erase(it); } /* diff --git a/src/ipa/rpi/controller/noise_status.h b/src/ipa/rpi/controller/noise_status.h index da194f71..1919da32 100644 --- a/src/ipa/rpi/controller/noise_status.h +++ b/src/ipa/rpi/controller/noise_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * noise_status.h - Noise control algorithm status + * Noise control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/pdaf_data.h b/src/ipa/rpi/controller/pdaf_data.h index 470510f2..779b987d 100644 --- a/src/ipa/rpi/controller/pdaf_data.h +++ b/src/ipa/rpi/controller/pdaf_data.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Raspberry Pi Ltd * - * pdaf_data.h - PDAF Metadata + * PDAF Metadata */ #pragma once diff --git a/src/ipa/rpi/controller/pwl.cpp b/src/ipa/rpi/controller/pwl.cpp deleted file mode 100644 index 70c2e24b..00000000 --- a/src/ipa/rpi/controller/pwl.cpp +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * pwl.cpp - piecewise linear functions - */ - -#include <cassert> -#include <cmath> -#include <stdexcept> - -#include "pwl.h" - -using namespace RPiController; - -int Pwl::read(const libcamera::YamlObject ¶ms) -{ - if (!params.size() || params.size() % 2) - return -EINVAL; - - const auto &list = params.asList(); - - for (auto it = list.begin(); it != list.end(); it++) { - auto x = it->get<double>(); - if (!x) - return -EINVAL; - if (it != list.begin() && *x <= points_.back().x) - return -EINVAL; - - auto y = (++it)->get<double>(); - if (!y) - return -EINVAL; - - points_.push_back(Point(*x, *y)); - } - - return 0; -} - -void Pwl::append(double x, double y, const double eps) -{ - if (points_.empty() || points_.back().x + eps < x) - points_.push_back(Point(x, y)); -} - -void Pwl::prepend(double x, double y, const double eps) -{ - if (points_.empty() || points_.front().x - eps > x) - points_.insert(points_.begin(), Point(x, y)); -} - -Pwl::Interval Pwl::domain() const -{ - return Interval(points_[0].x, points_[points_.size() - 1].x); -} - -Pwl::Interval Pwl::range() const -{ - double lo = points_[0].y, hi = lo; - for (auto &p : points_) - lo = std::min(lo, p.y), hi = std::max(hi, p.y); - return Interval(lo, hi); -} - -bool Pwl::empty() const -{ - return points_.empty(); -} - -double Pwl::eval(double x, int *spanPtr, bool updateSpan) const -{ - int span = findSpan(x, spanPtr && *spanPtr != -1 ? *spanPtr : points_.size() / 2 - 1); - if (spanPtr && updateSpan) - *spanPtr = span; - return points_[span].y + - (x - points_[span].x) * (points_[span + 1].y - points_[span].y) / - (points_[span + 1].x - points_[span].x); -} - -int Pwl::findSpan(double x, int span) const -{ - /* - * Pwls are generally small, so linear search may well be faster than - * binary, though could review this if large PWls start turning up. 
- */ - int lastSpan = points_.size() - 2; - /* - * some algorithms may call us with span pointing directly at the last - * control point - */ - span = std::max(0, std::min(lastSpan, span)); - while (span < lastSpan && x >= points_[span + 1].x) - span++; - while (span && x < points_[span].x) - span--; - return span; -} - -Pwl::PerpType Pwl::invert(Point const &xy, Point &perp, int &span, - const double eps) const -{ - assert(span >= -1); - bool prevOffEnd = false; - for (span = span + 1; span < (int)points_.size() - 1; span++) { - Point spanVec = points_[span + 1] - points_[span]; - double t = ((xy - points_[span]) % spanVec) / spanVec.len2(); - if (t < -eps) /* off the start of this span */ - { - if (span == 0) { - perp = points_[span]; - return PerpType::Start; - } else if (prevOffEnd) { - perp = points_[span]; - return PerpType::Vertex; - } - } else if (t > 1 + eps) /* off the end of this span */ - { - if (span == (int)points_.size() - 2) { - perp = points_[span + 1]; - return PerpType::End; - } - prevOffEnd = true; - } else /* a true perpendicular */ - { - perp = points_[span] + spanVec * t; - return PerpType::Perpendicular; - } - } - return PerpType::None; -} - -Pwl Pwl::inverse(bool *trueInverse, const double eps) const -{ - bool appended = false, prepended = false, neither = false; - Pwl inverse; - - for (Point const &p : points_) { - if (inverse.empty()) - inverse.append(p.y, p.x, eps); - else if (std::abs(inverse.points_.back().x - p.y) <= eps || - std::abs(inverse.points_.front().x - p.y) <= eps) - /* do nothing */; - else if (p.y > inverse.points_.back().x) { - inverse.append(p.y, p.x, eps); - appended = true; - } else if (p.y < inverse.points_.front().x) { - inverse.prepend(p.y, p.x, eps); - prepended = true; - } else - neither = true; - } - - /* - * This is not a proper inverse if we found ourselves putting points - * onto both ends of the inverse, or if there were points that couldn't - * go on either. 
- */ - if (trueInverse) - *trueInverse = !(neither || (appended && prepended)); - - return inverse; -} - -Pwl Pwl::compose(Pwl const &other, const double eps) const -{ - double thisX = points_[0].x, thisY = points_[0].y; - int thisSpan = 0, otherSpan = other.findSpan(thisY, 0); - Pwl result({ { thisX, other.eval(thisY, &otherSpan, false) } }); - while (thisSpan != (int)points_.size() - 1) { - double dx = points_[thisSpan + 1].x - points_[thisSpan].x, - dy = points_[thisSpan + 1].y - points_[thisSpan].y; - if (std::abs(dy) > eps && - otherSpan + 1 < (int)other.points_.size() && - points_[thisSpan + 1].y >= - other.points_[otherSpan + 1].x + eps) { - /* - * next control point in result will be where this - * function's y reaches the next span in other - */ - thisX = points_[thisSpan].x + - (other.points_[otherSpan + 1].x - - points_[thisSpan].y) * - dx / dy; - thisY = other.points_[++otherSpan].x; - } else if (std::abs(dy) > eps && otherSpan > 0 && - points_[thisSpan + 1].y <= - other.points_[otherSpan - 1].x - eps) { - /* - * next control point in result will be where this - * function's y reaches the previous span in other - */ - thisX = points_[thisSpan].x + - (other.points_[otherSpan + 1].x - - points_[thisSpan].y) * - dx / dy; - thisY = other.points_[--otherSpan].x; - } else { - /* we stay in the same span in other */ - thisSpan++; - thisX = points_[thisSpan].x, - thisY = points_[thisSpan].y; - } - result.append(thisX, other.eval(thisY, &otherSpan, false), - eps); - } - return result; -} - -void Pwl::map(std::function<void(double x, double y)> f) const -{ - for (auto &pt : points_) - f(pt.x, pt.y); -} - -void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1, - std::function<void(double x, double y0, double y1)> f) -{ - int span0 = 0, span1 = 0; - double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x); - f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); - while (span0 < (int)pwl0.points_.size() - 1 || - span1 < (int)pwl1.points_.size() - 1) { - if (span0 == (int)pwl0.points_.size() - 1) - x = pwl1.points_[++span1].x; - else if (span1 == (int)pwl1.points_.size() - 1) - x = pwl0.points_[++span0].x; - else if (pwl0.points_[span0 + 1].x > pwl1.points_[span1 + 1].x) - x = pwl1.points_[++span1].x; - else - x = pwl0.points_[++span0].x; - f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); - } -} - -Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1, - std::function<double(double x, double y0, double y1)> f, - const double eps) -{ - Pwl result; - map2(pwl0, pwl1, [&](double x, double y0, double y1) { - result.append(x, f(x, y0, y1), eps); - }); - return result; -} - -void Pwl::matchDomain(Interval const &domain, bool clip, const double eps) -{ - int span = 0; - prepend(domain.start, eval(clip ? points_[0].x : domain.start, &span), - eps); - span = points_.size() - 2; - append(domain.end, eval(clip ? 
points_.back().x : domain.end, &span), - eps); -} - -Pwl &Pwl::operator*=(double d) -{ - for (auto &pt : points_) - pt.y *= d; - return *this; -} - -void Pwl::debug(FILE *fp) const -{ - fprintf(fp, "Pwl {\n"); - for (auto &p : points_) - fprintf(fp, "\t(%g, %g)\n", p.x, p.y); - fprintf(fp, "}\n"); -} diff --git a/src/ipa/rpi/controller/pwl.h b/src/ipa/rpi/controller/pwl.h deleted file mode 100644 index aacf6039..00000000 --- a/src/ipa/rpi/controller/pwl.h +++ /dev/null @@ -1,127 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * pwl.h - piecewise linear functions interface - */ -#pragma once - -#include <functional> -#include <math.h> -#include <vector> - -#include "libcamera/internal/yaml_parser.h" - -namespace RPiController { - -class Pwl -{ -public: - struct Interval { - Interval(double _start, double _end) - : start(_start), end(_end) - { - } - double start, end; - bool contains(double value) - { - return value >= start && value <= end; - } - double clip(double value) - { - return value < start ? start - : (value > end ? end : value); - } - double len() const { return end - start; } - }; - struct Point { - Point() : x(0), y(0) {} - Point(double _x, double _y) - : x(_x), y(_y) {} - double x, y; - Point operator-(Point const &p) const - { - return Point(x - p.x, y - p.y); - } - Point operator+(Point const &p) const - { - return Point(x + p.x, y + p.y); - } - double operator%(Point const &p) const - { - return x * p.x + y * p.y; - } - Point operator*(double f) const { return Point(x * f, y * f); } - Point operator/(double f) const { return Point(x / f, y / f); } - double len2() const { return x * x + y * y; } - double len() const { return sqrt(len2()); } - }; - Pwl() {} - Pwl(std::vector<Point> const &points) : points_(points) {} - int read(const libcamera::YamlObject ¶ms); - void append(double x, double y, const double eps = 1e-6); - void prepend(double x, double y, const double eps = 1e-6); - Interval domain() const; - Interval range() const; - bool empty() const; - /* - * Evaluate Pwl, optionally supplying an initial guess for the - * "span". The "span" may be optionally be updated. If you want to know - * the "span" value but don't have an initial guess you can set it to - * -1. - */ - double eval(double x, int *spanPtr = nullptr, - bool updateSpan = true) const; - /* - * Find perpendicular closest to xy, starting from span+1 so you can - * call it repeatedly to check for multiple closest points (set span to - * -1 on the first call). Also returns "pseudo" perpendiculars; see - * PerpType enum. - */ - enum class PerpType { - None, /* no perpendicular found */ - Start, /* start of Pwl is closest point */ - End, /* end of Pwl is closest point */ - Vertex, /* vertex of Pwl is closest point */ - Perpendicular /* true perpendicular found */ - }; - PerpType invert(Point const &xy, Point &perp, int &span, - const double eps = 1e-6) const; - /* - * Compute the inverse function. Indicate if it is a proper (true) - * inverse, or only a best effort (e.g. input was non-monotonic). - */ - Pwl inverse(bool *trueInverse = nullptr, const double eps = 1e-6) const; - /* Compose two Pwls together, doing "this" first and "other" after. */ - Pwl compose(Pwl const &other, const double eps = 1e-6) const; - /* Apply function to (x,y) values at every control point. */ - void map(std::function<void(double x, double y)> f) const; - /* - * Apply function to (x, y0, y1) values wherever either Pwl has a - * control point. 
- */ - static void map2(Pwl const &pwl0, Pwl const &pwl1, - std::function<void(double x, double y0, double y1)> f); - /* - * Combine two Pwls, meaning we create a new Pwl where the y values are - * given by running f wherever either has a knot. - */ - static Pwl - combine(Pwl const &pwl0, Pwl const &pwl1, - std::function<double(double x, double y0, double y1)> f, - const double eps = 1e-6); - /* - * Make "this" match (at least) the given domain. Any extension my be - * clipped or linear. - */ - void matchDomain(Interval const &domain, bool clip = true, - const double eps = 1e-6); - Pwl &operator*=(double d); - void debug(FILE *fp = stdout) const; - -private: - int findSpan(double x, int span) const; - std::vector<Point> points_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/rpi/controller/region_stats.h b/src/ipa/rpi/controller/region_stats.h index a8860dc8..c60f7d9a 100644 --- a/src/ipa/rpi/controller/region_stats.h +++ b/src/ipa/rpi/controller/region_stats.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Raspberry Pi Ltd * - * region_stats.h - Raspberry Pi region based statistics container + * Raspberry Pi region based statistics container */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp index ed0c8a94..2157eb94 100644 --- a/src/ipa/rpi/controller/rpi/af.cpp +++ b/src/ipa/rpi/controller/rpi/af.cpp @@ -2,13 +2,13 @@ /* * Copyright (C) 2022-2023, Raspberry Pi Ltd * - * af.cpp - Autofocus control algorithm + * Autofocus control algorithm */ #include "af.h" +#include <cmath> #include <iomanip> -#include <math.h> #include <stdlib.h> #include <libcamera/base/log.h> @@ -139,7 +139,7 @@ int Af::CfgParams::read(const libcamera::YamlObject ¶ms) readNumber<uint32_t>(skipFrames, params, "skip_frames"); if (params.contains("map")) - map.read(params["map"]); + map = params["map"].get<ipa::Pwl>(ipa::Pwl{}); else LOG(RPiAf, Warning) << "No map defined"; @@ -721,7 +721,7 @@ bool Af::setLensPosition(double dioptres, int *hwpos) if (mode_ == AfModeManual) { LOG(RPiAf, Debug) << "setLensPosition: " << dioptres; - ftarget_ = cfg_.map.domain().clip(dioptres); + ftarget_ = cfg_.map.domain().clamp(dioptres); changed = !(initted_ && fsmooth_ == ftarget_); updateLensPosition(); } diff --git a/src/ipa/rpi/controller/rpi/af.h b/src/ipa/rpi/controller/rpi/af.h index 6d2bae67..317a51f3 100644 --- a/src/ipa/rpi/controller/rpi/af.h +++ b/src/ipa/rpi/controller/rpi/af.h @@ -2,14 +2,15 @@ /* * Copyright (C) 2022-2023, Raspberry Pi Ltd * - * af.h - Autofocus control algorithm + * Autofocus control algorithm */ #pragma once #include "../af_algorithm.h" #include "../af_status.h" #include "../pdaf_data.h" -#include "../pwl.h" + +#include "libipa/pwl.h" /* * This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF. 
@@ -100,7 +101,7 @@ private: uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */ uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */ uint32_t skipFrames; /* frames to skip at start or modeswitch */ - Pwl map; /* converts dioptres -> lens driver position */ + libcamera::ipa::Pwl map; /* converts dioptres -> lens driver position */ CfgParams(); int read(const libcamera::YamlObject ¶ms); diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp index 6549dedd..02bfdb4a 100644 --- a/src/ipa/rpi/controller/rpi/agc.cpp +++ b/src/ipa/rpi/controller/rpi/agc.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * agc.cpp - AGC/AEC control algorithm + * AGC/AEC control algorithm */ #include "agc.h" @@ -74,22 +74,62 @@ int Agc::checkChannel(unsigned int channelIndex) const return 0; } -void Agc::disableAuto() +void Agc::disableAutoExposure() { - LOG(RPiAgc, Debug) << "disableAuto"; + LOG(RPiAgc, Debug) << "disableAutoExposure"; /* All channels are enabled/disabled together. */ for (auto &data : channelData_) - data.channel.disableAuto(); + data.channel.disableAutoExposure(); } -void Agc::enableAuto() +void Agc::enableAutoExposure() { - LOG(RPiAgc, Debug) << "enableAuto"; + LOG(RPiAgc, Debug) << "enableAutoExposure"; /* All channels are enabled/disabled together. */ for (auto &data : channelData_) - data.channel.enableAuto(); + data.channel.enableAutoExposure(); +} + +bool Agc::autoExposureEnabled() const +{ + LOG(RPiAgc, Debug) << "autoExposureEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoExposureEnabled(); +} + +void Agc::disableAutoGain() +{ + LOG(RPiAgc, Debug) << "disableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.disableAutoGain(); +} + +void Agc::enableAutoGain() +{ + LOG(RPiAgc, Debug) << "enableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.enableAutoGain(); +} + +bool Agc::autoGainEnabled() const +{ + LOG(RPiAgc, Debug) << "autoGainEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoGainEnabled(); } unsigned int Agc::getConvergenceFrames() const @@ -127,21 +167,21 @@ void Agc::setFlickerPeriod(Duration flickerPeriod) data.channel.setFlickerPeriod(flickerPeriod); } -void Agc::setMaxShutter(Duration maxShutter) +void Agc::setMaxExposureTime(Duration maxExposureTime) { /* Frame durations will be the same across all channels too. 
*/ for (auto &data : channelData_) - data.channel.setMaxShutter(maxShutter); + data.channel.setMaxExposureTime(maxExposureTime); } -void Agc::setFixedShutter(unsigned int channelIndex, Duration fixedShutter) +void Agc::setFixedExposureTime(unsigned int channelIndex, Duration fixedExposureTime) { if (checkChannel(channelIndex)) return; - LOG(RPiAgc, Debug) << "setFixedShutter " << fixedShutter + LOG(RPiAgc, Debug) << "setFixedExposureTime " << fixedExposureTime << " for channel " << channelIndex; - channelData_[channelIndex].channel.setFixedShutter(fixedShutter); + channelData_[channelIndex].channel.setFixedExposureTime(fixedExposureTime); } void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h index 7d26bdf6..c3a940bf 100644 --- a/src/ipa/rpi/controller/rpi/agc.h +++ b/src/ipa/rpi/controller/rpi/agc.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * agc.h - AGC/AEC control algorithm + * AGC/AEC control algorithm */ #pragma once @@ -32,16 +32,20 @@ public: std::vector<double> const &getWeights() const override; void setEv(unsigned int channel, double ev) override; void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override; - void setMaxShutter(libcamera::utils::Duration maxShutter) override; - void setFixedShutter(unsigned int channelIndex, - libcamera::utils::Duration fixedShutter) override; + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) override; + void setFixedExposureTime(unsigned int channelIndex, + libcamera::utils::Duration fixedExposureTime) override; void setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) override; void setMeteringMode(std::string const &meteringModeName) override; void setExposureMode(std::string const &exposureModeName) override; void setConstraintMode(std::string const &contraintModeName) override; - void enableAuto() override; - void disableAuto() override; + void enableAutoExposure() override; + void disableAutoExposure() override; + bool autoExposureEnabled() const override; + void enableAutoGain() override; + void disableAutoGain() override; + bool autoGainEnabled() const override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; void prepare(Metadata *imageMetadata) override; void process(StatisticsPtr &stats, Metadata *imageMetadata) override; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp index 8116c6c1..b2999364 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.cpp +++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2023, Raspberry Pi Ltd * - * agc_channel.cpp - AGC/AEC control algorithm + * AGC/AEC control algorithm */ #include "agc_channel.h" @@ -12,6 +12,10 @@ #include <libcamera/base/log.h> +#include "libcamera/internal/vector.h" + +#include "libipa/colours.h" + #include "../awb_status.h" #include "../device_status.h" #include "../histogram.h" @@ -65,7 +69,7 @@ int AgcExposureMode::read(const libcamera::YamlObject ¶ms) auto value = params["shutter"].getList<double>(); if (!value) return -EINVAL; - std::transform(value->begin(), value->end(), std::back_inserter(shutter), + std::transform(value->begin(), value->end(), std::back_inserter(exposureTime), [](double v) { return v * 1us; }); value = params["gain"].getList<double>(); @@ -73,13 +77,13 @@ int AgcExposureMode::read(const libcamera::YamlObject ¶ms) return -EINVAL; gain = std::move(*value); - if 
(shutter.size() < 2 || gain.size() < 2) { + if (exposureTime.size() < 2 || gain.size() < 2) { LOG(RPiAgc, Error) << "AgcExposureMode: must have at least two entries in exposure profile"; return -EINVAL; } - if (shutter.size() != gain.size()) { + if (exposureTime.size() != gain.size()) { LOG(RPiAgc, Error) << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile"; return -EINVAL; @@ -130,7 +134,8 @@ int AgcConstraint::read(const libcamera::YamlObject ¶ms) return -EINVAL; qHi = *value; - return yTarget.read(params["y_target"]); + yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{}); + return yTarget.empty() ? -EINVAL : 0; } static std::tuple<int, AgcConstraintMode> @@ -237,9 +242,9 @@ int AgcConfig::read(const libcamera::YamlObject ¶ms) return ret; } - ret = yTarget.read(params["y_target"]); - if (ret) - return ret; + yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{}); + if (yTarget.empty()) + return -EINVAL; speed = params["speed"].get<double>(0.2); startupFrames = params["startup_frames"].get<uint16_t>(10); @@ -259,7 +264,7 @@ int AgcConfig::read(const libcamera::YamlObject ¶ms) } AgcChannel::ExposureValues::ExposureValues() - : shutter(0s), analogueGain(0), + : exposureTime(0s), analogueGain(0), totalExposure(0s), totalExposureNoDG(0s) { } @@ -268,7 +273,7 @@ AgcChannel::AgcChannel() : meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr), frameCount_(0), lockCount_(0), lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s), - maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0) + maxExposureTime_(0s), fixedExposureTime_(0s), fixedAnalogueGain_(0.0) { /* Set AWB default values in case early frames have no updates in metadata. */ awb_.gainR = 1.0; @@ -309,31 +314,49 @@ int AgcChannel::read(const libcamera::YamlObject ¶ms, exposureMode_ = &config_.exposureModes[exposureModeName_]; constraintModeName_ = config_.defaultConstraintMode; constraintMode_ = &config_.constraintModes[constraintModeName_]; - /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */ - status_.shutterTime = config_.defaultExposureTime; + /* Set up the "last exposure time/gain" values, in case AGC starts "disabled". */ + status_.exposureTime = config_.defaultExposureTime; status_.analogueGain = config_.defaultAnalogueGain; return 0; } -void AgcChannel::disableAuto() +void AgcChannel::disableAutoExposure() +{ + fixedExposureTime_ = status_.exposureTime; +} + +void AgcChannel::enableAutoExposure() +{ + fixedExposureTime_ = 0s; +} + +bool AgcChannel::autoExposureEnabled() const +{ + return fixedExposureTime_ == 0s; +} + +void AgcChannel::disableAutoGain() { - fixedShutter_ = status_.shutterTime; fixedAnalogueGain_ = status_.analogueGain; } -void AgcChannel::enableAuto() +void AgcChannel::enableAutoGain() { - fixedShutter_ = 0s; fixedAnalogueGain_ = 0; } +bool AgcChannel::autoGainEnabled() const +{ + return fixedAnalogueGain_ == 0; +} + unsigned int AgcChannel::getConvergenceFrames() const { /* - * If shutter and gain have been explicitly set, there is no + * If exposure time and gain have been explicitly set, there is no * convergence to happen, so no need to drop any frames - return zero. 
*/ - if (fixedShutter_ && fixedAnalogueGain_) + if (fixedExposureTime_ && fixedAnalogueGain_) return 0; else return config_.convergenceFrames; @@ -361,16 +384,16 @@ void AgcChannel::setFlickerPeriod(Duration flickerPeriod) flickerPeriod_ = flickerPeriod; } -void AgcChannel::setMaxShutter(Duration maxShutter) +void AgcChannel::setMaxExposureTime(Duration maxExposureTime) { - maxShutter_ = maxShutter; + maxExposureTime_ = maxExposureTime; } -void AgcChannel::setFixedShutter(Duration fixedShutter) +void AgcChannel::setFixedExposureTime(Duration fixedExposureTime) { - fixedShutter_ = fixedShutter; + fixedExposureTime_ = fixedExposureTime; /* Set this in case someone calls disableAuto() straight after. */ - status_.shutterTime = limitShutter(fixedShutter_); + status_.exposureTime = limitExposureTime(fixedExposureTime_); } void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain) @@ -410,22 +433,22 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, double lastSensitivity = mode_.sensitivity; mode_ = cameraMode; - Duration fixedShutter = limitShutter(fixedShutter_); - if (fixedShutter && fixedAnalogueGain_) { + Duration fixedExposureTime = limitExposureTime(fixedExposureTime_); + if (fixedExposureTime && fixedAnalogueGain_) { /* We're going to reset the algorithm here with these fixed values. */ fetchAwbStatus(metadata); double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); /* This is the equivalent of computeTargetExposure and applyDigitalGain. */ - target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_; + target_.totalExposureNoDG = fixedExposureTime_ * fixedAnalogueGain_; target_.totalExposure = target_.totalExposureNoDG / minColourGain; /* Equivalent of filterExposure. This resets any "history". */ filtered_ = target_; /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter; + filtered_.exposureTime = fixedExposureTime; filtered_.analogueGain = fixedAnalogueGain_; } else if (status_.totalExposureValue) { /* @@ -447,14 +470,15 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, divideUpExposure(); } else { /* - * We come through here on startup, when at least one of the shutter - * or gain has not been fixed. We must still write those values out so - * that they will be applied immediately. We supply some arbitrary defaults - * for any that weren't set. + * We come through here on startup, when at least one of the + * exposure time or gain has not been fixed. We must still + * write those values out so that they will be applied + * immediately. We supply some arbitrary defaults for any that + * weren't set. */ /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime; + filtered_.exposureTime = fixedExposureTime ? fixedExposureTime : config_.defaultExposureTime; filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain; } @@ -480,7 +504,7 @@ void AgcChannel::prepare(Metadata *imageMetadata) /* Process has run, so we have meaningful values. 
*/ DeviceStatus deviceStatus; if (imageMetadata->get("device.status", deviceStatus) == 0) { - Duration actualExposure = deviceStatus.shutterSpeed * + Duration actualExposure = deviceStatus.exposureTime * deviceStatus.analogueGain; if (actualExposure) { double digitalGain = totalExposureValue / actualExposure; @@ -534,7 +558,7 @@ void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, */ bool desaturate = applyDigitalGain(gain, targetY, channelBound); /* - * The last thing is to divide up the exposure value into a shutter time + * The last thing is to divide up the exposure value into a exposure time * and analogue gain, according to the current exposure mode. */ divideUpExposure(); @@ -550,7 +574,7 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus) const double resetMargin = 1.5; /* Add 200us to the exposure time error to allow for line quantisation. */ - Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us; + Duration exposureError = lastDeviceStatus_.exposureTime * errorFactor + 200us; double gainError = lastDeviceStatus_.analogueGain * errorFactor; Duration targetError = lastTargetExposure_ * errorFactor; @@ -559,15 +583,15 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus) * the values we keep requesting may be unachievable. For this reason * we only insist that we're close to values in the past few frames. */ - if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError && - deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError && + if (deviceStatus.exposureTime > lastDeviceStatus_.exposureTime - exposureError && + deviceStatus.exposureTime < lastDeviceStatus_.exposureTime + exposureError && deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError && deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError && status_.targetExposureValue > lastTargetExposure_ - targetError && status_.targetExposureValue < lastTargetExposure_ + targetError) lockCount_ = std::min(lockCount_ + 1, maxLockCount); - else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError || - deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError || + else if (deviceStatus.exposureTime < lastDeviceStatus_.exposureTime - resetMargin * exposureError || + deviceStatus.exposureTime > lastDeviceStatus_.exposureTime + resetMargin * exposureError || deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError || deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError || status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError || @@ -585,11 +609,11 @@ void AgcChannel::housekeepConfig() { /* First fetch all the up-to-date settings, so no one else has to do it. 
*/ status_.ev = ev_; - status_.fixedShutter = limitShutter(fixedShutter_); + status_.fixedExposureTime = limitExposureTime(fixedExposureTime_); status_.fixedAnalogueGain = fixedAnalogueGain_; status_.flickerPeriod = flickerPeriod_; - LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter " - << status_.fixedShutter << " fixedAnalogueGain " + LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedExposureTime " + << status_.fixedExposureTime << " fixedAnalogueGain " << status_.fixedAnalogueGain; /* * Make sure the "mode" pointers point to the up-to-date things, if @@ -633,10 +657,10 @@ void AgcChannel::housekeepConfig() void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus) { - current_.shutter = deviceStatus.shutterSpeed; + current_.exposureTime = deviceStatus.exposureTime; current_.analogueGain = deviceStatus.analogueGain; current_.totalExposure = 0s; /* this value is unused */ - current_.totalExposureNoDG = current_.shutter * current_.analogueGain; + current_.totalExposureNoDG = current_.exposureTime * current_.analogueGain; } void AgcChannel::fetchAwbStatus(Metadata *imageMetadata) @@ -677,12 +701,13 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, * Note that the weights are applied by the IPA to the statistics directly, * before they are given to us here. */ - double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0; + RGB<double> sum{ 0.0 }; + double pixelSum = 0; for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) { auto ®ion = stats->agcRegions.get(i); - rSum += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); - gSum += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); - bSum += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); + sum.r() += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); + sum.g() += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); + sum.b() += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); pixelSum += region.counted; } if (pixelSum == 0.0) { @@ -690,14 +715,11 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, return 0; } - double ySum; /* Factor in the AWB correction if needed. */ - if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) { - ySum = rSum * awb.gainR * .299 + - gSum * awb.gainG * .587 + - bSum * awb.gainB * .114; - } else - ySum = rSum * .299 + gSum * .587 + bSum * .114; + if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) + sum *= RGB<double>{ { awb.gainR, awb.gainG, awb.gainB } }; + + double ySum = ipa::rec601LuminanceFromRGB(sum); return ySum / pixelSum / (1 << 16); } @@ -715,7 +737,7 @@ static constexpr double EvGainYTargetLimit = 0.9; static double constraintComputeGain(AgcConstraint &c, const Histogram &h, double lux, double evGain, double &targetY) { - targetY = c.yTarget.eval(c.yTarget.domain().clip(lux)); + targetY = c.yTarget.eval(c.yTarget.domain().clamp(lux)); targetY = std::min(EvGainYTargetLimit, targetY * evGain); double iqm = h.interQuantileMean(c.qLo, c.qHi); return (targetY * h.bins()) / iqm; @@ -734,7 +756,7 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, * The initial gain and target_Y come from some of the regions. After * that we consider the histogram constraints. 
*/ - targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux)); + targetY = config_.yTarget.eval(config_.yTarget.domain().clamp(lux.lux)); targetY = std::min(EvGainYTargetLimit, targetY * evGain); /* @@ -774,17 +796,17 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, void AgcChannel::computeTargetExposure(double gain) { - if (status_.fixedShutter && status_.fixedAnalogueGain) { + if (status_.fixedExposureTime && status_.fixedAnalogueGain) { /* - * When ag and shutter are both fixed, we need to drive the - * total exposure so that we end up with a digital gain of at least - * 1/minColourGain. Otherwise we'd desaturate channels causing - * white to go cyan or magenta. + * When analogue gain and exposure time are both fixed, we need + * to drive the total exposure so that we end up with a digital + * gain of at least 1/minColourGain. Otherwise we'd desaturate + * channels causing white to go cyan or magenta. */ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); target_.totalExposure = - status_.fixedShutter * status_.fixedAnalogueGain / minColourGain; + status_.fixedExposureTime * status_.fixedAnalogueGain / minColourGain; } else { /* * The statistics reflect the image without digital gain, so the final @@ -792,12 +814,12 @@ void AgcChannel::computeTargetExposure(double gain) */ target_.totalExposure = current_.totalExposureNoDG * gain; /* The final target exposure is also limited to what the exposure mode allows. */ - Duration maxShutter = status_.fixedShutter - ? status_.fixedShutter - : exposureMode_->shutter.back(); - maxShutter = limitShutter(maxShutter); + Duration maxExposureTime = status_.fixedExposureTime + ? status_.fixedExposureTime + : exposureMode_->exposureTime.back(); + maxExposureTime = limitExposureTime(maxExposureTime); Duration maxTotalExposure = - maxShutter * + maxExposureTime * (status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain : exposureMode_->gain.back()); @@ -881,12 +903,16 @@ void AgcChannel::filterExposure() double stableRegion = config_.stableRegion; /* - * AGC adapts instantly if both shutter and gain are directly specified - * or we're in the startup phase. + * AGC adapts instantly if both exposure time and gain are directly + * specified or we're in the startup phase. Also disable the stable + * region, because we want to reflect any user exposure/gain updates, + * however small. */ - if ((status_.fixedShutter && status_.fixedAnalogueGain) || - frameCount_ <= config_.startupFrames) + if ((status_.fixedExposureTime && status_.fixedAnalogueGain) || + frameCount_ <= config_.startupFrames) { speed = 1.0; + stableRegion = 0.0; + } if (!filtered_.totalExposure) { filtered_.totalExposure = target_.totalExposure; } else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure && @@ -910,34 +936,34 @@ void AgcChannel::filterExposure() void AgcChannel::divideUpExposure() { /* - * Sending the fixed shutter/gain cases through the same code may seem - * unnecessary, but it will make more sense when extend this to cover - * variable aperture. + * Sending the fixed exposure time/gain cases through the same code may + * seem unnecessary, but it will make more sense when extend this to + * cover variable aperture. */ Duration exposureValue = filtered_.totalExposureNoDG; - Duration shutterTime; + Duration exposureTime; double analogueGain; - shutterTime = status_.fixedShutter ? 
status_.fixedShutter - : exposureMode_->shutter[0]; - shutterTime = limitShutter(shutterTime); + exposureTime = status_.fixedExposureTime ? status_.fixedExposureTime + : exposureMode_->exposureTime[0]; + exposureTime = limitExposureTime(exposureTime); analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain : exposureMode_->gain[0]; analogueGain = limitGain(analogueGain); - if (shutterTime * analogueGain < exposureValue) { + if (exposureTime * analogueGain < exposureValue) { for (unsigned int stage = 1; stage < exposureMode_->gain.size(); stage++) { - if (!status_.fixedShutter) { - Duration stageShutter = - limitShutter(exposureMode_->shutter[stage]); - if (stageShutter * analogueGain >= exposureValue) { - shutterTime = exposureValue / analogueGain; + if (!status_.fixedExposureTime) { + Duration stageExposureTime = + limitExposureTime(exposureMode_->exposureTime[stage]); + if (stageExposureTime * analogueGain >= exposureValue) { + exposureTime = exposureValue / analogueGain; break; } - shutterTime = stageShutter; + exposureTime = stageExposureTime; } if (status_.fixedAnalogueGain == 0.0) { - if (exposureMode_->gain[stage] * shutterTime >= exposureValue) { - analogueGain = exposureValue / shutterTime; + if (exposureMode_->gain[stage] * exposureTime >= exposureValue) { + analogueGain = exposureValue / exposureTime; break; } analogueGain = exposureMode_->gain[stage]; @@ -945,18 +971,19 @@ void AgcChannel::divideUpExposure() } } } - LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and " - << analogueGain; + LOG(RPiAgc, Debug) + << "Divided up exposure time and gain are " << exposureTime + << " and " << analogueGain; /* - * Finally adjust shutter time for flicker avoidance (require both - * shutter and gain not to be fixed). + * Finally adjust exposure time for flicker avoidance (require both + * exposure time and gain not to be fixed). */ - if (!status_.fixedShutter && !status_.fixedAnalogueGain && + if (!status_.fixedExposureTime && !status_.fixedAnalogueGain && status_.flickerPeriod) { - int flickerPeriods = shutterTime / status_.flickerPeriod; + int flickerPeriods = exposureTime / status_.flickerPeriod; if (flickerPeriods) { - Duration newShutterTime = flickerPeriods * status_.flickerPeriod; - analogueGain *= shutterTime / newShutterTime; + Duration newExposureTime = flickerPeriods * status_.flickerPeriod; + analogueGain *= exposureTime / newExposureTime; /* * We should still not allow the ag to go over the * largest value in the exposure mode. Note that this @@ -965,12 +992,12 @@ void AgcChannel::divideUpExposure() */ analogueGain = std::min(analogueGain, exposureMode_->gain.back()); analogueGain = limitGain(analogueGain); - shutterTime = newShutterTime; + exposureTime = newExposureTime; } - LOG(RPiAgc, Debug) << "After flicker avoidance, shutter " - << shutterTime << " gain " << analogueGain; + LOG(RPiAgc, Debug) << "After flicker avoidance, exposure time " + << exposureTime << " gain " << analogueGain; } - filtered_.shutter = shutterTime; + filtered_.exposureTime = exposureTime; filtered_.analogueGain = analogueGain; } @@ -978,7 +1005,7 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) { status_.totalExposureValue = filtered_.totalExposure; status_.targetExposureValue = desaturate ? 
0s : target_.totalExposure; - status_.shutterTime = filtered_.shutter; + status_.exposureTime = filtered_.exposureTime; status_.analogueGain = filtered_.analogueGain; /* * Write to metadata as well, in case anyone wants to update the camera @@ -987,32 +1014,32 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) imageMetadata->set("agc.status", status_); LOG(RPiAgc, Debug) << "Output written, total exposure requested is " << filtered_.totalExposure; - LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter + LOG(RPiAgc, Debug) << "Camera exposure update: exposure time " << filtered_.exposureTime << " analogue gain " << filtered_.analogueGain; } -Duration AgcChannel::limitShutter(Duration shutter) +Duration AgcChannel::limitExposureTime(Duration exposureTime) { /* - * shutter == 0 is a special case for fixed shutter values, and must pass - * through unchanged + * exposureTime == 0 is a special case for fixed exposure time values, + * and must pass through unchanged. */ - if (!shutter) - return shutter; + if (!exposureTime) + return exposureTime; - shutter = std::clamp(shutter, mode_.minShutter, maxShutter_); - return shutter; + exposureTime = std::clamp(exposureTime, mode_.minExposureTime, maxExposureTime_); + return exposureTime; } double AgcChannel::limitGain(double gain) const { /* - * Only limit the lower bounds of the gain value to what the sensor limits. - * The upper bound on analogue gain will be made up with additional digital - * gain applied by the ISP. + * Only limit the lower bounds of the gain value to what the sensor + * limits. The upper bound on analogue gain will be made up with + * additional digital gain applied by the ISP. * - * gain == 0.0 is a special case for fixed shutter values, and must pass - * through unchanged + * gain == 0.0 is a special case for fixed exposure time values, and + * must pass through unchanged. */ if (!gain) return gain; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h index 4cf7233e..fa697e6f 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.h +++ b/src/ipa/rpi/controller/rpi/agc_channel.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023, Raspberry Pi Ltd * - * agc_channel.h - AGC/AEC control algorithm + * AGC/AEC control algorithm */ #pragma once @@ -12,10 +12,11 @@ #include <libcamera/base/utils.h> +#include <libipa/pwl.h> + #include "../agc_status.h" #include "../awb_status.h" #include "../controller.h" -#include "../pwl.h" /* This is our implementation of AGC. 
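A minimal standalone sketch of the flicker avoidance step in divideUpExposure() above, assuming std::chrono stands in for libcamera::utils::Duration; the function name and the maxGain parameter are illustrative, not part of the patch. The exposure time is rounded down to a whole number of flicker periods and the analogue gain raised to preserve the total exposure, clamped to the exposure mode's largest gain.

#include <algorithm>
#include <chrono>

using Duration = std::chrono::duration<double, std::nano>;

/* Illustrative only; mirrors the flicker rounding, not the real AGC code. */
static void avoidFlicker(Duration &exposureTime, double &analogueGain,
			 Duration flickerPeriod, double maxGain)
{
	int flickerPeriods = exposureTime / flickerPeriod;
	if (!flickerPeriods)
		return; /* exposure already shorter than one flicker period */

	Duration newExposureTime = flickerPeriods * flickerPeriod;
	/* Compensate with gain, but never beyond the mode's largest gain. */
	analogueGain = std::min(analogueGain * (exposureTime / newExposureTime),
				maxGain);
	exposureTime = newExposureTime;
}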
*/ @@ -29,7 +30,7 @@ struct AgcMeteringMode { }; struct AgcExposureMode { - std::vector<libcamera::utils::Duration> shutter; + std::vector<libcamera::utils::Duration> exposureTime; std::vector<double> gain; int read(const libcamera::YamlObject ¶ms); }; @@ -40,7 +41,7 @@ struct AgcConstraint { Bound bound; double qLo; double qHi; - Pwl yTarget; + libcamera::ipa::Pwl yTarget; int read(const libcamera::YamlObject ¶ms); }; @@ -61,7 +62,7 @@ struct AgcConfig { std::map<std::string, AgcExposureMode> exposureModes; std::map<std::string, AgcConstraintMode> constraintModes; std::vector<AgcChannelConstraint> channelConstraints; - Pwl yTarget; + libcamera::ipa::Pwl yTarget; double speed; uint16_t startupFrames; unsigned int convergenceFrames; @@ -89,14 +90,18 @@ public: std::vector<double> const &getWeights() const; void setEv(double ev); void setFlickerPeriod(libcamera::utils::Duration flickerPeriod); - void setMaxShutter(libcamera::utils::Duration maxShutter); - void setFixedShutter(libcamera::utils::Duration fixedShutter); + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime); + void setFixedExposureTime(libcamera::utils::Duration fixedExposureTime); void setFixedAnalogueGain(double fixedAnalogueGain); void setMeteringMode(std::string const &meteringModeName); void setExposureMode(std::string const &exposureModeName); void setConstraintMode(std::string const &contraintModeName); - void enableAuto(); - void disableAuto(); + void enableAutoExposure(); + void disableAutoExposure(); + bool autoExposureEnabled() const; + void enableAutoGain(); + void disableAutoGain(); + bool autoGainEnabled() const; void switchMode(CameraMode const &cameraMode, Metadata *metadata); void prepare(Metadata *imageMetadata); void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata, @@ -116,7 +121,7 @@ private: bool applyDigitalGain(double gain, double targetY, bool channelBound); void divideUpExposure(); void writeAndFinish(Metadata *imageMetadata, bool desaturate); - libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter); + libcamera::utils::Duration limitExposureTime(libcamera::utils::Duration exposureTime); double limitGain(double gain) const; AgcMeteringMode *meteringMode_; AgcExposureMode *exposureMode_; @@ -127,7 +132,7 @@ private: struct ExposureValues { ExposureValues(); - libcamera::utils::Duration shutter; + libcamera::utils::Duration exposureTime; double analogueGain; libcamera::utils::Duration totalExposure; libcamera::utils::Duration totalExposureNoDG; /* without digital gain */ @@ -145,8 +150,8 @@ private: std::string constraintModeName_; double ev_; libcamera::utils::Duration flickerPeriod_; - libcamera::utils::Duration maxShutter_; - libcamera::utils::Duration fixedShutter_; + libcamera::utils::Duration maxExposureTime_; + libcamera::utils::Duration fixedExposureTime_; double fixedAnalogueGain_; }; diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp index 8a205c60..21edb819 100644 --- a/src/ipa/rpi/controller/rpi/alsc.cpp +++ b/src/ipa/rpi/controller/rpi/alsc.cpp @@ -2,13 +2,14 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * alsc.cpp - ALSC (auto lens shading correction) control algorithm + * ALSC (auto lens shading correction) control algorithm */ #include <algorithm> +#include <cmath> #include <functional> -#include <math.h> #include <numeric> +#include <vector> #include <libcamera/base/log.h> #include <libcamera/base/span.h> @@ -251,12 +252,12 @@ static bool compareModes(CameraMode const &cm0, 
CameraMode const &cm1) */ if (cm0.transform != cm1.transform) return true; - int leftDiff = abs(cm0.cropX - cm1.cropX); - int topDiff = abs(cm0.cropY - cm1.cropY); - int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width - - cm1.cropX - cm1.scaleX * cm1.width); - int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height - - cm1.cropY - cm1.scaleY * cm1.height); + int leftDiff = std::abs(cm0.cropX - cm1.cropX); + int topDiff = std::abs(cm0.cropY - cm1.cropY); + int rightDiff = std::abs(cm0.cropX + cm0.scaleX * cm0.width - + cm1.cropX - cm1.scaleX * cm1.width); + int bottomDiff = std::abs(cm0.cropY + cm0.scaleY * cm0.height - + cm1.cropY - cm1.scaleY * cm1.height); /* * These thresholds are a rather arbitrary amount chosen to trigger * when carrying on with the previously calculated tables might be @@ -496,8 +497,9 @@ void resampleCalTable(const Array2D<double> &calTableIn, * Precalculate and cache the x sampling locations and phases to save * recomputing them on every row. */ - int xLo[X], xHi[X]; - double xf[X]; + std::vector<int> xLo(X); + std::vector<int> xHi(X); + std::vector<double> xf(X); double scaleX = cameraMode.sensorWidth / (cameraMode.width * cameraMode.scaleX); double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth; @@ -730,7 +732,7 @@ static double gaussSeidel2Sor(const SparseArray<double> &M, double omega, double maxDiff = 0; for (i = 0; i < XY; i++) { lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega; - if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff)) + if (std::abs(lambda[i] - oldLambda[i]) > std::abs(maxDiff)) maxDiff = lambda[i] - oldLambda[i]; } return maxDiff; @@ -762,7 +764,7 @@ static void runMatrixIterations(const Array2D<double> &C, constructM(C, W, M); double lastMaxDiff = std::numeric_limits<double>::max(); for (unsigned int i = 0; i < nIter; i++) { - double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); + double maxDiff = std::abs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); if (maxDiff < threshold) { LOG(RPiAlsc, Debug) << "Stop after " << i + 1 << " iterations"; diff --git a/src/ipa/rpi/controller/rpi/alsc.h b/src/ipa/rpi/controller/rpi/alsc.h index 0b6d9478..31087982 100644 --- a/src/ipa/rpi/controller/rpi/alsc.h +++ b/src/ipa/rpi/controller/rpi/alsc.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * alsc.h - ALSC (auto lens shading correction) control algorithm + * ALSC (auto lens shading correction) control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp index dde5785a..365b595f 100644 --- a/src/ipa/rpi/controller/rpi/awb.cpp +++ b/src/ipa/rpi/controller/rpi/awb.cpp @@ -2,10 +2,11 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * awb.cpp - AWB control algorithm + * AWB control algorithm */ #include <assert.h> +#include <cmath> #include <functional> #include <libcamera/base/log.h> @@ -20,6 +21,8 @@ using namespace libcamera; LOG_DEFINE_CATEGORY(RPiAwb) +constexpr double kDefaultCT = 4500.0; + #define NAME "rpi.awb" /* @@ -49,10 +52,11 @@ int AwbPrior::read(const libcamera::YamlObject ¶ms) return -EINVAL; lux = *value; - return prior.read(params["prior"]); + prior = params["prior"].get<ipa::Pwl>(ipa::Pwl{}); + return prior.empty() ? 
-EINVAL : 0; } -static int readCtCurve(Pwl &ctR, Pwl &ctB, const libcamera::YamlObject &params) +static int readCtCurve(ipa::Pwl &ctR, ipa::Pwl &ctB, const libcamera::YamlObject &params) { if (params.size() % 3) { LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry"; @@ -103,8 +107,8 @@ int AwbConfig::read(const libcamera::YamlObject &params) if (ret) return ret; /* We will want the inverse functions of these too. */ - ctRInverse = ctR.inverse(); - ctBInverse = ctB.inverse(); + ctRInverse = ctR.inverse().first; + ctBInverse = ctB.inverse().first; } if (params.contains("priors")) { @@ -121,7 +125,7 @@ int AwbConfig::read(const libcamera::YamlObject &params) } if (priors.empty()) { LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured"; - return ret; + return -EINVAL; } } if (params.contains("modes")) { @@ -161,11 +165,18 @@ int AwbConfig::read(const libcamera::YamlObject &params) bayes = false; } } - fast = params[fast].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */ whitepointR = params["whitepoint_r"].get<double>(0.0); whitepointB = params["whitepoint_b"].get<double>(0.0); if (bayes == false) sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */ + /* + * The biasProportion parameter adds a small proportion of the counted + * pixels to a region biased to the biasCT colour temperature. + * + * A typical value for biasProportion would be between 0.05 and 0.1. + */ + biasProportion = params["bias_proportion"].get<double>(0.0); + biasCT = params["bias_ct"].get<double>(kDefaultCT); return 0; } @@ -207,13 +218,13 @@ void Awb::initialise() * them. */ if (!config_.ctR.empty() && !config_.ctB.empty()) { - syncResults_.temperatureK = config_.ctR.domain().clip(4000); + syncResults_.temperatureK = config_.ctR.domain().clamp(4000); syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK); syncResults_.gainG = 1.0; syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK); } else { /* random values just to stop the world blowing up */ - syncResults_.temperatureK = 4500; + syncResults_.temperatureK = kDefaultCT; syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0; } prevSyncResults_ = syncResults_; @@ -273,14 +284,32 @@ void Awb::setManualGains(double manualR, double manualB) syncResults_.gainB = prevSyncResults_.gainB = manualB_; if (config_.bayes) { /* Also estimate the best corresponding colour temperature from the curves.
*/ - double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clip(1 / manualR_)); - double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clip(1 / manualB_)); + double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clamp(1 / manualR_)); + double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clamp(1 / manualB_)); prevSyncResults_.temperatureK = (ctR + ctB) / 2; syncResults_.temperatureK = prevSyncResults_.temperatureK; } } } +void Awb::setColourTemperature(double temperatureK) +{ + if (!config_.bayes) { + LOG(RPiAwb, Warning) << "AWB uncalibrated - cannot set colour temperature"; + return; + } + + temperatureK = config_.ctR.domain().clamp(temperatureK); + manualR_ = 1 / config_.ctR.eval(temperatureK); + manualB_ = 1 / config_.ctB.eval(temperatureK); + + syncResults_.temperatureK = temperatureK; + syncResults_.gainR = manualR_; + syncResults_.gainG = 1.0; + syncResults_.gainB = manualB_; + prevSyncResults_ = syncResults_; +} + void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode, Metadata *metadata) { @@ -406,7 +435,8 @@ void Awb::asyncFunc() static void generateStats(std::vector<Awb::RGB> &zones, StatisticsPtr &stats, double minPixels, - double minG, Metadata &globalMetadata) + double minG, Metadata &globalMetadata, + double biasProportion, double biasCtR, double biasCtB) { std::scoped_lock<RPiController::Metadata> l(globalMetadata); @@ -419,6 +449,14 @@ static void generateStats(std::vector<Awb::RGB> &zones, continue; zone.R = region.val.rSum / region.counted; zone.B = region.val.bSum / region.counted; + /* + * Add some bias samples to allow the search to tend to a + * bias CT in failure cases. + */ + const unsigned int proportion = biasProportion * region.counted; + zone.R += proportion * biasCtR; + zone.B += proportion * biasCtB; + zone.G += proportion * 1.0; /* Factor in the ALSC applied colour shading correction if required. */ const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status"); if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) { @@ -438,8 +476,11 @@ void Awb::prepareStats() * LSC has already been applied to the stats in this pipeline, so stop * any LSC compensation. We also ignore config_.fast in this version. */ + const double biasCtR = config_.bayes ? config_.ctR.eval(config_.biasCT) : 0; + const double biasCtB = config_.bayes ? config_.ctB.eval(config_.biasCT) : 0; generateStats(zones_, statistics_, config_.minPixels, - config_.minG, getGlobalMetadata()); + config_.minG, getGlobalMetadata(), + config_.biasProportion, biasCtR, biasCtB); /* * apply sensitivities, so values appear to come from our "canonical" * sensor. 
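A minimal standalone sketch of the bias samples added in generateStats() above; the Zone struct and function name are illustrative simplifications, not the real statistics types. A contribution proportional to the zone's counted pixels, carrying the colour of the bias CT (r = R/G, b = B/G), is added so that zones with little real colour information drift towards biasCT.

struct Zone {
	double R, G, B; /* per-zone averaged colour values */
};

/* Illustrative only; mirrors the bias step applied to each AWB zone. */
static void addBiasSamples(Zone &zone, unsigned int counted,
			   double biasProportion, double biasCtR, double biasCtB)
{
	const double proportion = biasProportion * counted;
	zone.R += proportion * biasCtR; /* r value of the bias CT */
	zone.B += proportion * biasCtB; /* b value of the bias CT */
	zone.G += proportion * 1.0;     /* green is the reference channel */
}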
@@ -468,7 +509,7 @@ double Awb::computeDelta2Sum(double gainR, double gainB) return delta2Sum; } -Pwl Awb::interpolatePrior() +ipa::Pwl Awb::interpolatePrior() { /* * Interpolate the prior log likelihood function for our current lux @@ -485,7 +526,7 @@ Pwl Awb::interpolatePrior() idx++; double lux0 = config_.priors[idx].lux, lux1 = config_.priors[idx + 1].lux; - return Pwl::combine(config_.priors[idx].prior, + return ipa::Pwl::combine(config_.priors[idx].prior, config_.priors[idx + 1].prior, [&](double /*x*/, double y0, double y1) { return y0 + (y1 - y0) * @@ -494,26 +535,26 @@ Pwl Awb::interpolatePrior() } } -static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b, - Pwl::Point const &c) +static double interpolateQuadatric(ipa::Pwl::Point const &a, ipa::Pwl::Point const &b, + ipa::Pwl::Point const &c) { /* * Given 3 points on a curve, find the extremum of the function in that * interval by fitting a quadratic. */ const double eps = 1e-3; - Pwl::Point ca = c - a, ba = b - a; - double denominator = 2 * (ba.y * ca.x - ca.y * ba.x); - if (abs(denominator) > eps) { - double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x; - double result = numerator / denominator + a.x; - return std::max(a.x, std::min(c.x, result)); + ipa::Pwl::Point ca = c - a, ba = b - a; + double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x()); + if (std::abs(denominator) > eps) { + double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x(); + double result = numerator / denominator + a.x(); + return std::max(a.x(), std::min(c.x(), result)); } /* has degenerated to straight line segment */ - return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x); + return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x()); } -double Awb::coarseSearch(Pwl const &prior) +double Awb::coarseSearch(ipa::Pwl const &prior) { points_.clear(); /* assume doesn't deallocate memory */ size_t bestPoint = 0; @@ -525,22 +566,22 @@ double Awb::coarseSearch(Pwl const &prior) double b = config_.ctB.eval(t, &spanB); double gainR = 1 / r, gainB = 1 / b; double delta2Sum = computeDelta2Sum(gainR, gainB); - double priorLogLikelihood = prior.eval(prior.domain().clip(t)); + double priorLogLikelihood = prior.eval(prior.domain().clamp(t)); double finalLogLikelihood = delta2Sum - priorLogLikelihood; LOG(RPiAwb, Debug) << "t: " << t << " gain R " << gainR << " gain B " << gainB << " delta2_sum " << delta2Sum << " prior " << priorLogLikelihood << " final " << finalLogLikelihood; - points_.push_back(Pwl::Point(t, finalLogLikelihood)); - if (points_.back().y < points_[bestPoint].y) + points_.push_back(ipa::Pwl::Point({ t, finalLogLikelihood })); + if (points_.back().y() < points_[bestPoint].y()) bestPoint = points_.size() - 1; if (t == mode_->ctHi) break; /* for even steps along the r/b curve scale them by the current t */ t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi); } - t = points_[bestPoint].x; + t = points_[bestPoint].x(); LOG(RPiAwb, Debug) << "Coarse search found CT " << t; /* * We have the best point of the search, but refine it with a quadratic @@ -559,7 +600,7 @@ double Awb::coarseSearch(Pwl const &prior) return t; } -void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior) +void Awb::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior) { int spanR = -1, spanB = -1; config_.ctR.eval(t, &spanR); @@ -570,14 +611,14 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior) config_.ctR.eval(t - nsteps * step, &spanR); double bDiff = 
config_.ctB.eval(t + nsteps * step, &spanB) - config_.ctB.eval(t - nsteps * step, &spanB); - Pwl::Point transverse(bDiff, -rDiff); - if (transverse.len2() < 1e-6) + ipa::Pwl::Point transverse({ bDiff, -rDiff }); + if (transverse.length2() < 1e-6) return; /* * unit vector orthogonal to the b vs. r function (pointing outwards * with r and b increasing) */ - transverse = transverse / transverse.len(); + transverse = transverse / transverse.length(); double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0; double transverseRange = config_.transverseNeg + config_.transversePos; const int maxNumDeltas = 12; @@ -592,26 +633,26 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior) for (int i = -nsteps; i <= nsteps; i++) { double tTest = t + i * step; double priorLogLikelihood = - prior.eval(prior.domain().clip(tTest)); + prior.eval(prior.domain().clamp(tTest)); double rCurve = config_.ctR.eval(tTest, &spanR); double bCurve = config_.ctB.eval(tTest, &spanB); /* x will be distance off the curve, y the log likelihood there */ - Pwl::Point points[maxNumDeltas]; + ipa::Pwl::Point points[maxNumDeltas]; int bestPoint = 0; /* Take some measurements transversely *off* the CT curve. */ for (int j = 0; j < numDeltas; j++) { - points[j].x = -config_.transverseNeg + - (transverseRange * j) / (numDeltas - 1); - Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) + - transverse * points[j].x; - double rTest = rbTest.x, bTest = rbTest.y; + points[j][0] = -config_.transverseNeg + + (transverseRange * j) / (numDeltas - 1); + ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) + + transverse * points[j].x(); + double rTest = rbTest.x(), bTest = rbTest.y(); double gainR = 1 / rTest, gainB = 1 / bTest; double delta2Sum = computeDelta2Sum(gainR, gainB); - points[j].y = delta2Sum - priorLogLikelihood; + points[j][1] = delta2Sum - priorLogLikelihood; LOG(RPiAwb, Debug) << "At t " << tTest << " r " << rTest << " b " - << bTest << ": " << points[j].y; - if (points[j].y < points[bestPoint].y) + << bTest << ": " << points[j].y(); + if (points[j].y() < points[bestPoint].y()) bestPoint = j; } /* @@ -619,11 +660,11 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior) * now let's do a quadratic interpolation for the best result. */ bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2)); - Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) + - transverse * interpolateQuadatric(points[bestPoint - 1], - points[bestPoint], - points[bestPoint + 1]); - double rTest = rbTest.x, bTest = rbTest.y; + ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) + + transverse * interpolateQuadatric(points[bestPoint - 1], + points[bestPoint], + points[bestPoint + 1]); + double rTest = rbTest.x(), bTest = rbTest.y(); double gainR = 1 / rTest, gainB = 1 / bTest; double delta2Sum = computeDelta2Sum(gainR, gainB); double finalLogLikelihood = delta2Sum - priorLogLikelihood; @@ -653,7 +694,7 @@ void Awb::awbBayes() * Get the current prior, and scale according to how many zones are * valid... not entirely sure about this. 
*/ - Pwl prior = interpolatePrior(); + ipa::Pwl prior = interpolatePrior(); prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions()); prior.map([](double x, double y) { LOG(RPiAwb, Debug) << "(" << x << "," << y << ")"; @@ -715,7 +756,11 @@ void Awb::awbGrey() sumR += *ri, sumB += *bi; double gainR = sumR.G / (sumR.R + 1), gainB = sumB.G / (sumB.B + 1); - asyncResults_.temperatureK = 4500; /* don't know what it is */ + /* + * The grey world model can't estimate the colour temperature, use a + * default value. + */ + asyncResults_.temperatureK = kDefaultCT; asyncResults_.gainR = gainR; asyncResults_.gainG = 1.0; asyncResults_.gainB = gainB; diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h index cde6a62f..2fb91254 100644 --- a/src/ipa/rpi/controller/rpi/awb.h +++ b/src/ipa/rpi/controller/rpi/awb.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * awb.h - AWB control algorithm + * AWB control algorithm */ #pragma once @@ -10,11 +10,14 @@ #include <condition_variable> #include <thread> +#include <libcamera/geometry.h> + #include "../awb_algorithm.h" -#include "../pwl.h" #include "../awb_status.h" #include "../statistics.h" +#include "libipa/pwl.h" + namespace RPiController { /* Control algorithm to perform AWB calculations. */ @@ -28,7 +31,7 @@ struct AwbMode { struct AwbPrior { int read(const libcamera::YamlObject ¶ms); double lux; /* lux level */ - Pwl prior; /* maps CT to prior log likelihood for this lux level */ + libcamera::ipa::Pwl prior; /* maps CT to prior log likelihood for this lux level */ }; struct AwbConfig { @@ -40,11 +43,10 @@ struct AwbConfig { uint16_t startupFrames; unsigned int convergenceFrames; /* approx number of frames to converge */ double speed; /* IIR filter speed applied to algorithm results */ - bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */ - Pwl ctR; /* function maps CT to r (= R/G) */ - Pwl ctB; /* function maps CT to b (= B/G) */ - Pwl ctRInverse; /* inverse of ctR */ - Pwl ctBInverse; /* inverse of ctB */ + libcamera::ipa::Pwl ctR; /* function maps CT to r (= R/G) */ + libcamera::ipa::Pwl ctB; /* function maps CT to b (= B/G) */ + libcamera::ipa::Pwl ctRInverse; /* inverse of ctR */ + libcamera::ipa::Pwl ctBInverse; /* inverse of ctB */ /* table of illuminant priors at different lux levels */ std::vector<AwbPrior> priors; /* AWB "modes" (determines the search range) */ @@ -84,6 +86,10 @@ struct AwbConfig { double whitepointR; double whitepointB; bool bayes; /* use Bayesian algorithm */ + /* proportion of counted samples to add for the search bias */ + double biasProportion; + /* CT target for the search bias */ + double biasCT; }; class Awb : public AwbAlgorithm @@ -98,6 +104,7 @@ public: void initialValues(double &gainR, double &gainB) override; void setMode(std::string const &name) override; void setManualGains(double manualR, double manualB) override; + void setColourTemperature(double temperatureK) override; void enableAuto() override; void disableAuto() override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; @@ -161,11 +168,11 @@ private: void awbGrey(); void prepareStats(); double computeDelta2Sum(double gainR, double gainB); - Pwl interpolatePrior(); - double coarseSearch(Pwl const &prior); - void fineSearch(double &t, double &r, double &b, Pwl const &prior); + libcamera::ipa::Pwl interpolatePrior(); + double coarseSearch(libcamera::ipa::Pwl const &prior); + void fineSearch(double &t, double &r, double &b, libcamera::ipa::Pwl const &prior); 
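A minimal standalone sketch of the quadratic refinement used by the coarse and fine searches above, with std::pair standing in for the libipa Pwl::Point type: fit a parabola through three samples and return the x position of its extremum, clamped to the sampled interval (it assumes a.x <= c.x).

#include <algorithm>
#include <cmath>
#include <utility>

using Point = std::pair<double, double>; /* (x, y) */

/* Illustrative only; same fit and clamping as the search refinement above. */
static double quadraticExtremum(const Point &a, const Point &b, const Point &c)
{
	const double eps = 1e-3;
	double bax = b.first - a.first, bay = b.second - a.second;
	double cax = c.first - a.first, cay = c.second - a.second;
	double denominator = 2 * (bay * cax - cay * bax);
	if (std::abs(denominator) > eps) {
		double numerator = bay * cax * cax - cay * bax * bax;
		return std::clamp(numerator / denominator + a.first,
				  a.first, c.first);
	}
	/* Degenerate (almost straight) case: pick the lower endpoint. */
	return a.second < c.second - eps
		       ? a.first
		       : (c.second < a.second - eps ? c.first : b.first);
}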
std::vector<RGB> zones_; - std::vector<Pwl::Point> points_; + std::vector<libcamera::ipa::Pwl::Point> points_; /* manual r setting */ double manualR_; /* manual b setting */ diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp index 2e3db51f..4c968f14 100644 --- a/src/ipa/rpi/controller/rpi/black_level.cpp +++ b/src/ipa/rpi/controller/rpi/black_level.cpp @@ -2,10 +2,9 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * black_level.cpp - black level control algorithm + * black level control algorithm */ -#include <math.h> #include <stdint.h> #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/black_level.h b/src/ipa/rpi/controller/rpi/black_level.h index d8c41c62..f50729db 100644 --- a/src/ipa/rpi/controller/rpi/black_level.h +++ b/src/ipa/rpi/controller/rpi/black_level.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * black_level.h - black level control algorithm + * black level control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/cac.cpp b/src/ipa/rpi/controller/rpi/cac.cpp index f2c8d282..17779ad5 100644 --- a/src/ipa/rpi/controller/rpi/cac.cpp +++ b/src/ipa/rpi/controller/rpi/cac.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2023 Raspberry Pi Ltd * - * cac.cpp - Chromatic Aberration Correction algorithm + * Chromatic Aberration Correction algorithm */ #include "cac.h" diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp index 2e2e6664..8607f152 100644 --- a/src/ipa/rpi/controller/rpi/ccm.cpp +++ b/src/ipa/rpi/controller/rpi/ccm.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * ccm.cpp - CCM (colour correction matrix) control algorithm + * CCM (colour correction matrix) control algorithm */ #include <libcamera/base/log.h> @@ -29,34 +29,7 @@ LOG_DEFINE_CATEGORY(RPiCcm) #define NAME "rpi.ccm" -Matrix::Matrix() -{ - memset(m, 0, sizeof(m)); -} -Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8) -{ - m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4, - m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8; -} -int Matrix::read(const libcamera::YamlObject ¶ms) -{ - double *ptr = (double *)m; - - if (params.size() != 9) { - LOG(RPiCcm, Error) << "Wrong number of values in CCM"; - return -EINVAL; - } - - for (const auto ¶m : params.asList()) { - auto value = param.get<double>(); - if (!value) - return -EINVAL; - *ptr++ = *value; - } - - return 0; -} +using Matrix3x3 = Matrix<double, 3, 3>; Ccm::Ccm(Controller *controller) : CcmAlgorithm(controller), saturation_(1.0) {} @@ -68,12 +41,10 @@ char const *Ccm::name() const int Ccm::read(const libcamera::YamlObject ¶ms) { - int ret; - if (params.contains("saturation")) { - ret = config_.saturation.read(params["saturation"]); - if (ret) - return ret; + config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{}); + if (config_.saturation.empty()) + return -EINVAL; } for (auto &p : params["ccms"].asList()) { @@ -83,9 +54,12 @@ int Ccm::read(const libcamera::YamlObject ¶ms) CtCcm ctCcm; ctCcm.ct = *value; - ret = ctCcm.ccm.read(p["ccm"]); - if (ret) - return ret; + + auto ccm = p["ccm"].get<Matrix3x3>(); + if (!ccm) + return -EINVAL; + + ctCcm.ccm = *ccm; if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) { LOG(RPiCcm, Error) @@ -113,8 +87,10 @@ void Ccm::initialise() { } +namespace { + template<typename T> -static bool getLocked(Metadata *metadata, std::string const &tag, T &value) +bool 
getLocked(Metadata *metadata, std::string const &tag, T &value) { T *ptr = metadata->getLocked<T>(tag); if (ptr == nullptr) @@ -123,7 +99,7 @@ static bool getLocked(Metadata *metadata, std::string const &tag, T &value) return true; } -Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) +Matrix3x3 calculateCcm(std::vector<CtCcm> const &ccms, double ct) { if (ct <= ccms.front().ct) return ccms.front().ccm; @@ -139,16 +115,25 @@ Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) } } -Matrix applySaturation(Matrix const &ccm, double saturation) +Matrix3x3 applySaturation(Matrix3x3 const &ccm, double saturation) { - Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419, - -0.081); - Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771, - 0.000); - Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation); + static const Matrix3x3 RGB2Y({ 0.299, 0.587, 0.114, + -0.169, -0.331, 0.500, + 0.500, -0.419, -0.081 }); + + static const Matrix3x3 Y2RGB({ 1.000, 0.000, 1.402, + 1.000, -0.345, -0.714, + 1.000, 1.771, 0.000 }); + + Matrix3x3 S({ 1, 0, 0, + 0, saturation, 0, + 0, 0, saturation }); + return Y2RGB * S * RGB2Y * ccm; } +} /* namespace */ + void Ccm::prepare(Metadata *imageMetadata) { bool awbOk = false, luxOk = false; @@ -166,18 +151,18 @@ void Ccm::prepare(Metadata *imageMetadata) LOG(RPiCcm, Warning) << "no colour temperature found"; if (!luxOk) LOG(RPiCcm, Warning) << "no lux value found"; - Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK); + Matrix3x3 ccm = calculateCcm(config_.ccms, awb.temperatureK); double saturation = saturation_; struct CcmStatus ccmStatus; ccmStatus.saturation = saturation; if (!config_.saturation.empty()) saturation *= config_.saturation.eval( - config_.saturation.domain().clip(lux.lux)); + config_.saturation.domain().clamp(lux.lux)); ccm = applySaturation(ccm, saturation); for (int j = 0; j < 3; j++) for (int i = 0; i < 3; i++) ccmStatus.matrix[j * 3 + i] = - std::max(-8.0, std::min(7.9999, ccm.m[j][i])); + std::max(-8.0, std::min(7.9999, ccm[j][i])); LOG(RPiCcm, Debug) << "colour temperature " << awb.temperatureK << "K"; LOG(RPiCcm, Debug) diff --git a/src/ipa/rpi/controller/rpi/ccm.h b/src/ipa/rpi/controller/rpi/ccm.h index 286d0b33..c05dbb17 100644 --- a/src/ipa/rpi/controller/rpi/ccm.h +++ b/src/ipa/rpi/controller/rpi/ccm.h @@ -2,59 +2,29 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * ccm.h - CCM (colour correction matrix) control algorithm + * CCM (colour correction matrix) control algorithm */ #pragma once #include <vector> +#include "libcamera/internal/matrix.h" +#include <libipa/pwl.h> + #include "../ccm_algorithm.h" -#include "../pwl.h" namespace RPiController { /* Algorithm to calculate colour matrix. Should be placed after AWB. 
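A minimal standalone sketch of applySaturation() above, with plain arrays standing in for libcamera::Matrix (an assumption for illustration): saturation scales the chroma rows of a YCbCr-like conversion, and the product Y2RGB * S * RGB2Y is folded into the CCM.

#include <array>

using Mat3 = std::array<std::array<double, 3>, 3>;

static Mat3 mul(const Mat3 &a, const Mat3 &b)
{
	Mat3 r{};
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			for (int k = 0; k < 3; k++)
				r[i][j] += a[i][k] * b[k][j];
	return r;
}

/* Illustrative only; the coefficients match the approximate matrices above. */
static Mat3 applySaturationSketch(const Mat3 &ccm, double saturation)
{
	const Mat3 rgb2y = { { { 0.299, 0.587, 0.114 },
			       { -0.169, -0.331, 0.500 },
			       { 0.500, -0.419, -0.081 } } };
	const Mat3 y2rgb = { { { 1.000, 0.000, 1.402 },
			       { 1.000, -0.345, -0.714 },
			       { 1.000, 1.771, 0.000 } } };
	const Mat3 s = { { { 1, 0, 0 },
			   { 0, saturation, 0 },
			   { 0, 0, saturation } } };

	/* Y2RGB * S * RGB2Y * ccm, i.e. scale chroma after the colour matrix. */
	return mul(y2rgb, mul(s, mul(rgb2y, ccm)));
}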
*/ -struct Matrix { - Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8); - Matrix(); - double m[3][3]; - int read(const libcamera::YamlObject ¶ms); -}; -static inline Matrix operator*(double d, Matrix const &m) -{ - return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d, - m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d, - m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d); -} -static inline Matrix operator*(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][0] * m2.m[0][j] + - m1.m[i][1] * m2.m[1][j] + - m1.m[i][2] * m2.m[2][j]; - return m; -} -static inline Matrix operator+(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][j] + m2.m[i][j]; - return m; -} - struct CtCcm { double ct; - Matrix ccm; + libcamera::Matrix<double, 3, 3> ccm; }; struct CcmConfig { std::vector<CtCcm> ccms; - Pwl saturation; + libcamera::ipa::Pwl saturation; }; class Ccm : public CcmAlgorithm diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp index 4e038a02..fe866a54 100644 --- a/src/ipa/rpi/controller/rpi/contrast.cpp +++ b/src/ipa/rpi/controller/rpi/contrast.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * contrast.cpp - contrast (gamma) control algorithm + * contrast (gamma) control algorithm */ #include <stdint.h> @@ -53,7 +53,9 @@ int Contrast::read(const libcamera::YamlObject ¶ms) config_.hiHistogram = params["hi_histogram"].get<double>(0.95); config_.hiLevel = params["hi_level"].get<double>(0.95); config_.hiMax = params["hi_max"].get<double>(2000); - return config_.gammaCurve.read(params["gamma_curve"]); + + config_.gammaCurve = params["gamma_curve"].get<ipa::Pwl>(ipa::Pwl{}); + return config_.gammaCurve.empty() ? -EINVAL : 0; } void Contrast::setBrightness(double brightness) @@ -92,10 +94,12 @@ void Contrast::prepare(Metadata *imageMetadata) imageMetadata->set("contrast.status", status_); } -Pwl computeStretchCurve(Histogram const &histogram, +namespace { + +ipa::Pwl computeStretchCurve(Histogram const &histogram, ContrastConfig const &config) { - Pwl enhance; + ipa::Pwl enhance; enhance.append(0, 0); /* * If the start of the histogram is rather empty, try to pull it down a @@ -136,10 +140,10 @@ Pwl computeStretchCurve(Histogram const &histogram, return enhance; } -Pwl applyManualContrast(Pwl const &gammaCurve, double brightness, - double contrast) +ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness, + double contrast) { - Pwl newGammaCurve; + ipa::Pwl newGammaCurve; LOG(RPiContrast, Debug) << "Manual brightness " << brightness << " contrast " << contrast; gammaCurve.map([&](double x, double y) { @@ -151,6 +155,8 @@ Pwl applyManualContrast(Pwl const &gammaCurve, double brightness, return newGammaCurve; } +} /* namespace */ + void Contrast::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata) { @@ -160,7 +166,7 @@ void Contrast::process(StatisticsPtr &stats, * ways: 1. Adjust the gamma curve so as to pull the start of the * histogram down, and possibly push the end up. 
*/ - Pwl gammaCurve = config_.gammaCurve; + ipa::Pwl gammaCurve = config_.gammaCurve; if (ceEnable_) { if (config_.loMax != 0 || config_.hiMax != 0) gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve); diff --git a/src/ipa/rpi/controller/rpi/contrast.h b/src/ipa/rpi/controller/rpi/contrast.h index 59aa70dc..c0f7db98 100644 --- a/src/ipa/rpi/controller/rpi/contrast.h +++ b/src/ipa/rpi/controller/rpi/contrast.h @@ -2,14 +2,15 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * contrast.h - contrast (gamma) control algorithm + * contrast (gamma) control algorithm */ #pragma once #include <mutex> +#include <libipa/pwl.h> + #include "../contrast_algorithm.h" -#include "../pwl.h" namespace RPiController { @@ -26,7 +27,7 @@ struct ContrastConfig { double hiHistogram; double hiLevel; double hiMax; - Pwl gammaCurve; + libcamera::ipa::Pwl gammaCurve; }; class Contrast : public ContrastAlgorithm diff --git a/src/ipa/rpi/controller/rpi/denoise.cpp b/src/ipa/rpi/controller/rpi/denoise.cpp index 154ee604..ba851658 100644 --- a/src/ipa/rpi/controller/rpi/denoise.cpp +++ b/src/ipa/rpi/controller/rpi/denoise.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2022 Raspberry Pi Ltd * - * Denoise.cpp - Denoise (spatial, colour, temporal) control algorithm + * Denoise (spatial, colour, temporal) control algorithm */ #include "denoise.h" diff --git a/src/ipa/rpi/controller/rpi/dpc.cpp b/src/ipa/rpi/controller/rpi/dpc.cpp index be3871df..8aac03f7 100644 --- a/src/ipa/rpi/controller/rpi/dpc.cpp +++ b/src/ipa/rpi/controller/rpi/dpc.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * dpc.cpp - DPC (defective pixel correction) control algorithm + * DPC (defective pixel correction) control algorithm */ #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/dpc.h b/src/ipa/rpi/controller/rpi/dpc.h index 84a05604..9cefb06d 100644 --- a/src/ipa/rpi/controller/rpi/dpc.h +++ b/src/ipa/rpi/controller/rpi/dpc.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * dpc.h - DPC (defective pixel correction) control algorithm + * DPC (defective pixel correction) control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/focus.h b/src/ipa/rpi/controller/rpi/focus.h index 8556039d..ee014be9 100644 --- a/src/ipa/rpi/controller/rpi/focus.h +++ b/src/ipa/rpi/controller/rpi/focus.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2020, Raspberry Pi Ltd * - * focus.h - focus algorithm + * focus algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/geq.cpp b/src/ipa/rpi/controller/rpi/geq.cpp index 510870e9..40e7191b 100644 --- a/src/ipa/rpi/controller/rpi/geq.cpp +++ b/src/ipa/rpi/controller/rpi/geq.cpp @@ -2,14 +2,13 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * geq.cpp - GEQ (green equalisation) control algorithm + * GEQ (green equalisation) control algorithm */ #include <libcamera/base/log.h> #include "../device_status.h" #include "../lux_status.h" -#include "../pwl.h" #include "geq.h" @@ -45,9 +44,9 @@ int Geq::read(const libcamera::YamlObject ¶ms) } if (params.contains("strength")) { - int ret = config_.strength.read(params["strength"]); - if (ret) - return ret; + config_.strength = params["strength"].get<ipa::Pwl>(ipa::Pwl{}); + if (config_.strength.empty()) + return -EINVAL; } return 0; @@ -67,7 +66,7 @@ void Geq::prepare(Metadata *imageMetadata) GeqStatus geqStatus = {}; double strength = config_.strength.empty() ? 
1.0 - : config_.strength.eval(config_.strength.domain().clip(luxStatus.lux)); + : config_.strength.eval(config_.strength.domain().clamp(luxStatus.lux)); strength *= deviceStatus.analogueGain; double offset = config_.offset * strength; double slope = config_.slope * strength; diff --git a/src/ipa/rpi/controller/rpi/geq.h b/src/ipa/rpi/controller/rpi/geq.h index ee3a52ff..e8b9f427 100644 --- a/src/ipa/rpi/controller/rpi/geq.h +++ b/src/ipa/rpi/controller/rpi/geq.h @@ -2,10 +2,12 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * geq.h - GEQ (green equalisation) control algorithm + * GEQ (green equalisation) control algorithm */ #pragma once +#include <libipa/pwl.h> + #include "../algorithm.h" #include "../geq_status.h" @@ -16,7 +18,7 @@ namespace RPiController { struct GeqConfig { uint16_t offset; double slope; - Pwl strength; /* lux to strength factor */ + libcamera::ipa::Pwl strength; /* lux to strength factor */ }; class Geq : public Algorithm diff --git a/src/ipa/rpi/controller/rpi/hdr.cpp b/src/ipa/rpi/controller/rpi/hdr.cpp index fb580548..f3da8291 100644 --- a/src/ipa/rpi/controller/rpi/hdr.cpp +++ b/src/ipa/rpi/controller/rpi/hdr.cpp @@ -2,11 +2,13 @@ /* * Copyright (C) 2023 Raspberry Pi Ltd * - * hdr.cpp - HDR control algorithm + * HDR control algorithm */ #include "hdr.h" +#include <cmath> + #include <libcamera/base/log.h> #include "../agc_status.h" @@ -39,25 +41,52 @@ void HdrConfig::read(const libcamera::YamlObject ¶ms, const std::string &mod channelMap[v.get<unsigned int>().value()] = k; /* Lens shading related parameters. */ - if (params.contains("spatial_gain")) { - spatialGain.read(params["spatial_gain"]); - diffusion = params["diffusion"].get<unsigned int>(3); - /* Clip to an arbitrary limit just to stop typos from killing the system! */ - const unsigned int MAX_DIFFUSION = 15; - if (diffusion > MAX_DIFFUSION) { - diffusion = MAX_DIFFUSION; - LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION; - } + if (params.contains("spatial_gain_curve")) { + spatialGainCurve = params["spatial_gain_curve"].get<ipa::Pwl>(ipa::Pwl{}); + } else if (params.contains("spatial_gain")) { + double spatialGain = params["spatial_gain"].get<double>(2.0); + spatialGainCurve.append(0.0, spatialGain); + spatialGainCurve.append(0.01, spatialGain); + spatialGainCurve.append(0.06, 1.0); /* maybe make this programmable? */ + spatialGainCurve.append(1.0, 1.0); + } + + diffusion = params["diffusion"].get<unsigned int>(3); + /* Clip to an arbitrary limit just to stop typos from killing the system! */ + const unsigned int MAX_DIFFUSION = 15; + if (diffusion > MAX_DIFFUSION) { + diffusion = MAX_DIFFUSION; + LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION; } /* Read any tonemap parameters. 
*/ tonemapEnable = params["tonemap_enable"].get<int>(0); - detailConstant = params["detail_constant"].get<uint16_t>(50); - detailSlope = params["detail_slope"].get<double>(8.0); + detailConstant = params["detail_constant"].get<uint16_t>(0); + detailSlope = params["detail_slope"].get<double>(0.0); iirStrength = params["iir_strength"].get<double>(8.0); strength = params["strength"].get<double>(1.5); if (tonemapEnable) - tonemap.read(params["tonemap"]); + tonemap = params["tonemap"].get<ipa::Pwl>(ipa::Pwl{}); + speed = params["speed"].get<double>(1.0); + if (params.contains("hi_quantile_targets")) { + hiQuantileTargets = params["hi_quantile_targets"].getList<double>().value(); + if (hiQuantileTargets.empty() || hiQuantileTargets.size() % 2) + LOG(RPiHdr, Fatal) << "hi_quantile_targets must be even and non-empty"; + } else + hiQuantileTargets = { 0.95, 0.65, 0.5, 0.28, 0.3, 0.25 }; + hiQuantileMaxGain = params["hi_quantile_max_gain"].get<double>(1.6); + if (params.contains("quantile_targets")) { + quantileTargets = params["quantile_targets"].getList<double>().value(); + if (quantileTargets.empty() || quantileTargets.size() % 2) + LOG(RPiHdr, Fatal) << "quantile_targets must be even and non-empty"; + } else + quantileTargets = { 0.2, 0.03, 1.0, 0.15 }; + powerMin = params["power_min"].get<double>(0.65); + powerMax = params["power_max"].get<double>(1.0); + if (params.contains("contrast_adjustments")) { + contrastAdjustments = params["contrast_adjustments"].getList<double>().value(); + } else + contrastAdjustments = { 0.5, 0.75 }; /* Read any stitch parameters. */ stitchEnable = params["stitch_enable"].get<int>(0); @@ -159,7 +188,7 @@ void Hdr::prepare(Metadata *imageMetadata) } HdrConfig &config = it->second; - if (config.spatialGain.empty()) + if (config.spatialGainCurve.empty()) return; AlscStatus alscStatus{}; /* some compilers seem to require the braces */ @@ -183,7 +212,7 @@ bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config /* When there's a change of HDR mode we start over with a new tonemap curve. */ if (delayedStatus_.mode != previousMode_) { previousMode_ = delayedStatus_.mode; - tonemap_ = Pwl(); + tonemap_ = ipa::Pwl(); } /* No tonemapping. No need to output a tonemap.status. */ @@ -205,10 +234,61 @@ bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config return true; /* - * If we wanted to build or adjust tonemaps dynamically, this would be the place - * to do it. But for now we seem to be getting by without. + * Create a tonemap dynamically. We have three ingredients. + * + * 1. We have a list of "hi quantiles" and "targets". We use these to judge if + * the image does seem to be reasonably saturated. If it isn't, we calculate + * a gain that we will feed as a linear factor into the tonemap generation. + * This prevents unsaturated images from becoming quite so "flat". + * + * 2. We have a list of quantile/target pairs for the bottom of the histogram. + * We use these to calculate how much gain we must apply to the bottom of the + * tonemap. We apply this gain as a power curve so as not to blow out the top + * end. + * + * 3. Finally, when we generate the tonemap, we have some contrast adjustments + * for the bottom because we know that power curves can start quite steeply and + * cause a washed-out look. */ + /* Compute the linear gain from the headroom for saturation at the top.
*/ + double gain = 10; /* arbitrary, but hiQuantileMaxGain will clamp it later */ + for (unsigned int i = 0; i < config.hiQuantileTargets.size(); i += 2) { + double quantile = config.hiQuantileTargets[i]; + double target = config.hiQuantileTargets[i + 1]; + double value = stats->yHist.interQuantileMean(quantile, 1.0) / 1024.0; + double newGain = target / (value + 0.01); + gain = std::min(gain, newGain); + } + gain = std::clamp(gain, 1.0, config.hiQuantileMaxGain); + + /* Compute the power curve from the amount of gain needed at the bottom. */ + double min_power = 2; /* arbitrary, but config.powerMax will clamp it later */ + for (unsigned int i = 0; i < config.quantileTargets.size(); i += 2) { + double quantile = config.quantileTargets[i]; + double target = config.quantileTargets[i + 1]; + double value = stats->yHist.interQuantileMean(0, quantile) / 1024.0; + value = std::min(value * gain, 1.0); + double power = log(target + 1e-6) / log(value + 1e-6); + min_power = std::min(min_power, power); + } + double power = std::clamp(min_power, config.powerMin, config.powerMax); + + /* Generate the tonemap, including the contrast adjustment factors. */ + libcamera::ipa::Pwl tonemap; + tonemap.append(0, 0); + for (unsigned int i = 0; i <= 6; i++) { + double x = 1 << (i + 9); /* x loops from 512 to 32768 inclusive */ + double y = pow(std::min(x * gain, 65535.0) / 65536.0, power) * 65536; + if (i < config.contrastAdjustments.size()) + y *= config.contrastAdjustments[i]; + if (!tonemap_.empty()) + y = y * config.speed + tonemap_.eval(x) * (1 - config.speed); + tonemap.append(x, y); + } + tonemap.append(65535, 65535); + tonemap_ = tonemap; + return true; } @@ -255,7 +335,7 @@ static void averageGains(std::vector<double> &src, std::vector<double> &dst, con void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config) { - if (config.spatialGain.empty()) + if (config.spatialGainCurve.empty()) return; /* When alternating exposures, only compute these gains for the short frame. */ @@ -270,7 +350,7 @@ void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config) double g = region.val.gSum / counted; double b = region.val.bSum / counted; double brightness = std::max({ r, g, b }) / 65535; - gains_[0][i] = config.spatialGain.eval(brightness); + gains_[0][i] = config.spatialGainCurve.eval(brightness); } /* Ping-pong between the two gains_ buffers. */ diff --git a/src/ipa/rpi/controller/rpi/hdr.h b/src/ipa/rpi/controller/rpi/hdr.h index 980aa3d1..5c2f3988 100644 --- a/src/ipa/rpi/controller/rpi/hdr.h +++ b/src/ipa/rpi/controller/rpi/hdr.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023, Raspberry Pi Ltd * - * hdr.h - HDR control algorithm + * HDR control algorithm */ #pragma once @@ -12,9 +12,10 @@ #include <libcamera/geometry.h> +#include <libipa/pwl.h> + #include "../hdr_algorithm.h" #include "../hdr_status.h" -#include "../pwl.h" /* This is our implementation of an HDR algorithm. */ @@ -26,7 +27,7 @@ struct HdrConfig { std::map<unsigned int, std::string> channelMap; /* Lens shading related parameters. */ - Pwl spatialGain; /* Brightness to gain curve for different image regions. */ + libcamera::ipa::Pwl spatialGainCurve; /* Brightness to gain curve for different image regions. */ unsigned int diffusion; /* How much to diffuse the gain spatially. */ /* Tonemap related parameters. */ @@ -35,7 +36,15 @@ struct HdrConfig { double detailSlope; double iirStrength; double strength; - Pwl tonemap; + libcamera::ipa::Pwl tonemap; + /* These relate to adaptive tonemap calculation. 
*/ + double speed; + std::vector<double> hiQuantileTargets; /* quantiles to check for unsaturated images */ + double hiQuantileMaxGain; /* the max gain we'll apply when unsaturated */ + std::vector<double> quantileTargets; /* target values for histogram quantiles */ + double powerMin; /* minimum tonemap power */ + double powerMax; /* maximum tonemap power */ + std::vector<double> contrastAdjustments; /* any contrast adjustment factors */ /* Stitch related parameters. */ bool stitchEnable; @@ -67,7 +76,7 @@ private: HdrStatus status_; /* track the current HDR mode and channel */ HdrStatus delayedStatus_; /* track the delayed HDR mode and channel */ std::string previousMode_; - Pwl tonemap_; + libcamera::ipa::Pwl tonemap_; libcamera::Size regions_; /* stats regions */ unsigned int numRegions_; /* total number of stats regions */ std::vector<double> gains_[2]; diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp index 06625f3a..27b89a8f 100644 --- a/src/ipa/rpi/controller/rpi/lux.cpp +++ b/src/ipa/rpi/controller/rpi/lux.cpp @@ -2,9 +2,8 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * lux.cpp - Lux control algorithm + * Lux control algorithm */ -#include <math.h> #include <libcamera/base/log.h> @@ -41,7 +40,7 @@ int Lux::read(const libcamera::YamlObject ¶ms) auto value = params["reference_shutter_speed"].get<double>(); if (!value) return -EINVAL; - referenceShutterSpeed_ = *value * 1.0us; + referenceExposureTime_ = *value * 1.0us; value = params["reference_gain"].get<double>(); if (!value) @@ -83,11 +82,11 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata) double currentAperture = deviceStatus.aperture.value_or(currentAperture_); double currentY = stats->yHist.interQuantileMean(0, 1); double gainRatio = referenceGain_ / currentGain; - double shutterSpeedRatio = - referenceShutterSpeed_ / deviceStatus.shutterSpeed; + double exposureTimeRatio = + referenceExposureTime_ / deviceStatus.exposureTime; double apertureRatio = referenceAperture_ / currentAperture; double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_; - double estimatedLux = shutterSpeedRatio * gainRatio * + double estimatedLux = exposureTimeRatio * gainRatio * apertureRatio * apertureRatio * yRatio * referenceLux_; LuxStatus status; diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h index 89411a54..da007fe9 100644 --- a/src/ipa/rpi/controller/rpi/lux.h +++ b/src/ipa/rpi/controller/rpi/lux.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * lux.h - Lux control algorithm + * Lux control algorithm */ #pragma once @@ -32,7 +32,7 @@ private: * These values define the conditions of the reference image, against * which we compare the new image. */ - libcamera::utils::Duration referenceShutterSpeed_; + libcamera::utils::Duration referenceExposureTime_; double referenceGain_; double referenceAperture_; /* units of 1/f */ double referenceY_; /* out of 65536 */ diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp index bcd8b9ed..145175fb 100644 --- a/src/ipa/rpi/controller/rpi/noise.cpp +++ b/src/ipa/rpi/controller/rpi/noise.cpp @@ -2,10 +2,10 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * noise.cpp - Noise control algorithm + * Noise control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -69,7 +69,7 @@ void Noise::prepare(Metadata *imageMetadata) * make some adjustments based on the camera mode (such as * binning), if we knew how to discover it... 
*/ - double factor = sqrt(deviceStatus.analogueGain) / modeFactor_; + double factor = std::sqrt(deviceStatus.analogueGain) / modeFactor_; struct NoiseStatus status; status.noiseConstant = referenceConstant_ * factor; status.noiseSlope = referenceSlope_ * factor; diff --git a/src/ipa/rpi/controller/rpi/noise.h b/src/ipa/rpi/controller/rpi/noise.h index 74c31e64..6deae1f0 100644 --- a/src/ipa/rpi/controller/rpi/noise.h +++ b/src/ipa/rpi/controller/rpi/noise.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * noise.h - Noise control algorithm + * Noise control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/saturation.cpp b/src/ipa/rpi/controller/rpi/saturation.cpp index 813540e5..b83c5887 100644 --- a/src/ipa/rpi/controller/rpi/saturation.cpp +++ b/src/ipa/rpi/controller/rpi/saturation.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2022 Raspberry Pi Ltd * - * saturation.cpp - Saturation control algorithm + * Saturation control algorithm */ #include "saturation.h" diff --git a/src/ipa/rpi/controller/rpi/sdn.cpp b/src/ipa/rpi/controller/rpi/sdn.cpp index 2f777dd7..619178a8 100644 --- a/src/ipa/rpi/controller/rpi/sdn.cpp +++ b/src/ipa/rpi/controller/rpi/sdn.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019-2021, Raspberry Pi Ltd * - * sdn.cpp - SDN (spatial denoise) control algorithm + * SDN (spatial denoise) control algorithm */ #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/sdn.h b/src/ipa/rpi/controller/rpi/sdn.h index 9dd73c38..cb226de8 100644 --- a/src/ipa/rpi/controller/rpi/sdn.h +++ b/src/ipa/rpi/controller/rpi/sdn.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * sdn.h - SDN (spatial denoise) control algorithm + * SDN (spatial denoise) control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp index 4f6f020a..1d143ff5 100644 --- a/src/ipa/rpi/controller/rpi/sharpen.cpp +++ b/src/ipa/rpi/controller/rpi/sharpen.cpp @@ -2,10 +2,10 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * sharpen.cpp - sharpening control algorithm + * sharpening control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -68,7 +68,7 @@ void Sharpen::prepare(Metadata *imageMetadata) * we adjust the limit and threshold less aggressively. Using a sqrt * function is an arbitrary but gentle way of accomplishing this. 
*/ - double userStrengthSqrt = sqrt(userStrength_); + double userStrengthSqrt = std::sqrt(userStrength_); struct SharpenStatus status; /* * Binned modes seem to need the sharpening toned down with this diff --git a/src/ipa/rpi/controller/rpi/sharpen.h b/src/ipa/rpi/controller/rpi/sharpen.h index 8bb7631e..96ccd609 100644 --- a/src/ipa/rpi/controller/rpi/sharpen.h +++ b/src/ipa/rpi/controller/rpi/sharpen.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * sharpen.h - sharpening control algorithm + * sharpening control algorithm */ #pragma once diff --git a/src/ipa/rpi/controller/rpi/tonemap.cpp b/src/ipa/rpi/controller/rpi/tonemap.cpp index 5f8b2bf2..3422adfe 100644 --- a/src/ipa/rpi/controller/rpi/tonemap.cpp +++ b/src/ipa/rpi/controller/rpi/tonemap.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2022 Raspberry Pi Ltd * - * tonemap.cpp - Tonemap control algorithm + * Tonemap control algorithm */ #include "tonemap.h" @@ -33,7 +33,7 @@ int Tonemap::read(const libcamera::YamlObject ¶ms) config_.detailSlope = params["detail_slope"].get<double>(0.1); config_.iirStrength = params["iir_strength"].get<double>(1.0); config_.strength = params["strength"].get<double>(1.0); - config_.tonemap.read(params["tone_curve"]); + config_.tonemap = params["tone_curve"].get<ipa::Pwl>(ipa::Pwl{}); return 0; } diff --git a/src/ipa/rpi/controller/rpi/tonemap.h b/src/ipa/rpi/controller/rpi/tonemap.h index f25aa47f..ba0cf5c4 100644 --- a/src/ipa/rpi/controller/rpi/tonemap.h +++ b/src/ipa/rpi/controller/rpi/tonemap.h @@ -6,8 +6,9 @@ */ #pragma once +#include <libipa/pwl.h> + #include "algorithm.h" -#include "pwl.h" namespace RPiController { @@ -16,7 +17,7 @@ struct TonemapConfig { double detailSlope; double iirStrength; double strength; - Pwl tonemap; + libcamera::ipa::Pwl tonemap; }; class Tonemap : public Algorithm diff --git a/src/ipa/rpi/controller/saturation_status.h b/src/ipa/rpi/controller/saturation_status.h index 337b66a3..c7fadc99 100644 --- a/src/ipa/rpi/controller/saturation_status.h +++ b/src/ipa/rpi/controller/saturation_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022 Raspberry Pi Ltd * - * saturation_status.h - Saturation control algorithm status + * Saturation control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/sharpen_algorithm.h b/src/ipa/rpi/controller/sharpen_algorithm.h index 3be21c32..abd82cb2 100644 --- a/src/ipa/rpi/controller/sharpen_algorithm.h +++ b/src/ipa/rpi/controller/sharpen_algorithm.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2020, Raspberry Pi Ltd * - * sharpen_algorithm.h - sharpness control algorithm interface + * sharpness control algorithm interface */ #pragma once diff --git a/src/ipa/rpi/controller/sharpen_status.h b/src/ipa/rpi/controller/sharpen_status.h index 106166db..74910199 100644 --- a/src/ipa/rpi/controller/sharpen_status.h +++ b/src/ipa/rpi/controller/sharpen_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Raspberry Pi Ltd * - * sharpen_status.h - Sharpen control algorithm status + * Sharpen control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/statistics.h b/src/ipa/rpi/controller/statistics.h index 015d4efc..cbd81161 100644 --- a/src/ipa/rpi/controller/statistics.h +++ b/src/ipa/rpi/controller/statistics.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2022, Raspberry Pi Ltd * - * statistics.h - Raspberry Pi generic statistics structure + * Raspberry Pi generic statistics structure */ #pragma once diff --git a/src/ipa/rpi/controller/stitch_status.h b/src/ipa/rpi/controller/stitch_status.h index b17800ed..7812f3e3 100644 --- 
a/src/ipa/rpi/controller/stitch_status.h +++ b/src/ipa/rpi/controller/stitch_status.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2023 Raspberry Pi Ltd * - * stitch_status.h - stitch control algorithm status + * stitch control algorithm status */ #pragma once diff --git a/src/ipa/rpi/controller/tonemap_status.h b/src/ipa/rpi/controller/tonemap_status.h index 0e639946..0364ff66 100644 --- a/src/ipa/rpi/controller/tonemap_status.h +++ b/src/ipa/rpi/controller/tonemap_status.h @@ -2,16 +2,16 @@ /* * Copyright (C) 2022 Raspberry Pi Ltd * - * hdr.h - Tonemap control algorithm status + * Tonemap control algorithm status */ #pragma once -#include "pwl.h" +#include <libipa/pwl.h> struct TonemapStatus { uint16_t detailConstant; double detailSlope; double iirStrength; double strength; - RPiController::Pwl tonemap; + libcamera::ipa::Pwl tonemap; };
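A minimal standalone sketch of the adaptive tonemap construction described in hdr.cpp above, with a vector of breakpoints standing in for the libipa Pwl; the helper name and its gain/power inputs are assumptions (in the patch they are derived from the hi-quantile and quantile targets), and the IIR blend with the previous curve via the speed parameter is omitted. Each breakpoint follows y = (min(x * gain, 65535) / 65536)^power * 65536, with the configured contrast adjustments softening the lowest entries.

#include <algorithm>
#include <cmath>
#include <utility>
#include <vector>

using Curve = std::vector<std::pair<double, double>>; /* (x, y) breakpoints */

/* Illustrative only; gain and power come from the quantile measurements. */
static Curve makeTonemap(double gain, double power,
			 const std::vector<double> &contrastAdjustments)
{
	Curve tonemap;
	tonemap.emplace_back(0.0, 0.0);
	for (unsigned int i = 0; i <= 6; i++) {
		double x = 1 << (i + 9); /* 512, 1024, ..., 32768 */
		double y = std::pow(std::min(x * gain, 65535.0) / 65536.0, power) * 65536.0;
		if (i < contrastAdjustments.size())
			y *= contrastAdjustments[i]; /* soften the very bottom */
		tonemap.emplace_back(x, y);
	}
	tonemap.emplace_back(65535.0, 65535.0);
	return tonemap;
}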