author    | Naushir Patuck <naush@raspberrypi.com>               | 2023-05-03 13:20:27 +0100
committer | Laurent Pinchart <laurent.pinchart@ideasonboard.com> | 2023-05-04 20:47:40 +0300
commit    | 726e9274ea95fa46352556d340c5793a8da51fcd (patch)
tree      | 80f6adcdbf744f9317e09eff3e80c602b384a753 /src/ipa/raspberrypi/controller
parent    | 46aefed208fef4bc8d6f6e8882b92b9af710a60b (diff)
pipeline: ipa: raspberrypi: Refactor and move the Raspberry Pi code
Split the Raspberry Pi pipeline handler and IPA source code into common
and VC4/BCM2835-specific file structures.
For the pipeline handler, the common code files now live in
src/libcamera/pipeline/rpi/common/
and the VC4-specific files in src/libcamera/pipeline/rpi/vc4/.
For the IPA, the common code files now live in
src/ipa/rpi/{cam_helper,controller}/
and the VC4-specific files in src/ipa/rpi/vc4/. With this change, the
camera tuning files are now installed under share/libcamera/ipa/rpi/vc4/.
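For reference, the new layout described above can be summarised as a directory sketch (paths only, taken from this message; the per-directory notes are inferred from the names):

    src/libcamera/pipeline/rpi/
        common/        # pipeline handler code shared across Raspberry Pi platforms
        vc4/           # VC4/BCM2835-specific pipeline handler
    src/ipa/rpi/
        cam_helper/    # common IPA camera helpers
        controller/    # common IPA control algorithms
        vc4/           # VC4-specific IPA code
    share/libcamera/ipa/rpi/vc4/   # installed camera tuning files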
To build the pipeline and IPA, the meson configuration options have now
changed from "raspberrypi" to "rpi/vc4":
meson setup build -Dipas=rpi/vc4 -Dpipelines=rpi/vc4
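As a hedged illustration of the option rename (the previous option values are inferred from the "changed from \"raspberrypi\"" wording above, not taken from an old tree), a typical build configuration changes as follows:

    # before this change (inferred old option names)
    meson setup build -Dipas=raspberrypi -Dpipelines=raspberrypi

    # after this change
    meson setup build -Dipas=rpi/vc4 -Dpipelines=rpi/vc4
    ninja -C build install   # tuning files now land in <prefix>/share/libcamera/ipa/rpi/vc4/

An existing build directory configured with the old option values would likely need to be reconfigured (or recreated) for the new names to take effect.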
Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
Reviewed-by: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Diffstat (limited to 'src/ipa/raspberrypi/controller')
61 files changed, 0 insertions, 7125 deletions
diff --git a/src/ipa/raspberrypi/controller/af_algorithm.h b/src/ipa/raspberrypi/controller/af_algorithm.h deleted file mode 100644 index ad9b5754..00000000 --- a/src/ipa/raspberrypi/controller/af_algorithm.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022, Raspberry Pi Ltd - * - * af_algorithm.hpp - auto focus algorithm interface - */ -#pragma once - -#include <optional> - -#include <libcamera/base/span.h> - -#include "algorithm.h" - -namespace RPiController { - -class AfAlgorithm : public Algorithm -{ -public: - AfAlgorithm(Controller *controller) - : Algorithm(controller) {} - - /* - * An autofocus algorithm should provide the following calls. - * - * Where a ControlList combines a change of AfMode with other AF - * controls, setMode() should be called first, to ensure the - * algorithm will be in the correct state to handle controls. - * - * setLensPosition() returns true if the mode was AfModeManual and - * the lens position has changed, otherwise returns false. When it - * returns true, hwpos should be sent immediately to the lens driver. - * - * getMode() is provided mainly for validating controls. - * getLensPosition() is provided for populating DeviceStatus. - */ - - enum AfRange { AfRangeNormal = 0, - AfRangeMacro, - AfRangeFull, - AfRangeMax }; - - enum AfSpeed { AfSpeedNormal = 0, - AfSpeedFast, - AfSpeedMax }; - - enum AfMode { AfModeManual = 0, - AfModeAuto, - AfModeContinuous }; - - enum AfPause { AfPauseImmediate = 0, - AfPauseDeferred, - AfPauseResume }; - - virtual void setRange([[maybe_unused]] AfRange range) - { - } - virtual void setSpeed([[maybe_unused]] AfSpeed speed) - { - } - virtual void setMetering([[maybe_unused]] bool use_windows) - { - } - virtual void setWindows([[maybe_unused]] libcamera::Span<libcamera::Rectangle const> const &wins) - { - } - virtual void setMode(AfMode mode) = 0; - virtual AfMode getMode() const = 0; - virtual bool setLensPosition(double dioptres, int32_t *hwpos) = 0; - virtual std::optional<double> getLensPosition() const = 0; - virtual void triggerScan() = 0; - virtual void cancelScan() = 0; - virtual void pause(AfPause pause) = 0; -}; - -} // namespace RPiController diff --git a/src/ipa/raspberrypi/controller/af_status.h b/src/ipa/raspberrypi/controller/af_status.h deleted file mode 100644 index 92c08812..00000000 --- a/src/ipa/raspberrypi/controller/af_status.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022, Raspberry Pi Ltd - * - * af_status.h - AF control algorithm status - */ -#pragma once - -#include <optional> - -/* - * The AF algorithm should post the following structure into the image's - * "af.status" metadata. lensSetting should control the lens. 
- */ - -enum class AfState { - Idle = 0, - Scanning, - Focused, - Failed -}; - -enum class AfPauseState { - Running = 0, - Pausing, - Paused -}; - -struct AfStatus { - /* state for reporting */ - AfState state; - AfPauseState pauseState; - /* lensSetting should be sent to the lens driver, when valid */ - std::optional<int> lensSetting; -}; diff --git a/src/ipa/raspberrypi/controller/agc_algorithm.h b/src/ipa/raspberrypi/controller/agc_algorithm.h deleted file mode 100644 index 36e6c110..00000000 --- a/src/ipa/raspberrypi/controller/agc_algorithm.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * agc_algorithm.h - AGC/AEC control algorithm interface - */ -#pragma once - -#include <libcamera/base/utils.h> - -#include "algorithm.h" - -namespace RPiController { - -class AgcAlgorithm : public Algorithm -{ -public: - AgcAlgorithm(Controller *controller) : Algorithm(controller) {} - /* An AGC algorithm must provide the following: */ - virtual unsigned int getConvergenceFrames() const = 0; - virtual void setEv(double ev) = 0; - virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0; - virtual void setFixedShutter(libcamera::utils::Duration fixedShutter) = 0; - virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0; - virtual void setFixedAnalogueGain(double fixedAnalogueGain) = 0; - virtual void setMeteringMode(std::string const &meteringModeName) = 0; - virtual void setExposureMode(std::string const &exposureModeName) = 0; - virtual void setConstraintMode(std::string const &contraintModeName) = 0; - virtual void enableAuto() = 0; - virtual void disableAuto() = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/agc_status.h b/src/ipa/raspberrypi/controller/agc_status.h deleted file mode 100644 index 6abf09d9..00000000 --- a/src/ipa/raspberrypi/controller/agc_status.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * agc_status.h - AGC/AEC control algorithm status - */ -#pragma once - -#include <libcamera/base/utils.h> - -/* - * The AGC algorithm should post the following structure into the image's - * "agc.status" metadata. - */ - -/* - * Note: total_exposure_value will be reported as zero until the algorithm has - * seen statistics and calculated meaningful values. The contents should be - * ignored until then. 
- */ - -struct AgcStatus { - libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */ - libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */ - libcamera::utils::Duration shutterTime; - double analogueGain; - char exposureMode[32]; - char constraintMode[32]; - char meteringMode[32]; - double ev; - libcamera::utils::Duration flickerPeriod; - int floatingRegionEnable; - libcamera::utils::Duration fixedShutter; - double fixedAnalogueGain; - double digitalGain; - int locked; -}; diff --git a/src/ipa/raspberrypi/controller/algorithm.cpp b/src/ipa/raspberrypi/controller/algorithm.cpp deleted file mode 100644 index a957fde5..00000000 --- a/src/ipa/raspberrypi/controller/algorithm.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * algorithm.cpp - ISP control algorithms - */ - -#include "algorithm.h" - -using namespace RPiController; - -int Algorithm::read([[maybe_unused]] const libcamera::YamlObject ¶ms) -{ - return 0; -} - -void Algorithm::initialise() -{ -} - -void Algorithm::switchMode([[maybe_unused]] CameraMode const &cameraMode, - [[maybe_unused]] Metadata *metadata) -{ -} - -void Algorithm::prepare([[maybe_unused]] Metadata *imageMetadata) -{ -} - -void Algorithm::process([[maybe_unused]] StatisticsPtr &stats, - [[maybe_unused]] Metadata *imageMetadata) -{ -} - -/* For registering algorithms with the system: */ - -namespace { - -std::map<std::string, AlgoCreateFunc> &algorithms() -{ - static std::map<std::string, AlgoCreateFunc> algorithms; - return algorithms; -} - -} /* namespace */ - -std::map<std::string, AlgoCreateFunc> const &RPiController::getAlgorithms() -{ - return algorithms(); -} - -RegisterAlgorithm::RegisterAlgorithm(char const *name, - AlgoCreateFunc createFunc) -{ - algorithms()[std::string(name)] = createFunc; -} diff --git a/src/ipa/raspberrypi/controller/algorithm.h b/src/ipa/raspberrypi/controller/algorithm.h deleted file mode 100644 index 4aa814eb..00000000 --- a/src/ipa/raspberrypi/controller/algorithm.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * algorithm.h - ISP control algorithm interface - */ -#pragma once - -/* - * All algorithms should be derived from this class and made available to the - * Controller. - */ - -#include <string> -#include <memory> -#include <map> - -#include "libcamera/internal/yaml_parser.h" - -#include "controller.h" - -namespace RPiController { - -/* This defines the basic interface for all control algorithms. */ - -class Algorithm -{ -public: - Algorithm(Controller *controller) - : controller_(controller) - { - } - virtual ~Algorithm() = default; - virtual char const *name() const = 0; - virtual int read(const libcamera::YamlObject ¶ms); - virtual void initialise(); - virtual void switchMode(CameraMode const &cameraMode, Metadata *metadata); - virtual void prepare(Metadata *imageMetadata); - virtual void process(StatisticsPtr &stats, Metadata *imageMetadata); - Metadata &getGlobalMetadata() const - { - return controller_->getGlobalMetadata(); - } - const std::string &getTarget() const - { - return controller_->getTarget(); - } - const Controller::HardwareConfig &getHardwareConfig() const - { - return controller_->getHardwareConfig(); - } - -private: - Controller *controller_; -}; - -/* - * This code is for automatic registration of Front End algorithms with the - * system. 
- */ - -typedef Algorithm *(*AlgoCreateFunc)(Controller *controller); -struct RegisterAlgorithm { - RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc); -}; -std::map<std::string, AlgoCreateFunc> const &getAlgorithms(); - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/alsc_status.h b/src/ipa/raspberrypi/controller/alsc_status.h deleted file mode 100644 index 49a9f4a0..00000000 --- a/src/ipa/raspberrypi/controller/alsc_status.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * alsc_status.h - ALSC (auto lens shading correction) control algorithm status - */ -#pragma once - -#include <vector> - -/* - * The ALSC algorithm should post the following structure into the image's - * "alsc.status" metadata. - */ - -struct AlscStatus { - std::vector<double> r; - std::vector<double> g; - std::vector<double> b; - unsigned int rows; - unsigned int cols; -}; diff --git a/src/ipa/raspberrypi/controller/awb_algorithm.h b/src/ipa/raspberrypi/controller/awb_algorithm.h deleted file mode 100644 index 8462c4db..00000000 --- a/src/ipa/raspberrypi/controller/awb_algorithm.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * awb_algorithm.h - AWB control algorithm interface - */ -#pragma once - -#include "algorithm.h" - -namespace RPiController { - -class AwbAlgorithm : public Algorithm -{ -public: - AwbAlgorithm(Controller *controller) : Algorithm(controller) {} - /* An AWB algorithm must provide the following: */ - virtual unsigned int getConvergenceFrames() const = 0; - virtual void setMode(std::string const &modeName) = 0; - virtual void setManualGains(double manualR, double manualB) = 0; - virtual void enableAuto() = 0; - virtual void disableAuto() = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/awb_status.h b/src/ipa/raspberrypi/controller/awb_status.h deleted file mode 100644 index dd5a79e3..00000000 --- a/src/ipa/raspberrypi/controller/awb_status.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * awb_status.h - AWB control algorithm status - */ -#pragma once - -/* - * The AWB algorithm places its results into both the image and global metadata, - * under the tag "awb.status". - */ - -struct AwbStatus { - char mode[32]; - double temperatureK; - double gainR; - double gainG; - double gainB; -}; diff --git a/src/ipa/raspberrypi/controller/black_level_status.h b/src/ipa/raspberrypi/controller/black_level_status.h deleted file mode 100644 index fd5e4ccb..00000000 --- a/src/ipa/raspberrypi/controller/black_level_status.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * black_level_status.h - black level control algorithm status - */ -#pragma once - -/* The "black level" algorithm stores the black levels to use. 
*/ - -struct BlackLevelStatus { - uint16_t blackLevelR; /* out of 16 bits */ - uint16_t blackLevelG; - uint16_t blackLevelB; -}; diff --git a/src/ipa/raspberrypi/controller/camera_mode.h b/src/ipa/raspberrypi/controller/camera_mode.h deleted file mode 100644 index 63b11778..00000000 --- a/src/ipa/raspberrypi/controller/camera_mode.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019-2020, Raspberry Pi Ltd - * - * camera_mode.h - description of a particular operating mode of a sensor - */ -#pragma once - -#include <libcamera/transform.h> - -#include <libcamera/base/utils.h> - -/* - * Description of a "camera mode", holding enough information for control - * algorithms to adapt their behaviour to the different modes of the camera, - * including binning, scaling, cropping etc. - */ - -struct CameraMode { - /* bit depth of the raw camera output */ - uint32_t bitdepth; - /* size in pixels of frames in this mode */ - uint16_t width; - uint16_t height; - /* size of full resolution uncropped frame ("sensor frame") */ - uint16_t sensorWidth; - uint16_t sensorHeight; - /* binning factor (1 = no binning, 2 = 2-pixel binning etc.) */ - uint8_t binX; - uint8_t binY; - /* location of top left pixel in the sensor frame */ - uint16_t cropX; - uint16_t cropY; - /* scaling factor (so if uncropped, width*scaleX is sensorWidth) */ - double scaleX; - double scaleY; - /* scaling of the noise compared to the native sensor mode */ - double noiseFactor; - /* minimum and maximum line time and frame durations */ - libcamera::utils::Duration minLineLength; - libcamera::utils::Duration maxLineLength; - libcamera::utils::Duration minFrameDuration; - libcamera::utils::Duration maxFrameDuration; - /* any camera transform *not* reflected already in the camera tuning */ - libcamera::Transform transform; - /* minimum and maximum frame lengths in units of lines */ - uint32_t minFrameLength; - uint32_t maxFrameLength; - /* sensitivity of this mode */ - double sensitivity; - /* pixel clock rate */ - uint64_t pixelRate; - /* Mode specific shutter speed limits */ - libcamera::utils::Duration minShutter; - libcamera::utils::Duration maxShutter; - /* Mode specific analogue gain limits */ - double minAnalogueGain; - double maxAnalogueGain; -}; diff --git a/src/ipa/raspberrypi/controller/ccm_algorithm.h b/src/ipa/raspberrypi/controller/ccm_algorithm.h deleted file mode 100644 index e2c4d771..00000000 --- a/src/ipa/raspberrypi/controller/ccm_algorithm.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * ccm_algorithm.h - CCM (colour correction matrix) control algorithm interface - */ -#pragma once - -#include "algorithm.h" - -namespace RPiController { - -class CcmAlgorithm : public Algorithm -{ -public: - CcmAlgorithm(Controller *controller) : Algorithm(controller) {} - /* A CCM algorithm must provide the following: */ - virtual void setSaturation(double saturation) = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/ccm_status.h b/src/ipa/raspberrypi/controller/ccm_status.h deleted file mode 100644 index 5e28ee7c..00000000 --- a/src/ipa/raspberrypi/controller/ccm_status.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * ccm_status.h - CCM (colour correction matrix) control algorithm status - */ -#pragma once - -/* The "ccm" algorithm generates an appropriate colour matrix. 
*/ - -struct CcmStatus { - double matrix[9]; - double saturation; -}; diff --git a/src/ipa/raspberrypi/controller/contrast_algorithm.h b/src/ipa/raspberrypi/controller/contrast_algorithm.h deleted file mode 100644 index ce17a4f9..00000000 --- a/src/ipa/raspberrypi/controller/contrast_algorithm.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * contrast_algorithm.h - contrast (gamma) control algorithm interface - */ -#pragma once - -#include "algorithm.h" - -namespace RPiController { - -class ContrastAlgorithm : public Algorithm -{ -public: - ContrastAlgorithm(Controller *controller) : Algorithm(controller) {} - /* A contrast algorithm must provide the following: */ - virtual void setBrightness(double brightness) = 0; - virtual void setContrast(double contrast) = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/contrast_status.h b/src/ipa/raspberrypi/controller/contrast_status.h deleted file mode 100644 index fb9fe4ba..00000000 --- a/src/ipa/raspberrypi/controller/contrast_status.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * contrast_status.h - contrast (gamma) control algorithm status - */ -#pragma once - -#include "pwl.h" - -/* - * The "contrast" algorithm creates a gamma curve, optionally doing a little bit - * of contrast stretching based on the AGC histogram. - */ - -struct ContrastStatus { - RPiController::Pwl gammaCurve; - double brightness; - double contrast; -}; diff --git a/src/ipa/raspberrypi/controller/controller.cpp b/src/ipa/raspberrypi/controller/controller.cpp deleted file mode 100644 index fa172113..00000000 --- a/src/ipa/raspberrypi/controller/controller.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * controller.cpp - ISP controller - */ - -#include <assert.h> - -#include <libcamera/base/file.h> -#include <libcamera/base/log.h> - -#include "libcamera/internal/yaml_parser.h" - -#include "algorithm.h" -#include "controller.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiController) - -static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap = { - { - "bcm2835", - { - /* - * There are only ever 15 AGC regions computed by the firmware - * due to zoning, but the HW defines AGC_REGIONS == 16! - */ - .agcRegions = { 15 , 1 }, - .agcZoneWeights = { 15 , 1 }, - .awbRegions = { 16, 12 }, - .focusRegions = { 4, 3 }, - .numHistogramBins = 128, - .numGammaPoints = 33, - .pipelineWidth = 13 - } - }, -}; - -Controller::Controller() - : switchModeCalled_(false) -{ -} - -Controller::~Controller() {} - -int Controller::read(char const *filename) -{ - File file(filename); - if (!file.open(File::OpenModeFlag::ReadOnly)) { - LOG(RPiController, Warning) - << "Failed to open tuning file '" << filename << "'"; - return -EINVAL; - } - - std::unique_ptr<YamlObject> root = YamlParser::parse(file); - double version = (*root)["version"].get<double>(1.0); - target_ = (*root)["target"].get<std::string>("bcm2835"); - - if (version < 2.0) { - LOG(RPiController, Warning) - << "This format of the tuning file will be deprecated soon!" 
- << " Please use the convert_tuning.py utility to update to version 2.0."; - - for (auto const &[key, value] : root->asDict()) { - int ret = createAlgorithm(key, value); - if (ret) - return ret; - } - } else if (version < 3.0) { - if (!root->contains("algorithms")) { - LOG(RPiController, Error) - << "Tuning file " << filename - << " does not have an \"algorithms\" list!"; - return -EINVAL; - } - - for (auto const &rootAlgo : (*root)["algorithms"].asList()) - for (auto const &[key, value] : rootAlgo.asDict()) { - int ret = createAlgorithm(key, value); - if (ret) - return ret; - } - } else { - LOG(RPiController, Error) - << "Unrecognised version " << version - << " for the tuning file " << filename; - return -EINVAL; - } - - return 0; -} - -int Controller::createAlgorithm(const std::string &name, const YamlObject ¶ms) -{ - auto it = getAlgorithms().find(name); - if (it == getAlgorithms().end()) { - LOG(RPiController, Warning) - << "No algorithm found for \"" << name << "\""; - return 0; - } - - Algorithm *algo = (*it->second)(this); - int ret = algo->read(params); - if (ret) - return ret; - - algorithms_.push_back(AlgorithmPtr(algo)); - return 0; -} - -void Controller::initialise() -{ - for (auto &algo : algorithms_) - algo->initialise(); -} - -void Controller::switchMode(CameraMode const &cameraMode, Metadata *metadata) -{ - for (auto &algo : algorithms_) - algo->switchMode(cameraMode, metadata); - switchModeCalled_ = true; -} - -void Controller::prepare(Metadata *imageMetadata) -{ - assert(switchModeCalled_); - for (auto &algo : algorithms_) - algo->prepare(imageMetadata); -} - -void Controller::process(StatisticsPtr stats, Metadata *imageMetadata) -{ - assert(switchModeCalled_); - for (auto &algo : algorithms_) - algo->process(stats, imageMetadata); -} - -Metadata &Controller::getGlobalMetadata() -{ - return globalMetadata_; -} - -Algorithm *Controller::getAlgorithm(std::string const &name) const -{ - /* - * The passed name must be the entire algorithm name, or must match the - * last part of it with a period (.) just before. - */ - size_t nameLen = name.length(); - for (auto &algo : algorithms_) { - char const *algoName = algo->name(); - size_t algoNameLen = strlen(algoName); - if (algoNameLen >= nameLen && - strcasecmp(name.c_str(), - algoName + algoNameLen - nameLen) == 0 && - (nameLen == algoNameLen || - algoName[algoNameLen - nameLen - 1] == '.')) - return algo.get(); - } - return nullptr; -} - -const std::string &Controller::getTarget() const -{ - return target_; -} - -const Controller::HardwareConfig &Controller::getHardwareConfig() const -{ - auto cfg = HardwareConfigMap.find(getTarget()); - - /* - * This really should not happen, the IPA ought to validate the target - * on initialisation. - */ - ASSERT(cfg != HardwareConfigMap.end()); - return cfg->second; -} diff --git a/src/ipa/raspberrypi/controller/controller.h b/src/ipa/raspberrypi/controller/controller.h deleted file mode 100644 index c6af5cd6..00000000 --- a/src/ipa/raspberrypi/controller/controller.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * controller.h - ISP controller interface - */ -#pragma once - -/* - * The Controller is simply a container for a collecting together a number of - * "control algorithms" (such as AWB etc.) and for running them all in a - * convenient manner. 
- */ - -#include <vector> -#include <string> - -#include "libcamera/internal/yaml_parser.h" - -#include "camera_mode.h" -#include "device_status.h" -#include "metadata.h" -#include "statistics.h" - -namespace RPiController { - -class Algorithm; -typedef std::unique_ptr<Algorithm> AlgorithmPtr; - -/* - * The Controller holds a pointer to some global_metadata, which is how - * different controllers and control algorithms within them can exchange - * information. The Prepare function returns a pointer to metadata for this - * specific image, and which should be passed on to the Process function. - */ - -class Controller -{ -public: - struct HardwareConfig { - libcamera::Size agcRegions; - libcamera::Size agcZoneWeights; - libcamera::Size awbRegions; - libcamera::Size focusRegions; - unsigned int numHistogramBins; - unsigned int numGammaPoints; - unsigned int pipelineWidth; - }; - - Controller(); - ~Controller(); - int read(char const *filename); - void initialise(); - void switchMode(CameraMode const &cameraMode, Metadata *metadata); - void prepare(Metadata *imageMetadata); - void process(StatisticsPtr stats, Metadata *imageMetadata); - Metadata &getGlobalMetadata(); - Algorithm *getAlgorithm(std::string const &name) const; - const std::string &getTarget() const; - const HardwareConfig &getHardwareConfig() const; - -protected: - int createAlgorithm(const std::string &name, const libcamera::YamlObject ¶ms); - - Metadata globalMetadata_; - std::vector<AlgorithmPtr> algorithms_; - bool switchModeCalled_; - -private: - std::string target_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/denoise_algorithm.h b/src/ipa/raspberrypi/controller/denoise_algorithm.h deleted file mode 100644 index 52009ba9..00000000 --- a/src/ipa/raspberrypi/controller/denoise_algorithm.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2021, Raspberry Pi Ltd - * - * denoise.h - Denoise control algorithm interface - */ -#pragma once - -#include "algorithm.h" - -namespace RPiController { - -enum class DenoiseMode { Off, ColourOff, ColourFast, ColourHighQuality }; - -class DenoiseAlgorithm : public Algorithm -{ -public: - DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {} - /* A Denoise algorithm must provide the following: */ - virtual void setMode(DenoiseMode mode) = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/denoise_status.h b/src/ipa/raspberrypi/controller/denoise_status.h deleted file mode 100644 index f6b9ee29..00000000 --- a/src/ipa/raspberrypi/controller/denoise_status.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019-2021, Raspberry Pi Ltd - * - * denoise_status.h - Denoise control algorithm status - */ -#pragma once - -/* This stores the parameters required for Denoise. 
*/ - -struct DenoiseStatus { - double noiseConstant; - double noiseSlope; - double strength; - unsigned int mode; -}; diff --git a/src/ipa/raspberrypi/controller/device_status.cpp b/src/ipa/raspberrypi/controller/device_status.cpp deleted file mode 100644 index c907efdd..00000000 --- a/src/ipa/raspberrypi/controller/device_status.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2021, Raspberry Pi Ltd - * - * device_status.cpp - device (image sensor) status - */ -#include "device_status.h" - -using namespace libcamera; /* for the Duration operator<< overload */ - -std::ostream &operator<<(std::ostream &out, const DeviceStatus &d) -{ - out << "Exposure: " << d.shutterSpeed - << " Frame length: " << d.frameLength - << " Line length: " << d.lineLength - << " Gain: " << d.analogueGain; - - if (d.aperture) - out << " Aperture: " << *d.aperture; - - if (d.lensPosition) - out << " Lens: " << *d.lensPosition; - - if (d.flashIntensity) - out << " Flash: " << *d.flashIntensity; - - if (d.sensorTemperature) - out << " Temperature: " << *d.sensorTemperature; - - return out; -} diff --git a/src/ipa/raspberrypi/controller/device_status.h b/src/ipa/raspberrypi/controller/device_status.h deleted file mode 100644 index c45db749..00000000 --- a/src/ipa/raspberrypi/controller/device_status.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019-2021, Raspberry Pi Ltd - * - * device_status.h - device (image sensor) status - */ -#pragma once - -#include <iostream> -#include <optional> - -#include <libcamera/base/utils.h> - -/* - * Definition of "device metadata" which stores things like shutter time and - * analogue gain that downstream control algorithms will want to know. - */ - -struct DeviceStatus { - DeviceStatus() - : shutterSpeed(std::chrono::seconds(0)), frameLength(0), - lineLength(std::chrono::seconds(0)), analogueGain(0.0) - { - } - - friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d); - - /* time shutter is open */ - libcamera::utils::Duration shutterSpeed; - /* frame length given in number of lines */ - uint32_t frameLength; - /* line length for the current frame */ - libcamera::utils::Duration lineLength; - double analogueGain; - /* 1.0/distance-in-metres */ - std::optional<double> lensPosition; - /* 1/f so that brightness quadruples when this doubles */ - std::optional<double> aperture; - /* proportional to brightness with 0 = no flash, 1 = maximum flash */ - std::optional<double> flashIntensity; - /* Sensor reported temperature value (in degrees) */ - std::optional<double> sensorTemperature; -}; diff --git a/src/ipa/raspberrypi/controller/dpc_status.h b/src/ipa/raspberrypi/controller/dpc_status.h deleted file mode 100644 index 46d0cf34..00000000 --- a/src/ipa/raspberrypi/controller/dpc_status.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * dpc_status.h - DPC (defective pixel correction) control algorithm status - */ -#pragma once - -/* The "DPC" algorithm sets defective pixel correction strength. 
*/ - -struct DpcStatus { - int strength; /* 0 = "off", 1 = "normal", 2 = "strong" */ -}; diff --git a/src/ipa/raspberrypi/controller/geq_status.h b/src/ipa/raspberrypi/controller/geq_status.h deleted file mode 100644 index 2d749fc9..00000000 --- a/src/ipa/raspberrypi/controller/geq_status.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * geq_status.h - GEQ (green equalisation) control algorithm status - */ -#pragma once - -/* The "GEQ" algorithm calculates the green equalisation thresholds */ - -struct GeqStatus { - uint16_t offset; - double slope; -}; diff --git a/src/ipa/raspberrypi/controller/histogram.cpp b/src/ipa/raspberrypi/controller/histogram.cpp deleted file mode 100644 index 16a9207f..00000000 --- a/src/ipa/raspberrypi/controller/histogram.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * histogram.cpp - histogram calculations - */ -#include <math.h> -#include <stdio.h> - -#include "histogram.h" - -using namespace RPiController; - -uint64_t Histogram::cumulativeFreq(double bin) const -{ - if (bin <= 0) - return 0; - else if (bin >= bins()) - return total(); - int b = (int)bin; - return cumulative_[b] + - (bin - b) * (cumulative_[b + 1] - cumulative_[b]); -} - -double Histogram::quantile(double q, int first, int last) const -{ - if (first == -1) - first = 0; - if (last == -1) - last = cumulative_.size() - 2; - assert(first <= last); - uint64_t items = q * total(); - while (first < last) /* binary search to find the right bin */ - { - int middle = (first + last) / 2; - if (cumulative_[middle + 1] > items) - last = middle; /* between first and middle */ - else - first = middle + 1; /* after middle */ - } - assert(items >= cumulative_[first] && items <= cumulative_[last + 1]); - double frac = cumulative_[first + 1] == cumulative_[first] ? 0 - : (double)(items - cumulative_[first]) / - (cumulative_[first + 1] - cumulative_[first]); - return first + frac; -} - -double Histogram::interQuantileMean(double qLo, double qHi) const -{ - assert(qHi > qLo); - double pLo = quantile(qLo); - double pHi = quantile(qHi, (int)pLo); - double sumBinFreq = 0, cumulFreq = 0; - for (double pNext = floor(pLo) + 1.0; pNext <= ceil(pHi); - pLo = pNext, pNext += 1.0) { - int bin = floor(pLo); - double freq = (cumulative_[bin + 1] - cumulative_[bin]) * - (std::min(pNext, pHi) - pLo); - sumBinFreq += bin * freq; - cumulFreq += freq; - } - /* add 0.5 to give an average for bin mid-points */ - return sumBinFreq / cumulFreq + 0.5; -} diff --git a/src/ipa/raspberrypi/controller/histogram.h b/src/ipa/raspberrypi/controller/histogram.h deleted file mode 100644 index 6b3e3a9e..00000000 --- a/src/ipa/raspberrypi/controller/histogram.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * histogram.h - histogram calculation interface - */ -#pragma once - -#include <stdint.h> -#include <vector> -#include <cassert> - -/* - * A simple histogram class, for use in particular to find "quantiles" and - * averages between "quantiles". 
- */ - -namespace RPiController { - -class Histogram -{ -public: - Histogram() - { - cumulative_.push_back(0); - } - - template<typename T> Histogram(T *histogram, int num) - { - assert(num); - cumulative_.reserve(num + 1); - cumulative_.push_back(0); - for (int i = 0; i < num; i++) - cumulative_.push_back(cumulative_.back() + - histogram[i]); - } - uint32_t bins() const { return cumulative_.size() - 1; } - uint64_t total() const { return cumulative_[cumulative_.size() - 1]; } - /* Cumulative frequency up to a (fractional) point in a bin. */ - uint64_t cumulativeFreq(double bin) const; - /* - * Return the (fractional) bin of the point q (0 <= q <= 1) through the - * histogram. Optionally provide limits to help. - */ - double quantile(double q, int first = -1, int last = -1) const; - /* Return the average histogram bin value between the two quantiles. */ - double interQuantileMean(double qLo, double qHi) const; - -private: - std::vector<uint64_t> cumulative_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/lux_status.h b/src/ipa/raspberrypi/controller/lux_status.h deleted file mode 100644 index 5eb9faac..00000000 --- a/src/ipa/raspberrypi/controller/lux_status.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * lux_status.h - Lux control algorithm status - */ -#pragma once - -/* - * The "lux" algorithm looks at the (AGC) histogram statistics of the frame and - * estimates the current lux level of the scene. It does this by a simple ratio - * calculation comparing to a reference image that was taken in known conditions - * with known statistics and a properly measured lux level. There is a slight - * problem with aperture, in that it may be variable without the system knowing - * or being aware of it. In this case an external application may set a - * "current_aperture" value if it wishes, which would be used in place of the - * (presumably meaningless) value in the image metadata. - */ - -struct LuxStatus { - double lux; - double aperture; -}; diff --git a/src/ipa/raspberrypi/controller/metadata.h b/src/ipa/raspberrypi/controller/metadata.h deleted file mode 100644 index bf8a2393..00000000 --- a/src/ipa/raspberrypi/controller/metadata.h +++ /dev/null @@ -1,126 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019-2021, Raspberry Pi Ltd - * - * metadata.h - general metadata class - */ -#pragma once - -/* A simple class for carrying arbitrary metadata, for example about an image. 
*/ - -#include <any> -#include <map> -#include <mutex> -#include <string> - -#include <libcamera/base/thread_annotations.h> - -namespace RPiController { - -class LIBCAMERA_TSA_CAPABILITY("mutex") Metadata -{ -public: - Metadata() = default; - - Metadata(Metadata const &other) - { - std::scoped_lock otherLock(other.mutex_); - data_ = other.data_; - } - - Metadata(Metadata &&other) - { - std::scoped_lock otherLock(other.mutex_); - data_ = std::move(other.data_); - other.data_.clear(); - } - - template<typename T> - void set(std::string const &tag, T const &value) - { - std::scoped_lock lock(mutex_); - data_[tag] = value; - } - - template<typename T> - int get(std::string const &tag, T &value) const - { - std::scoped_lock lock(mutex_); - auto it = data_.find(tag); - if (it == data_.end()) - return -1; - value = std::any_cast<T>(it->second); - return 0; - } - - void clear() - { - std::scoped_lock lock(mutex_); - data_.clear(); - } - - Metadata &operator=(Metadata const &other) - { - std::scoped_lock lock(mutex_, other.mutex_); - data_ = other.data_; - return *this; - } - - Metadata &operator=(Metadata &&other) - { - std::scoped_lock lock(mutex_, other.mutex_); - data_ = std::move(other.data_); - other.data_.clear(); - return *this; - } - - void merge(Metadata &other) - { - std::scoped_lock lock(mutex_, other.mutex_); - data_.merge(other.data_); - } - - void mergeCopy(const Metadata &other) - { - std::scoped_lock lock(mutex_, other.mutex_); - /* - * If the metadata key exists, ignore this item and copy only - * unique key/value pairs. - */ - data_.insert(other.data_.begin(), other.data_.end()); - } - - template<typename T> - T *getLocked(std::string const &tag) - { - /* - * This allows in-place access to the Metadata contents, - * for which you should be holding the lock. - */ - auto it = data_.find(tag); - if (it == data_.end()) - return nullptr; - return std::any_cast<T>(&it->second); - } - - template<typename T> - void setLocked(std::string const &tag, T const &value) - { - /* Use this only if you're holding the lock yourself. */ - data_[tag] = value; - } - - /* - * Note: use of (lowercase) lock and unlock means you can create scoped - * locks with the standard lock classes. - * e.g. std::lock_guard<RPiController::Metadata> lock(metadata) - */ - void lock() LIBCAMERA_TSA_ACQUIRE() { mutex_.lock(); } - void unlock() LIBCAMERA_TSA_RELEASE() { mutex_.unlock(); } - -private: - mutable std::mutex mutex_; - std::map<std::string, std::any> data_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/noise_status.h b/src/ipa/raspberrypi/controller/noise_status.h deleted file mode 100644 index da194f71..00000000 --- a/src/ipa/raspberrypi/controller/noise_status.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * noise_status.h - Noise control algorithm status - */ -#pragma once - -/* The "noise" algorithm stores an estimate of the noise profile for this image. 
*/ - -struct NoiseStatus { - double noiseConstant; - double noiseSlope; -}; diff --git a/src/ipa/raspberrypi/controller/pdaf_data.h b/src/ipa/raspberrypi/controller/pdaf_data.h deleted file mode 100644 index 470510f2..00000000 --- a/src/ipa/raspberrypi/controller/pdaf_data.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022, Raspberry Pi Ltd - * - * pdaf_data.h - PDAF Metadata - */ -#pragma once - -#include <stdint.h> - -#include "region_stats.h" - -namespace RPiController { - -struct PdafData { - /* Confidence, in arbitrary units */ - uint16_t conf; - /* Phase error, in s16 Q4 format (S.11.4) */ - int16_t phase; -}; - -using PdafRegions = RegionStats<PdafData>; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/pwl.cpp b/src/ipa/raspberrypi/controller/pwl.cpp deleted file mode 100644 index 70c2e24b..00000000 --- a/src/ipa/raspberrypi/controller/pwl.cpp +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * pwl.cpp - piecewise linear functions - */ - -#include <cassert> -#include <cmath> -#include <stdexcept> - -#include "pwl.h" - -using namespace RPiController; - -int Pwl::read(const libcamera::YamlObject ¶ms) -{ - if (!params.size() || params.size() % 2) - return -EINVAL; - - const auto &list = params.asList(); - - for (auto it = list.begin(); it != list.end(); it++) { - auto x = it->get<double>(); - if (!x) - return -EINVAL; - if (it != list.begin() && *x <= points_.back().x) - return -EINVAL; - - auto y = (++it)->get<double>(); - if (!y) - return -EINVAL; - - points_.push_back(Point(*x, *y)); - } - - return 0; -} - -void Pwl::append(double x, double y, const double eps) -{ - if (points_.empty() || points_.back().x + eps < x) - points_.push_back(Point(x, y)); -} - -void Pwl::prepend(double x, double y, const double eps) -{ - if (points_.empty() || points_.front().x - eps > x) - points_.insert(points_.begin(), Point(x, y)); -} - -Pwl::Interval Pwl::domain() const -{ - return Interval(points_[0].x, points_[points_.size() - 1].x); -} - -Pwl::Interval Pwl::range() const -{ - double lo = points_[0].y, hi = lo; - for (auto &p : points_) - lo = std::min(lo, p.y), hi = std::max(hi, p.y); - return Interval(lo, hi); -} - -bool Pwl::empty() const -{ - return points_.empty(); -} - -double Pwl::eval(double x, int *spanPtr, bool updateSpan) const -{ - int span = findSpan(x, spanPtr && *spanPtr != -1 ? *spanPtr : points_.size() / 2 - 1); - if (spanPtr && updateSpan) - *spanPtr = span; - return points_[span].y + - (x - points_[span].x) * (points_[span + 1].y - points_[span].y) / - (points_[span + 1].x - points_[span].x); -} - -int Pwl::findSpan(double x, int span) const -{ - /* - * Pwls are generally small, so linear search may well be faster than - * binary, though could review this if large PWls start turning up. 
- */ - int lastSpan = points_.size() - 2; - /* - * some algorithms may call us with span pointing directly at the last - * control point - */ - span = std::max(0, std::min(lastSpan, span)); - while (span < lastSpan && x >= points_[span + 1].x) - span++; - while (span && x < points_[span].x) - span--; - return span; -} - -Pwl::PerpType Pwl::invert(Point const &xy, Point &perp, int &span, - const double eps) const -{ - assert(span >= -1); - bool prevOffEnd = false; - for (span = span + 1; span < (int)points_.size() - 1; span++) { - Point spanVec = points_[span + 1] - points_[span]; - double t = ((xy - points_[span]) % spanVec) / spanVec.len2(); - if (t < -eps) /* off the start of this span */ - { - if (span == 0) { - perp = points_[span]; - return PerpType::Start; - } else if (prevOffEnd) { - perp = points_[span]; - return PerpType::Vertex; - } - } else if (t > 1 + eps) /* off the end of this span */ - { - if (span == (int)points_.size() - 2) { - perp = points_[span + 1]; - return PerpType::End; - } - prevOffEnd = true; - } else /* a true perpendicular */ - { - perp = points_[span] + spanVec * t; - return PerpType::Perpendicular; - } - } - return PerpType::None; -} - -Pwl Pwl::inverse(bool *trueInverse, const double eps) const -{ - bool appended = false, prepended = false, neither = false; - Pwl inverse; - - for (Point const &p : points_) { - if (inverse.empty()) - inverse.append(p.y, p.x, eps); - else if (std::abs(inverse.points_.back().x - p.y) <= eps || - std::abs(inverse.points_.front().x - p.y) <= eps) - /* do nothing */; - else if (p.y > inverse.points_.back().x) { - inverse.append(p.y, p.x, eps); - appended = true; - } else if (p.y < inverse.points_.front().x) { - inverse.prepend(p.y, p.x, eps); - prepended = true; - } else - neither = true; - } - - /* - * This is not a proper inverse if we found ourselves putting points - * onto both ends of the inverse, or if there were points that couldn't - * go on either. 
- */ - if (trueInverse) - *trueInverse = !(neither || (appended && prepended)); - - return inverse; -} - -Pwl Pwl::compose(Pwl const &other, const double eps) const -{ - double thisX = points_[0].x, thisY = points_[0].y; - int thisSpan = 0, otherSpan = other.findSpan(thisY, 0); - Pwl result({ { thisX, other.eval(thisY, &otherSpan, false) } }); - while (thisSpan != (int)points_.size() - 1) { - double dx = points_[thisSpan + 1].x - points_[thisSpan].x, - dy = points_[thisSpan + 1].y - points_[thisSpan].y; - if (std::abs(dy) > eps && - otherSpan + 1 < (int)other.points_.size() && - points_[thisSpan + 1].y >= - other.points_[otherSpan + 1].x + eps) { - /* - * next control point in result will be where this - * function's y reaches the next span in other - */ - thisX = points_[thisSpan].x + - (other.points_[otherSpan + 1].x - - points_[thisSpan].y) * - dx / dy; - thisY = other.points_[++otherSpan].x; - } else if (std::abs(dy) > eps && otherSpan > 0 && - points_[thisSpan + 1].y <= - other.points_[otherSpan - 1].x - eps) { - /* - * next control point in result will be where this - * function's y reaches the previous span in other - */ - thisX = points_[thisSpan].x + - (other.points_[otherSpan + 1].x - - points_[thisSpan].y) * - dx / dy; - thisY = other.points_[--otherSpan].x; - } else { - /* we stay in the same span in other */ - thisSpan++; - thisX = points_[thisSpan].x, - thisY = points_[thisSpan].y; - } - result.append(thisX, other.eval(thisY, &otherSpan, false), - eps); - } - return result; -} - -void Pwl::map(std::function<void(double x, double y)> f) const -{ - for (auto &pt : points_) - f(pt.x, pt.y); -} - -void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1, - std::function<void(double x, double y0, double y1)> f) -{ - int span0 = 0, span1 = 0; - double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x); - f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); - while (span0 < (int)pwl0.points_.size() - 1 || - span1 < (int)pwl1.points_.size() - 1) { - if (span0 == (int)pwl0.points_.size() - 1) - x = pwl1.points_[++span1].x; - else if (span1 == (int)pwl1.points_.size() - 1) - x = pwl0.points_[++span0].x; - else if (pwl0.points_[span0 + 1].x > pwl1.points_[span1 + 1].x) - x = pwl1.points_[++span1].x; - else - x = pwl0.points_[++span0].x; - f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false)); - } -} - -Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1, - std::function<double(double x, double y0, double y1)> f, - const double eps) -{ - Pwl result; - map2(pwl0, pwl1, [&](double x, double y0, double y1) { - result.append(x, f(x, y0, y1), eps); - }); - return result; -} - -void Pwl::matchDomain(Interval const &domain, bool clip, const double eps) -{ - int span = 0; - prepend(domain.start, eval(clip ? points_[0].x : domain.start, &span), - eps); - span = points_.size() - 2; - append(domain.end, eval(clip ? 
points_.back().x : domain.end, &span), - eps); -} - -Pwl &Pwl::operator*=(double d) -{ - for (auto &pt : points_) - pt.y *= d; - return *this; -} - -void Pwl::debug(FILE *fp) const -{ - fprintf(fp, "Pwl {\n"); - for (auto &p : points_) - fprintf(fp, "\t(%g, %g)\n", p.x, p.y); - fprintf(fp, "}\n"); -} diff --git a/src/ipa/raspberrypi/controller/pwl.h b/src/ipa/raspberrypi/controller/pwl.h deleted file mode 100644 index aacf6039..00000000 --- a/src/ipa/raspberrypi/controller/pwl.h +++ /dev/null @@ -1,127 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * pwl.h - piecewise linear functions interface - */ -#pragma once - -#include <functional> -#include <math.h> -#include <vector> - -#include "libcamera/internal/yaml_parser.h" - -namespace RPiController { - -class Pwl -{ -public: - struct Interval { - Interval(double _start, double _end) - : start(_start), end(_end) - { - } - double start, end; - bool contains(double value) - { - return value >= start && value <= end; - } - double clip(double value) - { - return value < start ? start - : (value > end ? end : value); - } - double len() const { return end - start; } - }; - struct Point { - Point() : x(0), y(0) {} - Point(double _x, double _y) - : x(_x), y(_y) {} - double x, y; - Point operator-(Point const &p) const - { - return Point(x - p.x, y - p.y); - } - Point operator+(Point const &p) const - { - return Point(x + p.x, y + p.y); - } - double operator%(Point const &p) const - { - return x * p.x + y * p.y; - } - Point operator*(double f) const { return Point(x * f, y * f); } - Point operator/(double f) const { return Point(x / f, y / f); } - double len2() const { return x * x + y * y; } - double len() const { return sqrt(len2()); } - }; - Pwl() {} - Pwl(std::vector<Point> const &points) : points_(points) {} - int read(const libcamera::YamlObject ¶ms); - void append(double x, double y, const double eps = 1e-6); - void prepend(double x, double y, const double eps = 1e-6); - Interval domain() const; - Interval range() const; - bool empty() const; - /* - * Evaluate Pwl, optionally supplying an initial guess for the - * "span". The "span" may be optionally be updated. If you want to know - * the "span" value but don't have an initial guess you can set it to - * -1. - */ - double eval(double x, int *spanPtr = nullptr, - bool updateSpan = true) const; - /* - * Find perpendicular closest to xy, starting from span+1 so you can - * call it repeatedly to check for multiple closest points (set span to - * -1 on the first call). Also returns "pseudo" perpendiculars; see - * PerpType enum. - */ - enum class PerpType { - None, /* no perpendicular found */ - Start, /* start of Pwl is closest point */ - End, /* end of Pwl is closest point */ - Vertex, /* vertex of Pwl is closest point */ - Perpendicular /* true perpendicular found */ - }; - PerpType invert(Point const &xy, Point &perp, int &span, - const double eps = 1e-6) const; - /* - * Compute the inverse function. Indicate if it is a proper (true) - * inverse, or only a best effort (e.g. input was non-monotonic). - */ - Pwl inverse(bool *trueInverse = nullptr, const double eps = 1e-6) const; - /* Compose two Pwls together, doing "this" first and "other" after. */ - Pwl compose(Pwl const &other, const double eps = 1e-6) const; - /* Apply function to (x,y) values at every control point. */ - void map(std::function<void(double x, double y)> f) const; - /* - * Apply function to (x, y0, y1) values wherever either Pwl has a - * control point. 
- */ - static void map2(Pwl const &pwl0, Pwl const &pwl1, - std::function<void(double x, double y0, double y1)> f); - /* - * Combine two Pwls, meaning we create a new Pwl where the y values are - * given by running f wherever either has a knot. - */ - static Pwl - combine(Pwl const &pwl0, Pwl const &pwl1, - std::function<double(double x, double y0, double y1)> f, - const double eps = 1e-6); - /* - * Make "this" match (at least) the given domain. Any extension my be - * clipped or linear. - */ - void matchDomain(Interval const &domain, bool clip = true, - const double eps = 1e-6); - Pwl &operator*=(double d); - void debug(FILE *fp = stdout) const; - -private: - int findSpan(double x, int span) const; - std::vector<Point> points_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/region_stats.h b/src/ipa/raspberrypi/controller/region_stats.h deleted file mode 100644 index a8860dc8..00000000 --- a/src/ipa/raspberrypi/controller/region_stats.h +++ /dev/null @@ -1,123 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022, Raspberry Pi Ltd - * - * region_stats.h - Raspberry Pi region based statistics container - */ -#pragma once - -#include <array> -#include <stdint.h> -#include <vector> - -#include <libcamera/geometry.h> - -namespace RPiController { - -template<typename T> -class RegionStats -{ -public: - struct Region { - T val; - uint32_t counted; - uint32_t uncounted; - }; - - RegionStats() - : size_({}), numFloating_(0), default_({}) - { - } - - void init(const libcamera::Size &size, unsigned int numFloating = 0) - { - size_ = size; - numFloating_ = numFloating; - regions_.clear(); - regions_.resize(size_.width * size_.height + numFloating_); - } - - void init(unsigned int num) - { - size_ = libcamera::Size(num, 1); - numFloating_ = 0; - regions_.clear(); - regions_.resize(num); - } - - unsigned int numRegions() const - { - return size_.width * size_.height; - } - - unsigned int numFloatingRegions() const - { - return numFloating_; - } - - libcamera::Size size() const - { - return size_; - } - - void set(unsigned int index, const Region ®ion) - { - if (index >= numRegions()) - return; - set_(index, region); - } - - void set(const libcamera::Point &pos, const Region ®ion) - { - set(pos.y * size_.width + pos.x, region); - } - - void setFloating(unsigned int index, const Region ®ion) - { - if (index >= numFloatingRegions()) - return; - set(numRegions() + index, region); - } - - const Region &get(unsigned int index) const - { - if (index >= numRegions()) - return default_; - return get_(index); - } - - const Region &get(const libcamera::Point &pos) const - { - return get(pos.y * size_.width + pos.x); - } - - const Region &getFloating(unsigned int index) const - { - if (index >= numFloatingRegions()) - return default_; - return get_(numRegions() + index); - } - - typename std::vector<Region>::iterator begin() { return regions_.begin(); } - typename std::vector<Region>::iterator end() { return regions_.end(); } - typename std::vector<Region>::const_iterator begin() const { return regions_.begin(); } - typename std::vector<Region>::const_iterator end() const { return regions_.end(); } - -private: - void set_(unsigned int index, const Region ®ion) - { - regions_[index] = region; - } - - const Region &get_(unsigned int index) const - { - return regions_[index]; - } - - libcamera::Size size_; - unsigned int numFloating_; - std::vector<Region> regions_; - Region default_; -}; - -} /* namespace RPiController */ diff --git 
a/src/ipa/raspberrypi/controller/rpi/af.cpp b/src/ipa/raspberrypi/controller/rpi/af.cpp deleted file mode 100644 index ed0c8a94..00000000 --- a/src/ipa/raspberrypi/controller/rpi/af.cpp +++ /dev/null @@ -1,797 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022-2023, Raspberry Pi Ltd - * - * af.cpp - Autofocus control algorithm - */ - -#include "af.h" - -#include <iomanip> -#include <math.h> -#include <stdlib.h> - -#include <libcamera/base/log.h> - -#include <libcamera/control_ids.h> - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiAf) - -#define NAME "rpi.af" - -/* - * Default values for parameters. All may be overridden in the tuning file. - * Many of these values are sensor- or module-dependent; the defaults here - * assume IMX708 in a Raspberry Pi V3 camera with the standard lens. - * - * Here all focus values are in dioptres (1/m). They are converted to hardware - * units when written to status.lensSetting or returned from setLensPosition(). - * - * Gain and delay values are relative to the update rate, since much (not all) - * of the delay is in the sensor and (for CDAF) ISP, not the lens mechanism; - * but note that algorithms are updated at no more than 30 Hz. - */ - -Af::RangeDependentParams::RangeDependentParams() - : focusMin(0.0), - focusMax(12.0), - focusDefault(1.0) -{ -} - -Af::SpeedDependentParams::SpeedDependentParams() - : stepCoarse(1.0), - stepFine(0.25), - contrastRatio(0.75), - pdafGain(-0.02), - pdafSquelch(0.125), - maxSlew(2.0), - pdafFrames(20), - dropoutFrames(6), - stepFrames(4) -{ -} - -Af::CfgParams::CfgParams() - : confEpsilon(8), - confThresh(16), - confClip(512), - skipFrames(5), - map() -{ -} - -template<typename T> -static void readNumber(T &dest, const libcamera::YamlObject ¶ms, char const *name) -{ - auto value = params[name].get<T>(); - if (value) - dest = *value; - else - LOG(RPiAf, Warning) << "Missing parameter \"" << name << "\""; -} - -void Af::RangeDependentParams::read(const libcamera::YamlObject ¶ms) -{ - - readNumber<double>(focusMin, params, "min"); - readNumber<double>(focusMax, params, "max"); - readNumber<double>(focusDefault, params, "default"); -} - -void Af::SpeedDependentParams::read(const libcamera::YamlObject ¶ms) -{ - readNumber<double>(stepCoarse, params, "step_coarse"); - readNumber<double>(stepFine, params, "step_fine"); - readNumber<double>(contrastRatio, params, "contrast_ratio"); - readNumber<double>(pdafGain, params, "pdaf_gain"); - readNumber<double>(pdafSquelch, params, "pdaf_squelch"); - readNumber<double>(maxSlew, params, "max_slew"); - readNumber<uint32_t>(pdafFrames, params, "pdaf_frames"); - readNumber<uint32_t>(dropoutFrames, params, "dropout_frames"); - readNumber<uint32_t>(stepFrames, params, "step_frames"); -} - -int Af::CfgParams::read(const libcamera::YamlObject ¶ms) -{ - if (params.contains("ranges")) { - auto &rr = params["ranges"]; - - if (rr.contains("normal")) - ranges[AfRangeNormal].read(rr["normal"]); - else - LOG(RPiAf, Warning) << "Missing range \"normal\""; - - ranges[AfRangeMacro] = ranges[AfRangeNormal]; - if (rr.contains("macro")) - ranges[AfRangeMacro].read(rr["macro"]); - - ranges[AfRangeFull].focusMin = std::min(ranges[AfRangeNormal].focusMin, - ranges[AfRangeMacro].focusMin); - ranges[AfRangeFull].focusMax = std::max(ranges[AfRangeNormal].focusMax, - ranges[AfRangeMacro].focusMax); - ranges[AfRangeFull].focusDefault = ranges[AfRangeNormal].focusDefault; - if (rr.contains("full")) - ranges[AfRangeFull].read(rr["full"]); - } else - 
LOG(RPiAf, Warning) << "No ranges defined"; - - if (params.contains("speeds")) { - auto &ss = params["speeds"]; - - if (ss.contains("normal")) - speeds[AfSpeedNormal].read(ss["normal"]); - else - LOG(RPiAf, Warning) << "Missing speed \"normal\""; - - speeds[AfSpeedFast] = speeds[AfSpeedNormal]; - if (ss.contains("fast")) - speeds[AfSpeedFast].read(ss["fast"]); - } else - LOG(RPiAf, Warning) << "No speeds defined"; - - readNumber<uint32_t>(confEpsilon, params, "conf_epsilon"); - readNumber<uint32_t>(confThresh, params, "conf_thresh"); - readNumber<uint32_t>(confClip, params, "conf_clip"); - readNumber<uint32_t>(skipFrames, params, "skip_frames"); - - if (params.contains("map")) - map.read(params["map"]); - else - LOG(RPiAf, Warning) << "No map defined"; - - return 0; -} - -void Af::CfgParams::initialise() -{ - if (map.empty()) { - /* Default mapping from dioptres to hardware setting */ - static constexpr double DefaultMapX0 = 0.0; - static constexpr double DefaultMapY0 = 445.0; - static constexpr double DefaultMapX1 = 15.0; - static constexpr double DefaultMapY1 = 925.0; - - map.append(DefaultMapX0, DefaultMapY0); - map.append(DefaultMapX1, DefaultMapY1); - } -} - -/* Af Algorithm class */ - -static constexpr unsigned MaxWindows = 10; - -Af::Af(Controller *controller) - : AfAlgorithm(controller), - cfg_(), - range_(AfRangeNormal), - speed_(AfSpeedNormal), - mode_(AfAlgorithm::AfModeManual), - pauseFlag_(false), - statsRegion_(0, 0, 0, 0), - windows_(), - useWindows_(false), - phaseWeights_(), - contrastWeights_(), - scanState_(ScanState::Idle), - initted_(false), - ftarget_(-1.0), - fsmooth_(-1.0), - prevContrast_(0.0), - skipCount_(0), - stepCount_(0), - dropCount_(0), - scanMaxContrast_(0.0), - scanMinContrast_(1.0e9), - scanData_(), - reportState_(AfState::Idle) -{ - /* - * Reserve space for data, to reduce memory fragmentation. It's too early - * to query the size of the PDAF (from camera) and Contrast (from ISP) - * statistics, but these are plausible upper bounds. - */ - phaseWeights_.w.reserve(16 * 12); - contrastWeights_.w.reserve(getHardwareConfig().focusRegions.width * - getHardwareConfig().focusRegions.height); - scanData_.reserve(32); -} - -Af::~Af() -{ -} - -char const *Af::name() const -{ - return NAME; -} - -int Af::read(const libcamera::YamlObject ¶ms) -{ - return cfg_.read(params); -} - -void Af::initialise() -{ - cfg_.initialise(); -} - -void Af::switchMode(CameraMode const &cameraMode, [[maybe_unused]] Metadata *metadata) -{ - (void)metadata; - - /* Assume that PDAF and Focus stats grids cover the visible area */ - statsRegion_.x = (int)cameraMode.cropX; - statsRegion_.y = (int)cameraMode.cropY; - statsRegion_.width = (unsigned)(cameraMode.width * cameraMode.scaleX); - statsRegion_.height = (unsigned)(cameraMode.height * cameraMode.scaleY); - LOG(RPiAf, Debug) << "switchMode: statsRegion: " - << statsRegion_.x << ',' - << statsRegion_.y << ',' - << statsRegion_.width << ',' - << statsRegion_.height; - invalidateWeights(); - - if (scanState_ >= ScanState::Coarse && scanState_ < ScanState::Settle) { - /* - * If a scan was in progress, re-start it, as CDAF statistics - * may have changed. Though if the application is just about - * to take a still picture, this will not help... 
- */ - startProgrammedScan(); - } - skipCount_ = cfg_.skipFrames; -} - -void Af::computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols) -{ - wgts->rows = rows; - wgts->cols = cols; - wgts->sum = 0; - wgts->w.resize(rows * cols); - std::fill(wgts->w.begin(), wgts->w.end(), 0); - - if (rows > 0 && cols > 0 && useWindows_ && - statsRegion_.height >= rows && statsRegion_.width >= cols) { - /* - * Here we just merge all of the given windows, weighted by area. - * \todo Perhaps a better approach might be to find the phase in each - * window and choose either the closest or the highest-confidence one? - * Ensure weights sum to less than (1<<16). 46080 is a "round number" - * below 65536, for better rounding when window size is a simple - * fraction of image dimensions. - */ - const unsigned maxCellWeight = 46080u / (MaxWindows * rows * cols); - const unsigned cellH = statsRegion_.height / rows; - const unsigned cellW = statsRegion_.width / cols; - const unsigned cellA = cellH * cellW; - - for (auto &w : windows_) { - for (unsigned r = 0; r < rows; ++r) { - int y0 = std::max(statsRegion_.y + (int)(cellH * r), w.y); - int y1 = std::min(statsRegion_.y + (int)(cellH * (r + 1)), - w.y + (int)(w.height)); - if (y0 >= y1) - continue; - y1 -= y0; - for (unsigned c = 0; c < cols; ++c) { - int x0 = std::max(statsRegion_.x + (int)(cellW * c), w.x); - int x1 = std::min(statsRegion_.x + (int)(cellW * (c + 1)), - w.x + (int)(w.width)); - if (x0 >= x1) - continue; - unsigned a = y1 * (x1 - x0); - a = (maxCellWeight * a + cellA - 1) / cellA; - wgts->w[r * cols + c] += a; - wgts->sum += a; - } - } - } - } - - if (wgts->sum == 0) { - /* Default AF window is the middle 1/2 width of the middle 1/3 height */ - for (unsigned r = rows / 3; r < rows - rows / 3; ++r) { - for (unsigned c = cols / 4; c < cols - cols / 4; ++c) { - wgts->w[r * cols + c] = 1; - wgts->sum += 1; - } - } - } -} - -void Af::invalidateWeights() -{ - phaseWeights_.sum = 0; - contrastWeights_.sum = 0; -} - -bool Af::getPhase(PdafRegions const ®ions, double &phase, double &conf) -{ - libcamera::Size size = regions.size(); - if (size.height != phaseWeights_.rows || size.width != phaseWeights_.cols || - phaseWeights_.sum == 0) { - LOG(RPiAf, Debug) << "Recompute Phase weights " << size.width << 'x' << size.height; - computeWeights(&phaseWeights_, size.height, size.width); - } - - uint32_t sumWc = 0; - int64_t sumWcp = 0; - for (unsigned i = 0; i < regions.numRegions(); ++i) { - unsigned w = phaseWeights_.w[i]; - if (w) { - const PdafData &data = regions.get(i).val; - unsigned c = data.conf; - if (c >= cfg_.confThresh) { - if (c > cfg_.confClip) - c = cfg_.confClip; - c -= (cfg_.confThresh >> 2); - sumWc += w * c; - c -= (cfg_.confThresh >> 2); - sumWcp += (int64_t)(w * c) * (int64_t)data.phase; - } - } - } - - if (0 < phaseWeights_.sum && phaseWeights_.sum <= sumWc) { - phase = (double)sumWcp / (double)sumWc; - conf = (double)sumWc / (double)phaseWeights_.sum; - return true; - } else { - phase = 0.0; - conf = 0.0; - return false; - } -} - -double Af::getContrast(const FocusRegions &focusStats) -{ - libcamera::Size size = focusStats.size(); - if (size.height != contrastWeights_.rows || - size.width != contrastWeights_.cols || contrastWeights_.sum == 0) { - LOG(RPiAf, Debug) << "Recompute Contrast weights " - << size.width << 'x' << size.height; - computeWeights(&contrastWeights_, size.height, size.width); - } - - uint64_t sumWc = 0; - for (unsigned i = 0; i < focusStats.numRegions(); ++i) - sumWc += contrastWeights_.w[i] * 
focusStats.get(i).val; - - return (contrastWeights_.sum > 0) ? ((double)sumWc / (double)contrastWeights_.sum) : 0.0; -} - -void Af::doPDAF(double phase, double conf) -{ - /* Apply loop gain */ - phase *= cfg_.speeds[speed_].pdafGain; - - if (mode_ == AfModeContinuous) { - /* - * PDAF in Continuous mode. Scale down lens movement when - * delta is small or confidence is low, to suppress wobble. - */ - phase *= conf / (conf + cfg_.confEpsilon); - if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch) { - double a = phase / cfg_.speeds[speed_].pdafSquelch; - phase *= a * a; - } - } else { - /* - * PDAF in triggered-auto mode. Allow early termination when - * phase delta is small; scale down lens movements towards - * the end of the sequence, to ensure a stable image. - */ - if (stepCount_ >= cfg_.speeds[speed_].stepFrames) { - if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch) - stepCount_ = cfg_.speeds[speed_].stepFrames; - } else - phase *= stepCount_ / cfg_.speeds[speed_].stepFrames; - } - - /* Apply slew rate limit. Report failure if out of bounds. */ - if (phase < -cfg_.speeds[speed_].maxSlew) { - phase = -cfg_.speeds[speed_].maxSlew; - reportState_ = (ftarget_ <= cfg_.ranges[range_].focusMin) ? AfState::Failed - : AfState::Scanning; - } else if (phase > cfg_.speeds[speed_].maxSlew) { - phase = cfg_.speeds[speed_].maxSlew; - reportState_ = (ftarget_ >= cfg_.ranges[range_].focusMax) ? AfState::Failed - : AfState::Scanning; - } else - reportState_ = AfState::Focused; - - ftarget_ = fsmooth_ + phase; -} - -bool Af::earlyTerminationByPhase(double phase) -{ - if (scanData_.size() > 0 && - scanData_[scanData_.size() - 1].conf >= cfg_.confEpsilon) { - double oldFocus = scanData_[scanData_.size() - 1].focus; - double oldPhase = scanData_[scanData_.size() - 1].phase; - - /* - * Check that the gradient is finite and has the expected sign; - * Interpolate/extrapolate the lens position for zero phase. - * Check that the extrapolation is well-conditioned. 
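 * (Illustrative worked example, not from the original source: if the previous
 * scan sample sat at 1.0 dioptre with phase -40 and the current target is
 * 2.0 dioptres with phase +40, then param = 40 / (40 - (-40)) = 0.5 and the
 * zero-phase estimate becomes 2.0 + 0.5 * (1.0 - 2.0) = 1.5 dioptres, i.e.
 * halfway between the two samples.)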
- */ - if ((ftarget_ - oldFocus) * (phase - oldPhase) > 0.0) { - double param = phase / (phase - oldPhase); - if (-3.0 <= param && param <= 3.5) { - ftarget_ += param * (oldFocus - ftarget_); - LOG(RPiAf, Debug) << "ETBP: param=" << param; - return true; - } - } - } - - return false; -} - -double Af::findPeak(unsigned i) const -{ - double f = scanData_[i].focus; - - if (i > 0 && i + 1 < scanData_.size()) { - double dropLo = scanData_[i].contrast - scanData_[i - 1].contrast; - double dropHi = scanData_[i].contrast - scanData_[i + 1].contrast; - if (0.0 <= dropLo && dropLo < dropHi) { - double param = 0.3125 * (1.0 - dropLo / dropHi) * (1.6 - dropLo / dropHi); - f += param * (scanData_[i - 1].focus - f); - } else if (0.0 <= dropHi && dropHi < dropLo) { - double param = 0.3125 * (1.0 - dropHi / dropLo) * (1.6 - dropHi / dropLo); - f += param * (scanData_[i + 1].focus - f); - } - } - - LOG(RPiAf, Debug) << "FindPeak: " << f; - return f; -} - -void Af::doScan(double contrast, double phase, double conf) -{ - /* Record lens position, contrast and phase values for the current scan */ - if (scanData_.empty() || contrast > scanMaxContrast_) { - scanMaxContrast_ = contrast; - scanMaxIndex_ = scanData_.size(); - } - if (contrast < scanMinContrast_) - scanMinContrast_ = contrast; - scanData_.emplace_back(ScanRecord{ ftarget_, contrast, phase, conf }); - - if (scanState_ == ScanState::Coarse) { - if (ftarget_ >= cfg_.ranges[range_].focusMax || - contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { - /* - * Finished course scan, or termination based on contrast. - * Jump to just after max contrast and start fine scan. - */ - ftarget_ = std::min(ftarget_, findPeak(scanMaxIndex_) + - 2.0 * cfg_.speeds[speed_].stepFine); - scanState_ = ScanState::Fine; - scanData_.clear(); - } else - ftarget_ += cfg_.speeds[speed_].stepCoarse; - } else { /* ScanState::Fine */ - if (ftarget_ <= cfg_.ranges[range_].focusMin || scanData_.size() >= 5 || - contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { - /* - * Finished fine scan, or termination based on contrast. - * Use quadratic peak-finding to find best contrast position. - */ - ftarget_ = findPeak(scanMaxIndex_); - scanState_ = ScanState::Settle; - } else - ftarget_ -= cfg_.speeds[speed_].stepFine; - } - - stepCount_ = (ftarget_ == fsmooth_) ? 0 : cfg_.speeds[speed_].stepFrames; -} - -void Af::doAF(double contrast, double phase, double conf) -{ - /* Skip frames at startup and after sensor mode change */ - if (skipCount_ > 0) { - LOG(RPiAf, Debug) << "SKIP"; - skipCount_--; - return; - } - - if (scanState_ == ScanState::Pdaf) { - /* - * Use PDAF closed-loop control whenever available, in both CAF - * mode and (for a limited number of iterations) when triggered. - * If PDAF fails (due to poor contrast, noise or large defocus), - * fall back to a CDAF-based scan. To avoid "nuisance" scans, - * scan only after a number of frames with low PDAF confidence. - */ - if (conf > (dropCount_ ? 1.0 : 0.25) * cfg_.confEpsilon) { - doPDAF(phase, conf); - if (stepCount_ > 0) - stepCount_--; - else if (mode_ != AfModeContinuous) - scanState_ = ScanState::Idle; - dropCount_ = 0; - } else if (++dropCount_ == cfg_.speeds[speed_].dropoutFrames) - startProgrammedScan(); - } else if (scanState_ >= ScanState::Coarse && fsmooth_ == ftarget_) { - /* - * Scanning sequence. This means PDAF has become unavailable. - * Allow a delay between steps for CDAF FoM statistics to be - * updated, and a "settling time" at the end of the sequence. 
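 * (Illustrative aside: with the default step_frames of 4, the algorithm waits
 * four frames after each lens movement before acting on another contrast
 * measurement, so the ISP statistics have time to reflect the new position.)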
- * [A coarse or fine scan can be abandoned if two PDAF samples - * allow direct interpolation of the zero-phase lens position.] - */ - if (stepCount_ > 0) - stepCount_--; - else if (scanState_ == ScanState::Settle) { - if (prevContrast_ >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_ && - scanMinContrast_ <= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) - reportState_ = AfState::Focused; - else - reportState_ = AfState::Failed; - if (mode_ == AfModeContinuous && !pauseFlag_ && - cfg_.speeds[speed_].dropoutFrames > 0) - scanState_ = ScanState::Pdaf; - else - scanState_ = ScanState::Idle; - scanData_.clear(); - } else if (conf >= cfg_.confEpsilon && earlyTerminationByPhase(phase)) { - scanState_ = ScanState::Settle; - stepCount_ = (mode_ == AfModeContinuous) ? 0 - : cfg_.speeds[speed_].stepFrames; - } else - doScan(contrast, phase, conf); - } -} - -void Af::updateLensPosition() -{ - if (scanState_ >= ScanState::Pdaf) { - ftarget_ = std::clamp(ftarget_, - cfg_.ranges[range_].focusMin, - cfg_.ranges[range_].focusMax); - } - - if (initted_) { - /* from a known lens position: apply slew rate limit */ - fsmooth_ = std::clamp(ftarget_, - fsmooth_ - cfg_.speeds[speed_].maxSlew, - fsmooth_ + cfg_.speeds[speed_].maxSlew); - } else { - /* from an unknown position: go straight to target, but add delay */ - fsmooth_ = ftarget_; - initted_ = true; - skipCount_ = cfg_.skipFrames; - } -} - -void Af::startAF() -{ - /* Use PDAF if the tuning file allows it; else CDAF. */ - if (cfg_.speeds[speed_].dropoutFrames > 0 && - (mode_ == AfModeContinuous || cfg_.speeds[speed_].pdafFrames > 0)) { - if (!initted_) { - ftarget_ = cfg_.ranges[range_].focusDefault; - updateLensPosition(); - } - stepCount_ = (mode_ == AfModeContinuous) ? 0 : cfg_.speeds[speed_].pdafFrames; - scanState_ = ScanState::Pdaf; - scanData_.clear(); - dropCount_ = 0; - reportState_ = AfState::Scanning; - } else - startProgrammedScan(); -} - -void Af::startProgrammedScan() -{ - ftarget_ = cfg_.ranges[range_].focusMin; - updateLensPosition(); - scanState_ = ScanState::Coarse; - scanMaxContrast_ = 0.0; - scanMinContrast_ = 1.0e9; - scanMaxIndex_ = 0; - scanData_.clear(); - stepCount_ = cfg_.speeds[speed_].stepFrames; - reportState_ = AfState::Scanning; -} - -void Af::goIdle() -{ - scanState_ = ScanState::Idle; - reportState_ = AfState::Idle; - scanData_.clear(); -} - -/* - * PDAF phase data are available in prepare(), but CDAF statistics are not - * available until process(). We are gambling on the availability of PDAF. - * To expedite feedback control using PDAF, issue the V4L2 lens control from - * prepare(). Conversely, during scans, we must allow an extra frame delay - * between steps, to retrieve CDAF statistics from the previous process() - * so we can terminate the scan early without having to change our minds. 
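 * (An illustrative timeline, not from the original source: for frame N,
 * prepare() reads frame N's "pdaf.regions" metadata and can update the lens
 * straight away, while process() only records frame N's focus FoM in
 * prevContrast_; prepare() for frame N+1 then feeds that stored contrast,
 * together with frame N+1's PDAF phase, into doAF().)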
- */ - -void Af::prepare(Metadata *imageMetadata) -{ - /* Initialize for triggered scan or start of CAF mode */ - if (scanState_ == ScanState::Trigger) - startAF(); - - if (initted_) { - /* Get PDAF from the embedded metadata, and run AF algorithm core */ - PdafRegions regions; - double phase = 0.0, conf = 0.0; - double oldFt = ftarget_; - double oldFs = fsmooth_; - ScanState oldSs = scanState_; - uint32_t oldSt = stepCount_; - if (imageMetadata->get("pdaf.regions", regions) == 0) - getPhase(regions, phase, conf); - doAF(prevContrast_, phase, conf); - updateLensPosition(); - LOG(RPiAf, Debug) << std::fixed << std::setprecision(2) - << static_cast<unsigned int>(reportState_) - << " sst" << static_cast<unsigned int>(oldSs) - << "->" << static_cast<unsigned int>(scanState_) - << " stp" << oldSt << "->" << stepCount_ - << " ft" << oldFt << "->" << ftarget_ - << " fs" << oldFs << "->" << fsmooth_ - << " cont=" << (int)prevContrast_ - << " phase=" << (int)phase << " conf=" << (int)conf; - } - - /* Report status and produce new lens setting */ - AfStatus status; - if (pauseFlag_) - status.pauseState = (scanState_ == ScanState::Idle) ? AfPauseState::Paused - : AfPauseState::Pausing; - else - status.pauseState = AfPauseState::Running; - - if (mode_ == AfModeAuto && scanState_ != ScanState::Idle) - status.state = AfState::Scanning; - else - status.state = reportState_; - status.lensSetting = initted_ ? std::optional<int>(cfg_.map.eval(fsmooth_)) - : std::nullopt; - imageMetadata->set("af.status", status); -} - -void Af::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata) -{ - (void)imageMetadata; - prevContrast_ = getContrast(stats->focusRegions); -} - -/* Controls */ - -void Af::setRange(AfRange r) -{ - LOG(RPiAf, Debug) << "setRange: " << (unsigned)r; - if (r < AfAlgorithm::AfRangeMax) - range_ = r; -} - -void Af::setSpeed(AfSpeed s) -{ - LOG(RPiAf, Debug) << "setSpeed: " << (unsigned)s; - if (s < AfAlgorithm::AfSpeedMax) { - if (scanState_ == ScanState::Pdaf && - cfg_.speeds[s].pdafFrames > cfg_.speeds[speed_].pdafFrames) - stepCount_ += cfg_.speeds[s].pdafFrames - cfg_.speeds[speed_].pdafFrames; - speed_ = s; - } -} - -void Af::setMetering(bool mode) -{ - if (useWindows_ != mode) { - useWindows_ = mode; - invalidateWeights(); - } -} - -void Af::setWindows(libcamera::Span<libcamera::Rectangle const> const &wins) -{ - windows_.clear(); - for (auto &w : wins) { - LOG(RPiAf, Debug) << "Window: " - << w.x << ", " - << w.y << ", " - << w.width << ", " - << w.height; - windows_.push_back(w); - if (windows_.size() >= MaxWindows) - break; - } - - if (useWindows_) - invalidateWeights(); -} - -bool Af::setLensPosition(double dioptres, int *hwpos) -{ - bool changed = false; - - if (mode_ == AfModeManual) { - LOG(RPiAf, Debug) << "setLensPosition: " << dioptres; - ftarget_ = cfg_.map.domain().clip(dioptres); - changed = !(initted_ && fsmooth_ == ftarget_); - updateLensPosition(); - } - - if (hwpos) - *hwpos = cfg_.map.eval(fsmooth_); - - return changed; -} - -std::optional<double> Af::getLensPosition() const -{ - /* - * \todo We ought to perform some precise timing here to determine - * the current lens position. - */ - return initted_ ? 
std::optional<double>(fsmooth_) : std::nullopt; -} - -void Af::cancelScan() -{ - LOG(RPiAf, Debug) << "cancelScan"; - if (mode_ == AfModeAuto) - goIdle(); -} - -void Af::triggerScan() -{ - LOG(RPiAf, Debug) << "triggerScan"; - if (mode_ == AfModeAuto && scanState_ == ScanState::Idle) - scanState_ = ScanState::Trigger; -} - -void Af::setMode(AfAlgorithm::AfMode mode) -{ - LOG(RPiAf, Debug) << "setMode: " << (unsigned)mode; - if (mode_ != mode) { - mode_ = mode; - pauseFlag_ = false; - if (mode == AfModeContinuous) - scanState_ = ScanState::Trigger; - else if (mode != AfModeAuto || scanState_ < ScanState::Coarse) - goIdle(); - } -} - -AfAlgorithm::AfMode Af::getMode() const -{ - return mode_; -} - -void Af::pause(AfAlgorithm::AfPause pause) -{ - LOG(RPiAf, Debug) << "pause: " << (unsigned)pause; - if (mode_ == AfModeContinuous) { - if (pause == AfPauseResume && pauseFlag_) { - pauseFlag_ = false; - if (scanState_ < ScanState::Coarse) - scanState_ = ScanState::Trigger; - } else if (pause != AfPauseResume && !pauseFlag_) { - pauseFlag_ = true; - if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse) - goIdle(); - } - } -} - -// Register algorithm with the system. -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Af(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/af.h b/src/ipa/raspberrypi/controller/rpi/af.h deleted file mode 100644 index 6d2bae67..00000000 --- a/src/ipa/raspberrypi/controller/rpi/af.h +++ /dev/null @@ -1,165 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2022-2023, Raspberry Pi Ltd - * - * af.h - Autofocus control algorithm - */ -#pragma once - -#include "../af_algorithm.h" -#include "../af_status.h" -#include "../pdaf_data.h" -#include "../pwl.h" - -/* - * This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF. - * - * Whenever PDAF is available, it is used in a continuous feedback loop. - * When triggered in auto mode, we simply enable AF for a limited number - * of frames (it may terminate early if the delta becomes small enough). - * - * When PDAF confidence is low (due e.g. to low contrast or extreme defocus) - * or PDAF data are absent, fall back to CDAF with a programmed scan pattern. - * A coarse and fine scan are performed, using ISP's CDAF focus FoM to - * estimate the lens position with peak contrast. This is slower due to - * extra latency in the ISP, and requires a settling time between steps. - * - * Some hysteresis is applied to the switch between PDAF and CDAF, to avoid - * "nuisance" scans. During each interval where PDAF is not working, only - * ONE scan will be performed; CAF cannot track objects using CDAF alone. 
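 * A minimal usage sketch (illustrative only, not part of the original file;
 * it assumes an AfAlgorithm pointer "af" and the per-frame Metadata pointer
 * "imageMetadata" used elsewhere in this controller):
 *
 *   af->setMode(AfAlgorithm::AfModeAuto);
 *   af->triggerScan();      // arms a scan; it starts on the next prepare()
 *   ...
 *   AfStatus status;
 *   if (imageMetadata->get("af.status", status) == 0 &&
 *       (status.state == AfState::Focused || status.state == AfState::Failed))
 *       ...                 // the triggered scan has finished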
- * - */ - -namespace RPiController { - -class Af : public AfAlgorithm -{ -public: - Af(Controller *controller = NULL); - ~Af(); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void initialise() override; - - /* IPA calls */ - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - - /* controls */ - void setRange(AfRange range) override; - void setSpeed(AfSpeed speed) override; - void setMetering(bool use_windows) override; - void setWindows(libcamera::Span<libcamera::Rectangle const> const &wins) override; - void setMode(AfMode mode) override; - AfMode getMode() const override; - bool setLensPosition(double dioptres, int32_t *hwpos) override; - std::optional<double> getLensPosition() const override; - void triggerScan() override; - void cancelScan() override; - void pause(AfPause pause) override; - -private: - enum class ScanState { - Idle = 0, - Trigger, - Pdaf, - Coarse, - Fine, - Settle - }; - - struct RangeDependentParams { - double focusMin; /* lower (far) limit in dipotres */ - double focusMax; /* upper (near) limit in dioptres */ - double focusDefault; /* default setting ("hyperfocal") */ - - RangeDependentParams(); - void read(const libcamera::YamlObject ¶ms); - }; - - struct SpeedDependentParams { - double stepCoarse; /* used for scans */ - double stepFine; /* used for scans */ - double contrastRatio; /* used for scan termination and reporting */ - double pdafGain; /* coefficient for PDAF feedback loop */ - double pdafSquelch; /* PDAF stability parameter (device-specific) */ - double maxSlew; /* limit for lens movement per frame */ - uint32_t pdafFrames; /* number of iterations when triggered */ - uint32_t dropoutFrames; /* number of non-PDAF frames to switch to CDAF */ - uint32_t stepFrames; /* frames to skip in between steps of a scan */ - - SpeedDependentParams(); - void read(const libcamera::YamlObject ¶ms); - }; - - struct CfgParams { - RangeDependentParams ranges[AfRangeMax]; - SpeedDependentParams speeds[AfSpeedMax]; - uint32_t confEpsilon; /* PDAF hysteresis threshold (sensor-specific) */ - uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */ - uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */ - uint32_t skipFrames; /* frames to skip at start or modeswitch */ - Pwl map; /* converts dioptres -> lens driver position */ - - CfgParams(); - int read(const libcamera::YamlObject ¶ms); - void initialise(); - }; - - struct ScanRecord { - double focus; - double contrast; - double phase; - double conf; - }; - - struct RegionWeights { - unsigned rows; - unsigned cols; - uint32_t sum; - std::vector<uint16_t> w; - - RegionWeights() - : rows(0), cols(0), sum(0), w() {} - }; - - void computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols); - void invalidateWeights(); - bool getPhase(PdafRegions const ®ions, double &phase, double &conf); - double getContrast(const FocusRegions &focusStats); - void doPDAF(double phase, double conf); - bool earlyTerminationByPhase(double phase); - double findPeak(unsigned index) const; - void doScan(double contrast, double phase, double conf); - void doAF(double contrast, double phase, double conf); - void updateLensPosition(); - void startAF(); - void startProgrammedScan(); - void goIdle(); - - /* Configuration and settings */ - CfgParams cfg_; - AfRange range_; - AfSpeed speed_; - AfMode mode_; - bool pauseFlag_; - 
libcamera::Rectangle statsRegion_; - std::vector<libcamera::Rectangle> windows_; - bool useWindows_; - RegionWeights phaseWeights_; - RegionWeights contrastWeights_; - - /* Working state. */ - ScanState scanState_; - bool initted_; - double ftarget_, fsmooth_; - double prevContrast_; - unsigned skipCount_, stepCount_, dropCount_; - unsigned scanMaxIndex_; - double scanMaxContrast_, scanMinContrast_; - std::vector<ScanRecord> scanData_; - AfState reportState_; -}; - -} // namespace RPiController diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp deleted file mode 100644 index e6fb7b8d..00000000 --- a/src/ipa/raspberrypi/controller/rpi/agc.cpp +++ /dev/null @@ -1,922 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * agc.cpp - AGC/AEC control algorithm - */ - -#include <algorithm> -#include <map> -#include <tuple> - -#include <libcamera/base/log.h> - -#include "../awb_status.h" -#include "../device_status.h" -#include "../histogram.h" -#include "../lux_status.h" -#include "../metadata.h" - -#include "agc.h" - -using namespace RPiController; -using namespace libcamera; -using libcamera::utils::Duration; -using namespace std::literals::chrono_literals; - -LOG_DEFINE_CATEGORY(RPiAgc) - -#define NAME "rpi.agc" - -int AgcMeteringMode::read(const libcamera::YamlObject ¶ms) -{ - const YamlObject &yamlWeights = params["weights"]; - - for (const auto &p : yamlWeights.asList()) { - auto value = p.get<double>(); - if (!value) - return -EINVAL; - weights.push_back(*value); - } - - return 0; -} - -static std::tuple<int, std::string> -readMeteringModes(std::map<std::string, AgcMeteringMode> &metering_modes, - const libcamera::YamlObject ¶ms) -{ - std::string first; - int ret; - - for (const auto &[key, value] : params.asDict()) { - AgcMeteringMode meteringMode; - ret = meteringMode.read(value); - if (ret) - return { ret, {} }; - - metering_modes[key] = std::move(meteringMode); - if (first.empty()) - first = key; - } - - return { 0, first }; -} - -int AgcExposureMode::read(const libcamera::YamlObject ¶ms) -{ - auto value = params["shutter"].getList<double>(); - if (!value) - return -EINVAL; - std::transform(value->begin(), value->end(), std::back_inserter(shutter), - [](double v) { return v * 1us; }); - - value = params["gain"].getList<double>(); - if (!value) - return -EINVAL; - gain = std::move(*value); - - if (shutter.size() < 2 || gain.size() < 2) { - LOG(RPiAgc, Error) - << "AgcExposureMode: must have at least two entries in exposure profile"; - return -EINVAL; - } - - if (shutter.size() != gain.size()) { - LOG(RPiAgc, Error) - << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile"; - return -EINVAL; - } - - return 0; -} - -static std::tuple<int, std::string> -readExposureModes(std::map<std::string, AgcExposureMode> &exposureModes, - const libcamera::YamlObject ¶ms) -{ - std::string first; - int ret; - - for (const auto &[key, value] : params.asDict()) { - AgcExposureMode exposureMode; - ret = exposureMode.read(value); - if (ret) - return { ret, {} }; - - exposureModes[key] = std::move(exposureMode); - if (first.empty()) - first = key; - } - - return { 0, first }; -} - -int AgcConstraint::read(const libcamera::YamlObject ¶ms) -{ - std::string boundString = params["bound"].get<std::string>(""); - transform(boundString.begin(), boundString.end(), - boundString.begin(), ::toupper); - if (boundString != "UPPER" && boundString != "LOWER") { - LOG(RPiAgc, Error) << "AGC 
constraint type should be UPPER or LOWER"; - return -EINVAL; - } - bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER; - - auto value = params["q_lo"].get<double>(); - if (!value) - return -EINVAL; - qLo = *value; - - value = params["q_hi"].get<double>(); - if (!value) - return -EINVAL; - qHi = *value; - - return yTarget.read(params["y_target"]); -} - -static std::tuple<int, AgcConstraintMode> -readConstraintMode(const libcamera::YamlObject ¶ms) -{ - AgcConstraintMode mode; - int ret; - - for (const auto &p : params.asList()) { - AgcConstraint constraint; - ret = constraint.read(p); - if (ret) - return { ret, {} }; - - mode.push_back(std::move(constraint)); - } - - return { 0, mode }; -} - -static std::tuple<int, std::string> -readConstraintModes(std::map<std::string, AgcConstraintMode> &constraintModes, - const libcamera::YamlObject ¶ms) -{ - std::string first; - int ret; - - for (const auto &[key, value] : params.asDict()) { - std::tie(ret, constraintModes[key]) = readConstraintMode(value); - if (ret) - return { ret, {} }; - - if (first.empty()) - first = key; - } - - return { 0, first }; -} - -int AgcConfig::read(const libcamera::YamlObject ¶ms) -{ - LOG(RPiAgc, Debug) << "AgcConfig"; - int ret; - - std::tie(ret, defaultMeteringMode) = - readMeteringModes(meteringModes, params["metering_modes"]); - if (ret) - return ret; - std::tie(ret, defaultExposureMode) = - readExposureModes(exposureModes, params["exposure_modes"]); - if (ret) - return ret; - std::tie(ret, defaultConstraintMode) = - readConstraintModes(constraintModes, params["constraint_modes"]); - if (ret) - return ret; - - ret = yTarget.read(params["y_target"]); - if (ret) - return ret; - - speed = params["speed"].get<double>(0.2); - startupFrames = params["startup_frames"].get<uint16_t>(10); - convergenceFrames = params["convergence_frames"].get<unsigned int>(6); - fastReduceThreshold = params["fast_reduce_threshold"].get<double>(0.4); - baseEv = params["base_ev"].get<double>(1.0); - - /* Start with quite a low value as ramping up is easier than ramping down. */ - defaultExposureTime = params["default_exposure_time"].get<double>(1000) * 1us; - defaultAnalogueGain = params["default_analogue_gain"].get<double>(1.0); - - return 0; -} - -Agc::ExposureValues::ExposureValues() - : shutter(0s), analogueGain(0), - totalExposure(0s), totalExposureNoDG(0s) -{ -} - -Agc::Agc(Controller *controller) - : AgcAlgorithm(controller), meteringMode_(nullptr), - exposureMode_(nullptr), constraintMode_(nullptr), - frameCount_(0), lockCount_(0), - lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s), - maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0) -{ - memset(&awb_, 0, sizeof(awb_)); - /* - * Setting status_.totalExposureValue_ to zero initially tells us - * it's not been calculated yet (i.e. Process hasn't yet run). - */ - memset(&status_, 0, sizeof(status_)); - status_.ev = ev_; -} - -char const *Agc::name() const -{ - return NAME; -} - -int Agc::read(const libcamera::YamlObject ¶ms) -{ - LOG(RPiAgc, Debug) << "Agc"; - - int ret = config_.read(params); - if (ret) - return ret; - - const Size &size = getHardwareConfig().agcZoneWeights; - for (auto const &modes : config_.meteringModes) { - if (modes.second.weights.size() != size.width * size.height) { - LOG(RPiAgc, Error) << "AgcMeteringMode: Incorrect number of weights"; - return -EINVAL; - } - } - - /* - * Set the config's defaults (which are the first ones it read) as our - * current modes, until someone changes them. 
(they're all known to - * exist at this point) - */ - meteringModeName_ = config_.defaultMeteringMode; - meteringMode_ = &config_.meteringModes[meteringModeName_]; - exposureModeName_ = config_.defaultExposureMode; - exposureMode_ = &config_.exposureModes[exposureModeName_]; - constraintModeName_ = config_.defaultConstraintMode; - constraintMode_ = &config_.constraintModes[constraintModeName_]; - /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */ - status_.shutterTime = config_.defaultExposureTime; - status_.analogueGain = config_.defaultAnalogueGain; - return 0; -} - -void Agc::disableAuto() -{ - fixedShutter_ = status_.shutterTime; - fixedAnalogueGain_ = status_.analogueGain; -} - -void Agc::enableAuto() -{ - fixedShutter_ = 0s; - fixedAnalogueGain_ = 0; -} - -unsigned int Agc::getConvergenceFrames() const -{ - /* - * If shutter and gain have been explicitly set, there is no - * convergence to happen, so no need to drop any frames - return zero. - */ - if (fixedShutter_ && fixedAnalogueGain_) - return 0; - else - return config_.convergenceFrames; -} - -void Agc::setEv(double ev) -{ - ev_ = ev; -} - -void Agc::setFlickerPeriod(Duration flickerPeriod) -{ - flickerPeriod_ = flickerPeriod; -} - -void Agc::setMaxShutter(Duration maxShutter) -{ - maxShutter_ = maxShutter; -} - -void Agc::setFixedShutter(Duration fixedShutter) -{ - fixedShutter_ = fixedShutter; - /* Set this in case someone calls disableAuto() straight after. */ - status_.shutterTime = limitShutter(fixedShutter_); -} - -void Agc::setFixedAnalogueGain(double fixedAnalogueGain) -{ - fixedAnalogueGain_ = fixedAnalogueGain; - /* Set this in case someone calls disableAuto() straight after. */ - status_.analogueGain = limitGain(fixedAnalogueGain); -} - -void Agc::setMeteringMode(std::string const &meteringModeName) -{ - meteringModeName_ = meteringModeName; -} - -void Agc::setExposureMode(std::string const &exposureModeName) -{ - exposureModeName_ = exposureModeName; -} - -void Agc::setConstraintMode(std::string const &constraintModeName) -{ - constraintModeName_ = constraintModeName; -} - -void Agc::switchMode(CameraMode const &cameraMode, - Metadata *metadata) -{ - /* AGC expects the mode sensitivity always to be non-zero. */ - ASSERT(cameraMode.sensitivity); - - housekeepConfig(); - - /* - * Store the mode in the local state. We must cache the sensitivity of - * of the previous mode for the calculations below. - */ - double lastSensitivity = mode_.sensitivity; - mode_ = cameraMode; - - Duration fixedShutter = limitShutter(fixedShutter_); - if (fixedShutter && fixedAnalogueGain_) { - /* We're going to reset the algorithm here with these fixed values. */ - - fetchAwbStatus(metadata); - double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); - ASSERT(minColourGain != 0.0); - - /* This is the equivalent of computeTargetExposure and applyDigitalGain. */ - target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_; - target_.totalExposure = target_.totalExposureNoDG / minColourGain; - - /* Equivalent of filterExposure. This resets any "history". */ - filtered_ = target_; - - /* Equivalent of divideUpExposure. 
*/ - filtered_.shutter = fixedShutter; - filtered_.analogueGain = fixedAnalogueGain_; - } else if (status_.totalExposureValue) { - /* - * On a mode switch, various things could happen: - * - the exposure profile might change - * - a fixed exposure or gain might be set - * - the new mode's sensitivity might be different - * We cope with the last of these by scaling the target values. After - * that we just need to re-divide the exposure/gain according to the - * current exposure profile, which takes care of everything else. - */ - - double ratio = lastSensitivity / cameraMode.sensitivity; - target_.totalExposureNoDG *= ratio; - target_.totalExposure *= ratio; - filtered_.totalExposureNoDG *= ratio; - filtered_.totalExposure *= ratio; - - divideUpExposure(); - } else { - /* - * We come through here on startup, when at least one of the shutter - * or gain has not been fixed. We must still write those values out so - * that they will be applied immediately. We supply some arbitrary defaults - * for any that weren't set. - */ - - /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime; - filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain; - } - - writeAndFinish(metadata, false); -} - -void Agc::prepare(Metadata *imageMetadata) -{ - Duration totalExposureValue = status_.totalExposureValue; - AgcStatus delayedStatus; - - if (!imageMetadata->get("agc.delayed_status", delayedStatus)) - totalExposureValue = delayedStatus.totalExposureValue; - - status_.digitalGain = 1.0; - fetchAwbStatus(imageMetadata); /* always fetch it so that Process knows it's been done */ - - if (status_.totalExposureValue) { - /* Process has run, so we have meaningful values. */ - DeviceStatus deviceStatus; - if (imageMetadata->get("device.status", deviceStatus) == 0) { - Duration actualExposure = deviceStatus.shutterSpeed * - deviceStatus.analogueGain; - if (actualExposure) { - status_.digitalGain = totalExposureValue / actualExposure; - LOG(RPiAgc, Debug) << "Want total exposure " << totalExposureValue; - /* - * Never ask for a gain < 1.0, and also impose - * some upper limit. Make it customisable? - */ - status_.digitalGain = std::max(1.0, std::min(status_.digitalGain, 4.0)); - LOG(RPiAgc, Debug) << "Actual exposure " << actualExposure; - LOG(RPiAgc, Debug) << "Use digitalGain " << status_.digitalGain; - LOG(RPiAgc, Debug) << "Effective exposure " - << actualExposure * status_.digitalGain; - /* Decide whether AEC/AGC has converged. */ - updateLockStatus(deviceStatus); - } - } else - LOG(RPiAgc, Warning) << name() << ": no device metadata"; - imageMetadata->set("agc.status", status_); - } -} - -void Agc::process(StatisticsPtr &stats, Metadata *imageMetadata) -{ - frameCount_++; - /* - * First a little bit of housekeeping, fetching up-to-date settings and - * configuration, that kind of thing. - */ - housekeepConfig(); - /* Get the current exposure values for the frame that's just arrived. */ - fetchCurrentExposure(imageMetadata); - /* Compute the total gain we require relative to the current exposure. */ - double gain, targetY; - computeGain(stats, imageMetadata, gain, targetY); - /* Now compute the target (final) exposure which we think we want. */ - computeTargetExposure(gain); - /* - * Some of the exposure has to be applied as digital gain, so work out - * what that is. This function also tells us whether it's decided to - * "desaturate" the image more quickly. 
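 * (Illustrative numbers only: with AWB gains of 1.8/1.0/1.5 the minimum
 * colour gain is 1.0, so the baseline digital gain is also 1.0; if target_Y
 * exceeds fast_reduce_threshold (0.4 by default) while the requested gain is
 * below sqrt(target_Y), the digital gain is instead pushed up to
 * 1.0 / 0.4 = 2.5 so that the sensor exposure can be cut quickly.)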
- */ - bool desaturate = applyDigitalGain(gain, targetY); - /* The results have to be filtered so as not to change too rapidly. */ - filterExposure(desaturate); - /* - * The last thing is to divide up the exposure value into a shutter time - * and analogue gain, according to the current exposure mode. - */ - divideUpExposure(); - /* Finally advertise what we've done. */ - writeAndFinish(imageMetadata, desaturate); -} - -void Agc::updateLockStatus(DeviceStatus const &deviceStatus) -{ - const double errorFactor = 0.10; /* make these customisable? */ - const int maxLockCount = 5; - /* Reset "lock count" when we exceed this multiple of errorFactor */ - const double resetMargin = 1.5; - - /* Add 200us to the exposure time error to allow for line quantisation. */ - Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us; - double gainError = lastDeviceStatus_.analogueGain * errorFactor; - Duration targetError = lastTargetExposure_ * errorFactor; - - /* - * Note that we don't know the exposure/gain limits of the sensor, so - * the values we keep requesting may be unachievable. For this reason - * we only insist that we're close to values in the past few frames. - */ - if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError && - deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError && - deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError && - deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError && - status_.targetExposureValue > lastTargetExposure_ - targetError && - status_.targetExposureValue < lastTargetExposure_ + targetError) - lockCount_ = std::min(lockCount_ + 1, maxLockCount); - else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError || - deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError || - deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError || - deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError || - status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError || - status_.targetExposureValue > lastTargetExposure_ + resetMargin * targetError) - lockCount_ = 0; - - lastDeviceStatus_ = deviceStatus; - lastTargetExposure_ = status_.targetExposureValue; - - LOG(RPiAgc, Debug) << "Lock count updated to " << lockCount_; - status_.locked = lockCount_ == maxLockCount; -} - -static void copyString(std::string const &s, char *d, size_t size) -{ - size_t length = s.copy(d, size - 1); - d[length] = '\0'; -} - -void Agc::housekeepConfig() -{ - /* First fetch all the up-to-date settings, so no one else has to do it. */ - status_.ev = ev_; - status_.fixedShutter = limitShutter(fixedShutter_); - status_.fixedAnalogueGain = fixedAnalogueGain_; - status_.flickerPeriod = flickerPeriod_; - LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter " - << status_.fixedShutter << " fixedAnalogueGain " - << status_.fixedAnalogueGain; - /* - * Make sure the "mode" pointers point to the up-to-date things, if - * they've changed. 
- */ - if (strcmp(meteringModeName_.c_str(), status_.meteringMode)) { - auto it = config_.meteringModes.find(meteringModeName_); - if (it == config_.meteringModes.end()) - LOG(RPiAgc, Fatal) << "No metering mode " << meteringModeName_; - meteringMode_ = &it->second; - copyString(meteringModeName_, status_.meteringMode, - sizeof(status_.meteringMode)); - } - if (strcmp(exposureModeName_.c_str(), status_.exposureMode)) { - auto it = config_.exposureModes.find(exposureModeName_); - if (it == config_.exposureModes.end()) - LOG(RPiAgc, Fatal) << "No exposure profile " << exposureModeName_; - exposureMode_ = &it->second; - copyString(exposureModeName_, status_.exposureMode, - sizeof(status_.exposureMode)); - } - if (strcmp(constraintModeName_.c_str(), status_.constraintMode)) { - auto it = - config_.constraintModes.find(constraintModeName_); - if (it == config_.constraintModes.end()) - LOG(RPiAgc, Fatal) << "No constraint list " << constraintModeName_; - constraintMode_ = &it->second; - copyString(constraintModeName_, status_.constraintMode, - sizeof(status_.constraintMode)); - } - LOG(RPiAgc, Debug) << "exposureMode " - << exposureModeName_ << " constraintMode " - << constraintModeName_ << " meteringMode " - << meteringModeName_; -} - -void Agc::fetchCurrentExposure(Metadata *imageMetadata) -{ - std::unique_lock<Metadata> lock(*imageMetadata); - DeviceStatus *deviceStatus = - imageMetadata->getLocked<DeviceStatus>("device.status"); - if (!deviceStatus) - LOG(RPiAgc, Fatal) << "No device metadata"; - current_.shutter = deviceStatus->shutterSpeed; - current_.analogueGain = deviceStatus->analogueGain; - AgcStatus *agcStatus = - imageMetadata->getLocked<AgcStatus>("agc.status"); - current_.totalExposure = agcStatus ? agcStatus->totalExposureValue : 0s; - current_.totalExposureNoDG = current_.shutter * current_.analogueGain; -} - -void Agc::fetchAwbStatus(Metadata *imageMetadata) -{ - awb_.gainR = 1.0; /* in case not found in metadata */ - awb_.gainG = 1.0; - awb_.gainB = 1.0; - if (imageMetadata->get("awb.status", awb_) != 0) - LOG(RPiAgc, Debug) << "No AWB status found"; -} - -static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, - std::vector<double> &weights, double gain) -{ - constexpr uint64_t maxVal = 1 << Statistics::NormalisationFactorPow2; - - ASSERT(weights.size() == stats->agcRegions.numRegions()); - - /* - * Note how the calculation below means that equal weights give you - * "average" metering (i.e. all pixels equally important). - */ - double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0; - for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) { - auto ®ion = stats->agcRegions.get(i); - double rAcc = std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); - double gAcc = std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); - double bAcc = std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); - rSum += rAcc * weights[i]; - gSum += gAcc * weights[i]; - bSum += bAcc * weights[i]; - pixelSum += region.counted * weights[i]; - } - if (pixelSum == 0.0) { - LOG(RPiAgc, Warning) << "computeInitialY: pixelSum is zero"; - return 0; - } - double ySum = rSum * awb.gainR * .299 + - gSum * awb.gainG * .587 + - bSum * awb.gainB * .114; - return ySum / pixelSum / maxVal; -} - -/* - * We handle extra gain through EV by adjusting our Y targets. However, you - * simply can't monitor histograms once they get very close to (or beyond!) - * saturation, so we clamp the Y targets to this value. 
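 * (For instance, with illustrative numbers: a tuned Y target of 0.2 under an
 * EV gain of 2 becomes 0.4, but under an EV gain of 8 the raw value of 1.6 is
 * clamped back down to 0.9.)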
It does mean that EV - * increases don't necessarily do quite what you might expect in certain - * (contrived) cases. - */ - -static constexpr double EvGainYTargetLimit = 0.9; - -static double constraintComputeGain(AgcConstraint &c, const Histogram &h, double lux, - double evGain, double &targetY) -{ - targetY = c.yTarget.eval(c.yTarget.domain().clip(lux)); - targetY = std::min(EvGainYTargetLimit, targetY * evGain); - double iqm = h.interQuantileMean(c.qLo, c.qHi); - return (targetY * h.bins()) / iqm; -} - -void Agc::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, - double &gain, double &targetY) -{ - struct LuxStatus lux = {}; - lux.lux = 400; /* default lux level to 400 in case no metadata found */ - if (imageMetadata->get("lux.status", lux) != 0) - LOG(RPiAgc, Warning) << "No lux level found"; - const Histogram &h = statistics->yHist; - double evGain = status_.ev * config_.baseEv; - /* - * The initial gain and target_Y come from some of the regions. After - * that we consider the histogram constraints. - */ - targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux)); - targetY = std::min(EvGainYTargetLimit, targetY * evGain); - - /* - * Do this calculation a few times as brightness increase can be - * non-linear when there are saturated regions. - */ - gain = 1.0; - for (int i = 0; i < 8; i++) { - double initialY = computeInitialY(statistics, awb_, meteringMode_->weights, gain); - double extraGain = std::min(10.0, targetY / (initialY + .001)); - gain *= extraGain; - LOG(RPiAgc, Debug) << "Initial Y " << initialY << " target " << targetY - << " gives gain " << gain; - if (extraGain < 1.01) /* close enough */ - break; - } - - for (auto &c : *constraintMode_) { - double newTargetY; - double newGain = constraintComputeGain(c, h, lux.lux, evGain, newTargetY); - LOG(RPiAgc, Debug) << "Constraint has target_Y " - << newTargetY << " giving gain " << newGain; - if (c.bound == AgcConstraint::Bound::LOWER && newGain > gain) { - LOG(RPiAgc, Debug) << "Lower bound constraint adopted"; - gain = newGain; - targetY = newTargetY; - } else if (c.bound == AgcConstraint::Bound::UPPER && newGain < gain) { - LOG(RPiAgc, Debug) << "Upper bound constraint adopted"; - gain = newGain; - targetY = newTargetY; - } - } - LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << targetY << " ev " - << status_.ev << " base_ev " << config_.baseEv - << ")"; -} - -void Agc::computeTargetExposure(double gain) -{ - if (status_.fixedShutter && status_.fixedAnalogueGain) { - /* - * When ag and shutter are both fixed, we need to drive the - * total exposure so that we end up with a digital gain of at least - * 1/minColourGain. Otherwise we'd desaturate channels causing - * white to go cyan or magenta. - */ - double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); - ASSERT(minColourGain != 0.0); - target_.totalExposure = - status_.fixedShutter * status_.fixedAnalogueGain / minColourGain; - } else { - /* - * The statistics reflect the image without digital gain, so the final - * total exposure we're aiming for is: - */ - target_.totalExposure = current_.totalExposureNoDG * gain; - /* The final target exposure is also limited to what the exposure mode allows. */ - Duration maxShutter = status_.fixedShutter - ? status_.fixedShutter - : exposureMode_->shutter.back(); - maxShutter = limitShutter(maxShutter); - Duration maxTotalExposure = - maxShutter * - (status_.fixedAnalogueGain != 0.0 - ? 
status_.fixedAnalogueGain - : exposureMode_->gain.back()); - target_.totalExposure = std::min(target_.totalExposure, maxTotalExposure); - } - LOG(RPiAgc, Debug) << "Target totalExposure " << target_.totalExposure; -} - -bool Agc::applyDigitalGain(double gain, double targetY) -{ - double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); - ASSERT(minColourGain != 0.0); - double dg = 1.0 / minColourGain; - /* - * I think this pipeline subtracts black level and rescales before we - * get the stats, so no need to worry about it. - */ - LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain - << " target_Y " << targetY; - /* - * Finally, if we're trying to reduce exposure but the target_Y is - * "close" to 1.0, then the gain computed for that constraint will be - * only slightly less than one, because the measured Y can never be - * larger than 1.0. When this happens, demand a large digital gain so - * that the exposure can be reduced, de-saturating the image much more - * quickly (and we then approach the correct value more quickly from - * below). - */ - bool desaturate = targetY > config_.fastReduceThreshold && - gain < sqrt(targetY); - if (desaturate) - dg /= config_.fastReduceThreshold; - LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate; - target_.totalExposureNoDG = target_.totalExposure / dg; - LOG(RPiAgc, Debug) << "Target totalExposureNoDG " << target_.totalExposureNoDG; - return desaturate; -} - -void Agc::filterExposure(bool desaturate) -{ - double speed = config_.speed; - /* - * AGC adapts instantly if both shutter and gain are directly specified - * or we're in the startup phase. - */ - if ((status_.fixedShutter && status_.fixedAnalogueGain) || - frameCount_ <= config_.startupFrames) - speed = 1.0; - if (!filtered_.totalExposure) { - filtered_.totalExposure = target_.totalExposure; - filtered_.totalExposureNoDG = target_.totalExposureNoDG; - } else { - /* - * If close to the result go faster, to save making so many - * micro-adjustments on the way. (Make this customisable?) - */ - if (filtered_.totalExposure < 1.2 * target_.totalExposure && - filtered_.totalExposure > 0.8 * target_.totalExposure) - speed = sqrt(speed); - filtered_.totalExposure = speed * target_.totalExposure + - filtered_.totalExposure * (1.0 - speed); - /* - * When desaturing, take a big jump down in totalExposureNoDG, - * which we'll hide with digital gain. - */ - if (desaturate) - filtered_.totalExposureNoDG = - target_.totalExposureNoDG; - else - filtered_.totalExposureNoDG = - speed * target_.totalExposureNoDG + - filtered_.totalExposureNoDG * (1.0 - speed); - } - /* - * We can't let the totalExposureNoDG exposure deviate too far below the - * total exposure, as there might not be enough digital gain available - * in the ISP to hide it (which will cause nasty oscillation). - */ - if (filtered_.totalExposureNoDG < - filtered_.totalExposure * config_.fastReduceThreshold) - filtered_.totalExposureNoDG = filtered_.totalExposure * config_.fastReduceThreshold; - LOG(RPiAgc, Debug) << "After filtering, totalExposure " << filtered_.totalExposure - << " no dg " << filtered_.totalExposureNoDG; -} - -void Agc::divideUpExposure() -{ - /* - * Sending the fixed shutter/gain cases through the same code may seem - * unnecessary, but it will make more sense when extend this to cover - * variable aperture. - */ - Duration exposureValue = filtered_.totalExposureNoDG; - Duration shutterTime; - double analogueGain; - shutterTime = status_.fixedShutter ? 
status_.fixedShutter - : exposureMode_->shutter[0]; - shutterTime = limitShutter(shutterTime); - analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain - : exposureMode_->gain[0]; - analogueGain = limitGain(analogueGain); - if (shutterTime * analogueGain < exposureValue) { - for (unsigned int stage = 1; - stage < exposureMode_->gain.size(); stage++) { - if (!status_.fixedShutter) { - Duration stageShutter = - limitShutter(exposureMode_->shutter[stage]); - if (stageShutter * analogueGain >= exposureValue) { - shutterTime = exposureValue / analogueGain; - break; - } - shutterTime = stageShutter; - } - if (status_.fixedAnalogueGain == 0.0) { - if (exposureMode_->gain[stage] * shutterTime >= exposureValue) { - analogueGain = exposureValue / shutterTime; - break; - } - analogueGain = exposureMode_->gain[stage]; - analogueGain = limitGain(analogueGain); - } - } - } - LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and " - << analogueGain; - /* - * Finally adjust shutter time for flicker avoidance (require both - * shutter and gain not to be fixed). - */ - if (!status_.fixedShutter && !status_.fixedAnalogueGain && - status_.flickerPeriod) { - int flickerPeriods = shutterTime / status_.flickerPeriod; - if (flickerPeriods) { - Duration newShutterTime = flickerPeriods * status_.flickerPeriod; - analogueGain *= shutterTime / newShutterTime; - /* - * We should still not allow the ag to go over the - * largest value in the exposure mode. Note that this - * may force more of the total exposure into the digital - * gain as a side-effect. - */ - analogueGain = std::min(analogueGain, exposureMode_->gain.back()); - analogueGain = limitGain(analogueGain); - shutterTime = newShutterTime; - } - LOG(RPiAgc, Debug) << "After flicker avoidance, shutter " - << shutterTime << " gain " << analogueGain; - } - filtered_.shutter = shutterTime; - filtered_.analogueGain = analogueGain; -} - -void Agc::writeAndFinish(Metadata *imageMetadata, bool desaturate) -{ - status_.totalExposureValue = filtered_.totalExposure; - status_.targetExposureValue = desaturate ? 0s : target_.totalExposureNoDG; - status_.shutterTime = filtered_.shutter; - status_.analogueGain = filtered_.analogueGain; - /* - * Write to metadata as well, in case anyone wants to update the camera - * immediately. - */ - imageMetadata->set("agc.status", status_); - LOG(RPiAgc, Debug) << "Output written, total exposure requested is " - << filtered_.totalExposure; - LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter - << " analogue gain " << filtered_.analogueGain; -} - -Duration Agc::limitShutter(Duration shutter) -{ - /* - * shutter == 0 is a special case for fixed shutter values, and must pass - * through unchanged - */ - if (!shutter) - return shutter; - - shutter = std::clamp(shutter, mode_.minShutter, maxShutter_); - return shutter; -} - -double Agc::limitGain(double gain) const -{ - /* - * Only limit the lower bounds of the gain value to what the sensor limits. - * The upper bound on analogue gain will be made up with additional digital - * gain applied by the ISP. - * - * gain == 0.0 is a special case for fixed shutter values, and must pass - * through unchanged - */ - if (!gain) - return gain; - - gain = std::max(gain, mode_.minAnalogueGain); - return gain; -} - -/* Register algorithm with the system. 
*/ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Agc(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/agc.h b/src/ipa/raspberrypi/controller/rpi/agc.h deleted file mode 100644 index 4e5f272f..00000000 --- a/src/ipa/raspberrypi/controller/rpi/agc.h +++ /dev/null @@ -1,133 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * agc.h - AGC/AEC control algorithm - */ -#pragma once - -#include <vector> -#include <mutex> - -#include <libcamera/base/utils.h> - -#include "../agc_algorithm.h" -#include "../agc_status.h" -#include "../pwl.h" - -/* This is our implementation of AGC. */ - -namespace RPiController { - -struct AgcMeteringMode { - std::vector<double> weights; - int read(const libcamera::YamlObject &params); -}; - -struct AgcExposureMode { - std::vector<libcamera::utils::Duration> shutter; - std::vector<double> gain; - int read(const libcamera::YamlObject &params); -}; - -struct AgcConstraint { - enum class Bound { LOWER = 0, UPPER = 1 }; - Bound bound; - double qLo; - double qHi; - Pwl yTarget; - int read(const libcamera::YamlObject &params); -}; - -typedef std::vector<AgcConstraint> AgcConstraintMode; - -struct AgcConfig { - int read(const libcamera::YamlObject &params); - std::map<std::string, AgcMeteringMode> meteringModes; - std::map<std::string, AgcExposureMode> exposureModes; - std::map<std::string, AgcConstraintMode> constraintModes; - Pwl yTarget; - double speed; - uint16_t startupFrames; - unsigned int convergenceFrames; - double maxChange; - double minChange; - double fastReduceThreshold; - double speedUpThreshold; - std::string defaultMeteringMode; - std::string defaultExposureMode; - std::string defaultConstraintMode; - double baseEv; - libcamera::utils::Duration defaultExposureTime; - double defaultAnalogueGain; -}; - -class Agc : public AgcAlgorithm -{ -public: - Agc(Controller *controller); - char const *name() const override; - int read(const libcamera::YamlObject &params) override; - unsigned int getConvergenceFrames() const override; - void setEv(double ev) override; - void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override; - void setMaxShutter(libcamera::utils::Duration maxShutter) override; - void setFixedShutter(libcamera::utils::Duration fixedShutter) override; - void setFixedAnalogueGain(double fixedAnalogueGain) override; - void setMeteringMode(std::string const &meteringModeName) override; - void setExposureMode(std::string const &exposureModeName) override; - void setConstraintMode(std::string const &contraintModeName) override; - void enableAuto() override; - void disableAuto() override; - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - -private: - void updateLockStatus(DeviceStatus const &deviceStatus); - AgcConfig config_; - void housekeepConfig(); - void fetchCurrentExposure(Metadata *imageMetadata); - void fetchAwbStatus(Metadata *imageMetadata); - void computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, - double &gain, double &targetY); - void computeTargetExposure(double gain); - bool applyDigitalGain(double gain, double targetY); - void filterExposure(bool desaturate); - void divideUpExposure(); - void writeAndFinish(Metadata *imageMetadata, bool desaturate); - libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter); - double
limitGain(double gain) const; - AgcMeteringMode *meteringMode_; - AgcExposureMode *exposureMode_; - AgcConstraintMode *constraintMode_; - CameraMode mode_; - uint64_t frameCount_; - AwbStatus awb_; - struct ExposureValues { - ExposureValues(); - - libcamera::utils::Duration shutter; - double analogueGain; - libcamera::utils::Duration totalExposure; - libcamera::utils::Duration totalExposureNoDG; /* without digital gain */ - }; - ExposureValues current_; /* values for the current frame */ - ExposureValues target_; /* calculate the values we want here */ - ExposureValues filtered_; /* these values are filtered towards target */ - AgcStatus status_; - int lockCount_; - DeviceStatus lastDeviceStatus_; - libcamera::utils::Duration lastTargetExposure_; - /* Below here the "settings" that applications can change. */ - std::string meteringModeName_; - std::string exposureModeName_; - std::string constraintModeName_; - double ev_; - libcamera::utils::Duration flickerPeriod_; - libcamera::utils::Duration maxShutter_; - libcamera::utils::Duration fixedShutter_; - double fixedAnalogueGain_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.cpp b/src/ipa/raspberrypi/controller/rpi/alsc.cpp deleted file mode 100644 index 3a2e8fe0..00000000 --- a/src/ipa/raspberrypi/controller/rpi/alsc.cpp +++ /dev/null @@ -1,865 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * alsc.cpp - ALSC (auto lens shading correction) control algorithm - */ - -#include <algorithm> -#include <functional> -#include <math.h> -#include <numeric> - -#include <libcamera/base/log.h> -#include <libcamera/base/span.h> - -#include "../awb_status.h" -#include "alsc.h" - -/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */ - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiAlsc) - -#define NAME "rpi.alsc" - -static const double InsufficientData = -1.0; - -Alsc::Alsc(Controller *controller) - : Algorithm(controller) -{ - asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false; - asyncThread_ = std::thread(std::bind(&Alsc::asyncFunc, this)); -} - -Alsc::~Alsc() -{ - { - std::lock_guard<std::mutex> lock(mutex_); - asyncAbort_ = true; - } - asyncSignal_.notify_one(); - asyncThread_.join(); -} - -char const *Alsc::name() const -{ - return NAME; -} - -static int generateLut(Array2D<double> &lut, const libcamera::YamlObject ¶ms) -{ - /* These must be signed ints for the co-ordinate calculations below. 
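 * (Illustrative reading of the formula below, not from the original source:
 * with f1 = corner_strength - 1 and f2 = 1 + sqrt(corner_strength), a cell at
 * the centre has r2 close to 0 and therefore a gain of about f2^2 / f2^2 = 1.0,
 * while a corner cell of a square table with asymmetry 1.0 has r2 close to 1
 * and a gain of (f1 + f2)^2 / f2^2 = corner_strength.)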
*/ - int X = lut.dimensions().width, Y = lut.dimensions().height; - double cstrength = params["corner_strength"].get<double>(2.0); - if (cstrength <= 1.0) { - LOG(RPiAlsc, Error) << "corner_strength must be > 1.0"; - return -EINVAL; - } - - double asymmetry = params["asymmetry"].get<double>(1.0); - if (asymmetry < 0) { - LOG(RPiAlsc, Error) << "asymmetry must be >= 0"; - return -EINVAL; - } - - double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength); - double R2 = X * Y / 4 * (1 + asymmetry * asymmetry); - int num = 0; - for (int y = 0; y < Y; y++) { - for (int x = 0; x < X; x++) { - double dy = y - Y / 2 + 0.5, - dx = (x - X / 2 + 0.5) * asymmetry; - double r2 = (dx * dx + dy * dy) / R2; - lut[num++] = - (f1 * r2 + f2) * (f1 * r2 + f2) / - (f2 * f2); /* this reproduces the cos^4 rule */ - } - } - return 0; -} - -static int readLut(Array2D<double> &lut, const libcamera::YamlObject ¶ms) -{ - if (params.size() != lut.size()) { - LOG(RPiAlsc, Error) << "Invalid number of entries in LSC table"; - return -EINVAL; - } - - int num = 0; - for (const auto &p : params.asList()) { - auto value = p.get<double>(); - if (!value) - return -EINVAL; - lut[num++] = *value; - } - - return 0; -} - -static int readCalibrations(std::vector<AlscCalibration> &calibrations, - const libcamera::YamlObject ¶ms, - std::string const &name, const Size &size) -{ - if (params.contains(name)) { - double lastCt = 0; - for (const auto &p : params[name].asList()) { - auto value = p["ct"].get<double>(); - if (!value) - return -EINVAL; - double ct = *value; - if (ct <= lastCt) { - LOG(RPiAlsc, Error) - << "Entries in " << name << " must be in increasing ct order"; - return -EINVAL; - } - AlscCalibration calibration; - calibration.ct = lastCt = ct; - - const libcamera::YamlObject &table = p["table"]; - if (table.size() != size.width * size.height) { - LOG(RPiAlsc, Error) - << "Incorrect number of values for ct " - << ct << " in " << name; - return -EINVAL; - } - - int num = 0; - calibration.table.resize(size); - for (const auto &elem : table.asList()) { - value = elem.get<double>(); - if (!value) - return -EINVAL; - calibration.table[num++] = *value; - } - - calibrations.push_back(std::move(calibration)); - LOG(RPiAlsc, Debug) - << "Read " << name << " calibration for ct " << ct; - } - } - return 0; -} - -int Alsc::read(const libcamera::YamlObject ¶ms) -{ - config_.tableSize = getHardwareConfig().awbRegions; - config_.framePeriod = params["frame_period"].get<uint16_t>(12); - config_.startupFrames = params["startup_frames"].get<uint16_t>(10); - config_.speed = params["speed"].get<double>(0.05); - double sigma = params["sigma"].get<double>(0.01); - config_.sigmaCr = params["sigma_Cr"].get<double>(sigma); - config_.sigmaCb = params["sigma_Cb"].get<double>(sigma); - config_.minCount = params["min_count"].get<double>(10.0); - config_.minG = params["min_G"].get<uint16_t>(50); - config_.omega = params["omega"].get<double>(1.3); - config_.nIter = params["n_iter"].get<uint32_t>(config_.tableSize.width + config_.tableSize.height); - config_.luminanceStrength = - params["luminance_strength"].get<double>(1.0); - - config_.luminanceLut.resize(config_.tableSize, 1.0); - int ret = 0; - - if (params.contains("corner_strength")) - ret = generateLut(config_.luminanceLut, params); - else if (params.contains("luminance_lut")) - ret = readLut(config_.luminanceLut, params["luminance_lut"]); - else - LOG(RPiAlsc, Warning) - << "no luminance table - assume unity everywhere"; - if (ret) - return ret; - - ret = readCalibrations(config_.calibrationsCr, 
params, "calibrations_Cr", - config_.tableSize); - if (ret) - return ret; - ret = readCalibrations(config_.calibrationsCb, params, "calibrations_Cb", - config_.tableSize); - if (ret) - return ret; - - config_.defaultCt = params["default_ct"].get<double>(4500.0); - config_.threshold = params["threshold"].get<double>(1e-3); - config_.lambdaBound = params["lambda_bound"].get<double>(0.05); - - return 0; -} - -static double getCt(Metadata *metadata, double defaultCt); -static void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations, - Array2D<double> &calTable); -static void resampleCalTable(const Array2D<double> &calTableIn, CameraMode const &cameraMode, - Array2D<double> &calTableOut); -static void compensateLambdasForCal(const Array2D<double> &calTable, - const Array2D<double> &oldLambdas, - Array2D<double> &newLambdas); -static void addLuminanceToTables(std::array<Array2D<double>, 3> &results, - const Array2D<double> &lambdaR, double lambdaG, - const Array2D<double> &lambdaB, - const Array2D<double> &luminanceLut, - double luminanceStrength); - -void Alsc::initialise() -{ - frameCount2_ = frameCount_ = framePhase_ = 0; - firstTime_ = true; - ct_ = config_.defaultCt; - - const size_t XY = config_.tableSize.width * config_.tableSize.height; - - for (auto &r : syncResults_) - r.resize(config_.tableSize); - for (auto &r : prevSyncResults_) - r.resize(config_.tableSize); - for (auto &r : asyncResults_) - r.resize(config_.tableSize); - - luminanceTable_.resize(config_.tableSize); - asyncLambdaR_.resize(config_.tableSize); - asyncLambdaB_.resize(config_.tableSize); - /* The lambdas are initialised in the SwitchMode. */ - lambdaR_.resize(config_.tableSize); - lambdaB_.resize(config_.tableSize); - - /* Temporaries for the computations, but sensible to allocate this up-front! */ - for (auto &c : tmpC_) - c.resize(config_.tableSize); - for (auto &m : tmpM_) - m.resize(XY); -} - -void Alsc::waitForAysncThread() -{ - if (asyncStarted_) { - asyncStarted_ = false; - std::unique_lock<std::mutex> lock(mutex_); - syncSignal_.wait(lock, [&] { - return asyncFinished_; - }); - asyncFinished_ = false; - } -} - -static bool compareModes(CameraMode const &cm0, CameraMode const &cm1) -{ - /* - * Return true if the modes crop from the sensor significantly differently, - * or if the user transform has changed. - */ - if (cm0.transform != cm1.transform) - return true; - int leftDiff = abs(cm0.cropX - cm1.cropX); - int topDiff = abs(cm0.cropY - cm1.cropY); - int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width - - cm1.cropX - cm1.scaleX * cm1.width); - int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height - - cm1.cropY - cm1.scaleY * cm1.height); - /* - * These thresholds are a rather arbitrary amount chosen to trigger - * when carrying on with the previously calculated tables might be - * worse than regenerating them (but without the adaptive algorithm). - */ - int thresholdX = cm0.sensorWidth >> 4; - int thresholdY = cm0.sensorHeight >> 4; - return leftDiff > thresholdX || rightDiff > thresholdX || - topDiff > thresholdY || bottomDiff > thresholdY; -} - -void Alsc::switchMode(CameraMode const &cameraMode, - [[maybe_unused]] Metadata *metadata) -{ - /* - * We're going to start over with the tables if there's any "significant" - * change. - */ - bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode); - - /* Believe the colour temperature from the AWB, if there is one. */ - ct_ = getCt(metadata, ct_); - - /* Ensure the other thread isn't running while we do this. 
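For reference, the handshake works as follows: the synchronous side sets asyncStart_
under mutex_ and notifies asyncSignal_ (see restartAsync); the worker wakes in
asyncFunc(), runs doAlsc(), sets asyncFinished_ and notifies syncSignal_;
waitForAysncThread() then blocks on syncSignal_ until asyncFinished_ is observed.
Only after that is it safe to reinitialise the lambdas and result tables here without
racing the worker.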
*/ - waitForAysncThread(); - - cameraMode_ = cameraMode; - - /* - * We must resample the luminance table like we do the others, but it's - * fixed so we can simply do it up front here. - */ - resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_); - - if (resetTables) { - /* - * Upon every "table reset", arrange for something sensible to be - * generated. Construct the tables for the previous recorded colour - * temperature. In order to start over from scratch we initialise - * the lambdas, but the rest of this code then echoes the code in - * doAlsc, without the adaptive algorithm. - */ - std::fill(lambdaR_.begin(), lambdaR_.end(), 1.0); - std::fill(lambdaB_.begin(), lambdaB_.end(), 1.0); - Array2D<double> &calTableR = tmpC_[0], &calTableB = tmpC_[1], &calTableTmp = tmpC_[2]; - getCalTable(ct_, config_.calibrationsCr, calTableTmp); - resampleCalTable(calTableTmp, cameraMode_, calTableR); - getCalTable(ct_, config_.calibrationsCb, calTableTmp); - resampleCalTable(calTableTmp, cameraMode_, calTableB); - compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_); - compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_); - addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_, - luminanceTable_, config_.luminanceStrength); - prevSyncResults_ = syncResults_; - framePhase_ = config_.framePeriod; /* run the algo again asap */ - firstTime_ = false; - } -} - -void Alsc::fetchAsyncResults() -{ - LOG(RPiAlsc, Debug) << "Fetch ALSC results"; - asyncFinished_ = false; - asyncStarted_ = false; - syncResults_ = asyncResults_; -} - -double getCt(Metadata *metadata, double defaultCt) -{ - AwbStatus awbStatus; - awbStatus.temperatureK = defaultCt; /* in case nothing found */ - if (metadata->get("awb.status", awbStatus) != 0) - LOG(RPiAlsc, Debug) << "no AWB results found, using " - << awbStatus.temperatureK; - else - LOG(RPiAlsc, Debug) << "AWB results found, using " - << awbStatus.temperatureK; - return awbStatus.temperatureK; -} - -static void copyStats(RgbyRegions ®ions, StatisticsPtr &stats, - AlscStatus const &status) -{ - if (!regions.numRegions()) - regions.init(stats->awbRegions.size()); - - const std::vector<double> &rTable = status.r; - const std::vector<double> &gTable = status.g; - const std::vector<double> &bTable = status.b; - for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) { - auto r = stats->awbRegions.get(i); - r.val.rSum = static_cast<uint64_t>(r.val.rSum / rTable[i]); - r.val.gSum = static_cast<uint64_t>(r.val.gSum / gTable[i]); - r.val.bSum = static_cast<uint64_t>(r.val.bSum / bTable[i]); - regions.set(i, r); - } -} - -void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata) -{ - LOG(RPiAlsc, Debug) << "Starting ALSC calculation"; - /* - * Get the current colour temperature. It's all we need from the - * metadata. Default to the last CT value (which could be the default). - */ - ct_ = getCt(imageMetadata, ct_); - /* - * We have to copy the statistics here, dividing out our best guess of - * the LSC table that the pipeline applied to them. 
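Concretely, copyStats() performs, for every AWB region i, rSum_i /= R_i,
gSum_i /= G_i and bSum_i /= B_i, where (R_i, G_i, B_i) are the per-region gains
reported in "alsc.status"; if no status is found the tables default to 1.0 and the
statistics are used as-is.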
- */ - AlscStatus alscStatus; - if (imageMetadata->get("alsc.status", alscStatus) != 0) { - LOG(RPiAlsc, Warning) - << "No ALSC status found for applied gains!"; - alscStatus.r.resize(config_.tableSize.width * config_.tableSize.height, 1.0); - alscStatus.g.resize(config_.tableSize.width * config_.tableSize.height, 1.0); - alscStatus.b.resize(config_.tableSize.width * config_.tableSize.height, 1.0); - } - copyStats(statistics_, stats, alscStatus); - framePhase_ = 0; - asyncStarted_ = true; - { - std::lock_guard<std::mutex> lock(mutex_); - asyncStart_ = true; - } - asyncSignal_.notify_one(); -} - -void Alsc::prepare(Metadata *imageMetadata) -{ - /* - * Count frames since we started, and since we last poked the async - * thread. - */ - if (frameCount_ < (int)config_.startupFrames) - frameCount_++; - double speed = frameCount_ < (int)config_.startupFrames - ? 1.0 - : config_.speed; - LOG(RPiAlsc, Debug) - << "frame count " << frameCount_ << " speed " << speed; - { - std::unique_lock<std::mutex> lock(mutex_); - if (asyncStarted_ && asyncFinished_) - fetchAsyncResults(); - } - /* Apply IIR filter to results and program into the pipeline. */ - for (unsigned int j = 0; j < syncResults_.size(); j++) { - for (unsigned int i = 0; i < syncResults_[j].size(); i++) - prevSyncResults_[j][i] = speed * syncResults_[j][i] + (1.0 - speed) * prevSyncResults_[j][i]; - } - /* Put output values into status metadata. */ - AlscStatus status; - status.r = prevSyncResults_[0].data(); - status.g = prevSyncResults_[1].data(); - status.b = prevSyncResults_[2].data(); - imageMetadata->set("alsc.status", status); -} - -void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata) -{ - /* - * Count frames since we started, and since we last poked the async - * thread. - */ - if (framePhase_ < (int)config_.framePeriod) - framePhase_++; - if (frameCount2_ < (int)config_.startupFrames) - frameCount2_++; - LOG(RPiAlsc, Debug) << "frame_phase " << framePhase_; - if (framePhase_ >= (int)config_.framePeriod || - frameCount2_ < (int)config_.startupFrames) { - if (asyncStarted_ == false) - restartAsync(stats, imageMetadata); - } -} - -void Alsc::asyncFunc() -{ - while (true) { - { - std::unique_lock<std::mutex> lock(mutex_); - asyncSignal_.wait(lock, [&] { - return asyncStart_ || asyncAbort_; - }); - asyncStart_ = false; - if (asyncAbort_) - break; - } - doAlsc(); - { - std::lock_guard<std::mutex> lock(mutex_); - asyncFinished_ = true; - } - syncSignal_.notify_one(); - } -} - -void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations, - Array2D<double> &calTable) -{ - if (calibrations.empty()) { - std::fill(calTable.begin(), calTable.end(), 1.0); - LOG(RPiAlsc, Debug) << "no calibrations found"; - } else if (ct <= calibrations.front().ct) { - calTable = calibrations.front().table; - LOG(RPiAlsc, Debug) << "using calibration for " - << calibrations.front().ct; - } else if (ct >= calibrations.back().ct) { - calTable = calibrations.back().table; - LOG(RPiAlsc, Debug) << "using calibration for " - << calibrations.back().ct; - } else { - int idx = 0; - while (ct > calibrations[idx + 1].ct) - idx++; - double ct0 = calibrations[idx].ct, ct1 = calibrations[idx + 1].ct; - LOG(RPiAlsc, Debug) - << "ct is " << ct << ", interpolating between " - << ct0 << " and " << ct1; - for (unsigned int i = 0; i < calTable.size(); i++) - calTable[i] = - (calibrations[idx].table[i] * (ct1 - ct) + - calibrations[idx + 1].table[i] * (ct - ct0)) / - (ct1 - ct0); - } -} - -void resampleCalTable(const Array2D<double> &calTableIn, - 
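/* resampleCalTable() below maps each output cell centre back into the full-sensor
 * calibration grid using the camera mode's crop and scale, takes a bilinear
 * interpolation of the four surrounding calibration cells, and mirrors the sample
 * indices when the mode includes a horizontal or vertical flip. */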
CameraMode const &cameraMode, - Array2D<double> &calTableOut) -{ - int X = calTableIn.dimensions().width; - int Y = calTableIn.dimensions().height; - - /* - * Precalculate and cache the x sampling locations and phases to save - * recomputing them on every row. - */ - int xLo[X], xHi[X]; - double xf[X]; - double scaleX = cameraMode.sensorWidth / - (cameraMode.width * cameraMode.scaleX); - double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth; - double x = .5 / scaleX + xOff * X - .5; - double xInc = 1 / scaleX; - for (int i = 0; i < X; i++, x += xInc) { - xLo[i] = floor(x); - xf[i] = x - xLo[i]; - xHi[i] = std::min(xLo[i] + 1, X - 1); - xLo[i] = std::max(xLo[i], 0); - if (!!(cameraMode.transform & libcamera::Transform::HFlip)) { - xLo[i] = X - 1 - xLo[i]; - xHi[i] = X - 1 - xHi[i]; - } - } - /* Now march over the output table generating the new values. */ - double scaleY = cameraMode.sensorHeight / - (cameraMode.height * cameraMode.scaleY); - double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight; - double y = .5 / scaleY + yOff * Y - .5; - double yInc = 1 / scaleY; - for (int j = 0; j < Y; j++, y += yInc) { - int yLo = floor(y); - double yf = y - yLo; - int yHi = std::min(yLo + 1, Y - 1); - yLo = std::max(yLo, 0); - if (!!(cameraMode.transform & libcamera::Transform::VFlip)) { - yLo = Y - 1 - yLo; - yHi = Y - 1 - yHi; - } - double const *rowAbove = calTableIn.ptr() + X * yLo; - double const *rowBelow = calTableIn.ptr() + X * yHi; - double *out = calTableOut.ptr() + X * j; - for (int i = 0; i < X; i++) { - double above = rowAbove[xLo[i]] * (1 - xf[i]) + - rowAbove[xHi[i]] * xf[i]; - double below = rowBelow[xLo[i]] * (1 - xf[i]) + - rowBelow[xHi[i]] * xf[i]; - *(out++) = above * (1 - yf) + below * yf; - } - } -} - -/* Calculate chrominance statistics (R/G and B/G) for each region. */ -static void calculateCrCb(const RgbyRegions &awbRegion, Array2D<double> &cr, - Array2D<double> &cb, uint32_t minCount, uint16_t minG) -{ - for (unsigned int i = 0; i < cr.size(); i++) { - auto s = awbRegion.get(i); - - if (s.counted <= minCount || s.val.gSum / s.counted <= minG) { - cr[i] = cb[i] = InsufficientData; - continue; - } - - cr[i] = s.val.rSum / (double)s.val.gSum; - cb[i] = s.val.bSum / (double)s.val.gSum; - } -} - -static void applyCalTable(const Array2D<double> &calTable, Array2D<double> &C) -{ - for (unsigned int i = 0; i < C.size(); i++) - if (C[i] != InsufficientData) - C[i] *= calTable[i]; -} - -void compensateLambdasForCal(const Array2D<double> &calTable, - const Array2D<double> &oldLambdas, - Array2D<double> &newLambdas) -{ - double minNewLambda = std::numeric_limits<double>::max(); - for (unsigned int i = 0; i < newLambdas.size(); i++) { - newLambdas[i] = oldLambdas[i] * calTable[i]; - minNewLambda = std::min(minNewLambda, newLambdas[i]); - } - for (unsigned int i = 0; i < newLambdas.size(); i++) - newLambdas[i] /= minNewLambda; -} - -[[maybe_unused]] static void printCalTable(const Array2D<double> &C) -{ - const Size &size = C.dimensions(); - printf("table: [\n"); - for (unsigned int j = 0; j < size.height; j++) { - for (unsigned int i = 0; i < size.width; i++) { - printf("%5.3f", 1.0 / C[j * size.width + i]); - if (i != size.width - 1 || j != size.height - 1) - printf(","); - } - printf("\n"); - } - printf("]\n"); -} - -/* - * Compute weight out of 1.0 which reflects how similar we wish to make the - * colours of these two regions. 
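A sketch of the weighting used below: for neighbouring zones i and j,
w_ij = exp( -((C_i - C_j) / sigma)^2 / 2 ),
so zones with nearly identical chrominance are pulled strongly towards the same lambda,
while dissimilar zones (or zones flagged InsufficientData, which get a weight of 0) are
left largely independent.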
- */ -static double computeWeight(double Ci, double Cj, double sigma) -{ - if (Ci == InsufficientData || Cj == InsufficientData) - return 0; - double diff = (Ci - Cj) / sigma; - return exp(-diff * diff / 2); -} - -/* Compute all weights. */ -static void computeW(const Array2D<double> &C, double sigma, - SparseArray<double> &W) -{ - size_t XY = C.size(); - size_t X = C.dimensions().width; - - for (unsigned int i = 0; i < XY; i++) { - /* Start with neighbour above and go clockwise. */ - W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0; - W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0; - W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0; - W[i][3] = i % X ? computeWeight(C[i], C[i - 1], sigma) : 0; - } -} - -/* Compute M, the large but sparse matrix such that M * lambdas = 0. */ -static void constructM(const Array2D<double> &C, - const SparseArray<double> &W, - SparseArray<double> &M) -{ - size_t XY = C.size(); - size_t X = C.dimensions().width; - - double epsilon = 0.001; - for (unsigned int i = 0; i < XY; i++) { - /* - * Note how, if C[i] == INSUFFICIENT_DATA, the weights will all - * be zero so the equation is still set up correctly. - */ - int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) + - !!(i % X); /* total number of neighbours */ - /* we'll divide the diagonal out straight away */ - double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i]; - M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0; - M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0; - M[i][2] = i < XY - X ? (W[i][2] * C[i + X] + epsilon / m * C[i]) / diagonal : 0; - M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) / diagonal : 0; - } -} - -/* - * In the compute_lambda_ functions, note that the matrix coefficients for the - * left/right neighbours are zero down the left/right edges, so we don't need - * need to test the i value to exclude them. - */ -static double computeLambdaBottom(int i, const SparseArray<double> &M, - Array2D<double> &lambda) -{ - return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width] + - M[i][3] * lambda[i - 1]; -} -static double computeLambdaBottomStart(int i, const SparseArray<double> &M, - Array2D<double> &lambda) -{ - return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width]; -} -static double computeLambdaInterior(int i, const SparseArray<double> &M, - Array2D<double> &lambda) -{ - return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] + - M[i][2] * lambda[i + lambda.dimensions().width] + M[i][3] * lambda[i - 1]; -} -static double computeLambdaTop(int i, const SparseArray<double> &M, - Array2D<double> &lambda) -{ - return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] + - M[i][3] * lambda[i - 1]; -} -static double computeLambdaTopEnd(int i, const SparseArray<double> &M, - Array2D<double> &lambda) -{ - return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][3] * lambda[i - 1]; -} - -/* Gauss-Seidel iteration with over-relaxation. 
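Each call below performs one forward sweep (from the bottom row upwards) and one
backward sweep over the grid, replacing every lambda_i by the M-weighted combination of
its neighbours and clamping it to [1 - lambdaBound, 1 + lambdaBound]. The new values are
then over-relaxed, lambda_i = old_i + omega * (new_i - old_i), and the largest change is
returned so that runMatrixIterations() can stop iterating once it falls below the
configured threshold.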
*/ -static double gaussSeidel2Sor(const SparseArray<double> &M, double omega, - Array2D<double> &lambda, double lambdaBound) -{ - int XY = lambda.size(); - int X = lambda.dimensions().width; - const double min = 1 - lambdaBound, max = 1 + lambdaBound; - Array2D<double> oldLambda = lambda; - int i; - lambda[0] = computeLambdaBottomStart(0, M, lambda); - lambda[0] = std::clamp(lambda[0], min, max); - for (i = 1; i < X; i++) { - lambda[i] = computeLambdaBottom(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - for (; i < XY - X; i++) { - lambda[i] = computeLambdaInterior(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - for (; i < XY - 1; i++) { - lambda[i] = computeLambdaTop(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - lambda[i] = computeLambdaTopEnd(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - /* - * Also solve the system from bottom to top, to help spread the updates - * better. - */ - lambda[i] = computeLambdaTopEnd(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - for (i = XY - 2; i >= XY - X; i--) { - lambda[i] = computeLambdaTop(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - for (; i >= X; i--) { - lambda[i] = computeLambdaInterior(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - for (; i >= 1; i--) { - lambda[i] = computeLambdaBottom(i, M, lambda); - lambda[i] = std::clamp(lambda[i], min, max); - } - lambda[0] = computeLambdaBottomStart(0, M, lambda); - lambda[0] = std::clamp(lambda[0], min, max); - double maxDiff = 0; - for (i = 0; i < XY; i++) { - lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega; - if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff)) - maxDiff = lambda[i] - oldLambda[i]; - } - return maxDiff; -} - -/* Normalise the values so that the smallest value is 1. */ -static void normalise(Array2D<double> &results) -{ - double minval = *std::min_element(results.begin(), results.end()); - std::for_each(results.begin(), results.end(), - [minval](double val) { return val / minval; }); -} - -/* Rescale the values so that the average value is 1. */ -static void reaverage(Array2D<double> &data) -{ - double sum = std::accumulate(data.begin(), data.end(), 0.0); - double ratio = 1 / (sum / data.size()); - std::for_each(data.begin(), data.end(), - [ratio](double val) { return val * ratio; }); -} - -static void runMatrixIterations(const Array2D<double> &C, - Array2D<double> &lambda, - const SparseArray<double> &W, - SparseArray<double> &M, double omega, - unsigned int nIter, double threshold, double lambdaBound) -{ - constructM(C, W, M); - double lastMaxDiff = std::numeric_limits<double>::max(); - for (unsigned int i = 0; i < nIter; i++) { - double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); - if (maxDiff < threshold) { - LOG(RPiAlsc, Debug) - << "Stop after " << i + 1 << " iterations"; - break; - } - /* - * this happens very occasionally (so make a note), though - * doesn't seem to matter - */ - if (maxDiff > lastMaxDiff) - LOG(RPiAlsc, Debug) - << "Iteration " << i << ": maxDiff gone up " - << lastMaxDiff << " to " << maxDiff; - lastMaxDiff = maxDiff; - } - /* We're going to normalise the lambdas so the total average is 1. 
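Keeping the mean at 1 means the adaptive stage only redistributes gain across the image;
the overall level is left to the calibration and luminance tables.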
*/ - reaverage(lambda); -} - -static void addLuminanceRb(Array2D<double> &result, const Array2D<double> &lambda, - const Array2D<double> &luminanceLut, - double luminanceStrength) -{ - for (unsigned int i = 0; i < result.size(); i++) - result[i] = lambda[i] * ((luminanceLut[i] - 1) * luminanceStrength + 1); -} - -static void addLuminanceG(Array2D<double> &result, double lambda, - const Array2D<double> &luminanceLut, - double luminanceStrength) -{ - for (unsigned int i = 0; i < result.size(); i++) - result[i] = lambda * ((luminanceLut[i] - 1) * luminanceStrength + 1); -} - -void addLuminanceToTables(std::array<Array2D<double>, 3> &results, - const Array2D<double> &lambdaR, - double lambdaG, const Array2D<double> &lambdaB, - const Array2D<double> &luminanceLut, - double luminanceStrength) -{ - addLuminanceRb(results[0], lambdaR, luminanceLut, luminanceStrength); - addLuminanceG(results[1], lambdaG, luminanceLut, luminanceStrength); - addLuminanceRb(results[2], lambdaB, luminanceLut, luminanceStrength); - for (auto &r : results) - normalise(r); -} - -void Alsc::doAlsc() -{ - Array2D<double> &cr = tmpC_[0], &cb = tmpC_[1], &calTableR = tmpC_[2], - &calTableB = tmpC_[3], &calTableTmp = tmpC_[4]; - SparseArray<double> &wr = tmpM_[0], &wb = tmpM_[1], &M = tmpM_[2]; - - /* - * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are - * usable. - */ - calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG); - /* - * Fetch the new calibrations (if any) for this CT. Resample them in - * case the camera mode is not full-frame. - */ - getCalTable(ct_, config_.calibrationsCr, calTableTmp); - resampleCalTable(calTableTmp, cameraMode_, calTableR); - getCalTable(ct_, config_.calibrationsCb, calTableTmp); - resampleCalTable(calTableTmp, cameraMode_, calTableB); - /* - * You could print out the cal tables for this image here, if you're - * tuning the algorithm... - * Apply any calibration to the statistics, so the adaptive algorithm - * makes only the extra adjustments. - */ - applyCalTable(calTableR, cr); - applyCalTable(calTableB, cb); - /* Compute weights between zones. */ - computeW(cr, config_.sigmaCr, wr); - computeW(cb, config_.sigmaCb, wb); - /* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */ - runMatrixIterations(cr, lambdaR_, wr, M, config_.omega, config_.nIter, - config_.threshold, config_.lambdaBound); - runMatrixIterations(cb, lambdaB_, wb, M, config_.omega, config_.nIter, - config_.threshold, config_.lambdaBound); - /* - * Fold the calibrated gains into our final lambda values. (Note that on - * the next run, we re-start with the lambda values that don't have the - * calibration gains included.) - */ - compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_); - compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_); - /* Fold in the luminance table at the appropriate strength. */ - addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0, - asyncLambdaB_, luminanceTable_, - config_.luminanceStrength); -} - -/* Register algorithm with the system. 
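The same idiom appears at the bottom of every algorithm source file in this series: a
file-scope factory plus a static RegisterAlgorithm object whose constructor records the
(name, factory) pair, so the Controller can instantiate algorithms by the names found in
the tuning file. As a rough sketch of the shape of that interface (the real declaration
lives in the controller's algorithm.h, which is not quoted in this hunk):

    typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
    struct RegisterAlgorithm {
        RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc);
    };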
*/ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Alsc(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.h b/src/ipa/raspberrypi/controller/rpi/alsc.h deleted file mode 100644 index 0b6d9478..00000000 --- a/src/ipa/raspberrypi/controller/rpi/alsc.h +++ /dev/null @@ -1,174 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * alsc.h - ALSC (auto lens shading correction) control algorithm - */ -#pragma once - -#include <array> -#include <mutex> -#include <condition_variable> -#include <thread> -#include <vector> - -#include <libcamera/geometry.h> - -#include "../algorithm.h" -#include "../alsc_status.h" -#include "../statistics.h" - -namespace RPiController { - -/* Algorithm to generate automagic LSC (Lens Shading Correction) tables. */ - -/* - * The Array2D class is a very thin wrapper round std::vector so that it can - * be used in exactly the same way in the code but carries its correct width - * and height ("dimensions") with it. - */ - -template<typename T> -class Array2D -{ -public: - using Size = libcamera::Size; - - const Size &dimensions() const { return dimensions_; } - - size_t size() const { return data_.size(); } - - const std::vector<T> &data() const { return data_; } - - void resize(const Size &dims) - { - dimensions_ = dims; - data_.resize(dims.width * dims.height); - } - - void resize(const Size &dims, const T &value) - { - resize(dims); - std::fill(data_.begin(), data_.end(), value); - } - - T &operator[](int index) { return data_[index]; } - - const T &operator[](int index) const { return data_[index]; } - - T *ptr() { return data_.data(); } - - const T *ptr() const { return data_.data(); } - - auto begin() { return data_.begin(); } - auto end() { return data_.end(); } - -private: - Size dimensions_; - std::vector<T> data_; -}; - -/* - * We'll use the term SparseArray for the large sparse matrices that are - * XY tall but have only 4 non-zero elements on each row. 
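Each row i therefore stores just the four coefficients coupling zone i to its
neighbours, indexed 0..3 in the clockwise order used by computeW() and constructM():
above, to the right, below and to the left. Entries that would fall off the edge of the
grid are simply left at zero.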
- */ - -template<typename T> -using SparseArray = std::vector<std::array<T, 4>>; - -struct AlscCalibration { - double ct; - Array2D<double> table; -}; - -struct AlscConfig { - /* Only repeat the ALSC calculation every "this many" frames */ - uint16_t framePeriod; - /* number of initial frames for which speed taken as 1.0 (maximum) */ - uint16_t startupFrames; - /* IIR filter speed applied to algorithm results */ - double speed; - double sigmaCr; - double sigmaCb; - double minCount; - uint16_t minG; - double omega; - uint32_t nIter; - Array2D<double> luminanceLut; - double luminanceStrength; - std::vector<AlscCalibration> calibrationsCr; - std::vector<AlscCalibration> calibrationsCb; - double defaultCt; /* colour temperature if no metadata found */ - double threshold; /* iteration termination threshold */ - double lambdaBound; /* upper/lower bound for lambda from a value of 1 */ - libcamera::Size tableSize; -}; - -class Alsc : public Algorithm -{ -public: - Alsc(Controller *controller = NULL); - ~Alsc(); - char const *name() const override; - void initialise() override; - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - int read(const libcamera::YamlObject ¶ms) override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - -private: - /* configuration is read-only, and available to both threads */ - AlscConfig config_; - bool firstTime_; - CameraMode cameraMode_; - Array2D<double> luminanceTable_; - std::thread asyncThread_; - void asyncFunc(); /* asynchronous thread function */ - std::mutex mutex_; - /* condvar for async thread to wait on */ - std::condition_variable asyncSignal_; - /* condvar for synchronous thread to wait on */ - std::condition_variable syncSignal_; - /* for sync thread to check if async thread finished (requires mutex) */ - bool asyncFinished_; - /* for async thread to check if it's been told to run (requires mutex) */ - bool asyncStart_; - /* for async thread to check if it's been told to quit (requires mutex) */ - bool asyncAbort_; - - /* - * The following are only for the synchronous thread to use: - * for sync thread to note its has asked async thread to run - */ - bool asyncStarted_; - /* counts up to framePeriod before restarting the async thread */ - int framePhase_; - /* counts up to startupFrames */ - int frameCount_; - /* counts up to startupFrames for Process function */ - int frameCount2_; - std::array<Array2D<double>, 3> syncResults_; - std::array<Array2D<double>, 3> prevSyncResults_; - void waitForAysncThread(); - /* - * The following are for the asynchronous thread to use, though the main - * thread can set/reset them if the async thread is known to be idle: - */ - void restartAsync(StatisticsPtr &stats, Metadata *imageMetadata); - /* copy out the results from the async thread so that it can be restarted */ - void fetchAsyncResults(); - double ct_; - RgbyRegions statistics_; - std::array<Array2D<double>, 3> asyncResults_; - Array2D<double> asyncLambdaR_; - Array2D<double> asyncLambdaB_; - void doAlsc(); - Array2D<double> lambdaR_; - Array2D<double> lambdaB_; - - /* Temporaries for the computations */ - std::array<Array2D<double>, 5> tmpC_; - std::array<SparseArray<double>, 3> tmpM_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/awb.cpp b/src/ipa/raspberrypi/controller/rpi/awb.cpp deleted file mode 100644 index ef3435d6..00000000 --- a/src/ipa/raspberrypi/controller/rpi/awb.cpp +++ /dev/null @@ -1,734 +0,0 @@ -/* 
SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * awb.cpp - AWB control algorithm - */ - -#include <assert.h> -#include <functional> - -#include <libcamera/base/log.h> - -#include "../lux_status.h" - -#include "awb.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiAwb) - -#define NAME "rpi.awb" - -/* - * todo - the locking in this algorithm needs some tidying up as has been done - * elsewhere (ALSC and AGC). - */ - -int AwbMode::read(const libcamera::YamlObject ¶ms) -{ - auto value = params["lo"].get<double>(); - if (!value) - return -EINVAL; - ctLo = *value; - - value = params["hi"].get<double>(); - if (!value) - return -EINVAL; - ctHi = *value; - - return 0; -} - -int AwbPrior::read(const libcamera::YamlObject ¶ms) -{ - auto value = params["lux"].get<double>(); - if (!value) - return -EINVAL; - lux = *value; - - return prior.read(params["prior"]); -} - -static int readCtCurve(Pwl &ctR, Pwl &ctB, const libcamera::YamlObject ¶ms) -{ - if (params.size() % 3) { - LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry"; - return -EINVAL; - } - - if (params.size() < 6) { - LOG(RPiAwb, Error) << "AwbConfig: insufficient points in CT curve"; - return -EINVAL; - } - - const auto &list = params.asList(); - - for (auto it = list.begin(); it != list.end(); it++) { - auto value = it->get<double>(); - if (!value) - return -EINVAL; - double ct = *value; - - assert(it == list.begin() || ct != ctR.domain().end); - - value = (++it)->get<double>(); - if (!value) - return -EINVAL; - ctR.append(ct, *value); - - value = (++it)->get<double>(); - if (!value) - return -EINVAL; - ctB.append(ct, *value); - } - - return 0; -} - -int AwbConfig::read(const libcamera::YamlObject ¶ms) -{ - int ret; - - bayes = params["bayes"].get<int>(1); - framePeriod = params["frame_period"].get<uint16_t>(10); - startupFrames = params["startup_frames"].get<uint16_t>(10); - convergenceFrames = params["convergence_frames"].get<unsigned int>(3); - speed = params["speed"].get<double>(0.05); - - if (params.contains("ct_curve")) { - ret = readCtCurve(ctR, ctB, params["ct_curve"]); - if (ret) - return ret; - /* We will want the inverse functions of these too. 
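The ct_curve parsed above is a flat list of (ct, r, b) triples in strictly increasing
colour-temperature order, with at least two triples, from which the piecewise-linear
ctR (CT to R/G) and ctB (CT to B/G) curves are built. The inverses taken below let
setManualGains() recover an approximate colour temperature from manually supplied
gains. Purely for illustration (the numbers here are made up, real curves come from
tuning), an entry such as
    "ct_curve": [ 2850.0, 0.90, 0.35, 6000.0, 0.50, 0.75 ]
would append (2850, 0.90) and (6000, 0.50) to ctR, and (2850, 0.35) and (6000, 0.75)
to ctB.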
*/ - ctRInverse = ctR.inverse(); - ctBInverse = ctB.inverse(); - } - - if (params.contains("priors")) { - for (const auto &p : params["priors"].asList()) { - AwbPrior prior; - ret = prior.read(p); - if (ret) - return ret; - if (!priors.empty() && prior.lux <= priors.back().lux) { - LOG(RPiAwb, Error) << "AwbConfig: Prior must be ordered in increasing lux value"; - return -EINVAL; - } - priors.push_back(prior); - } - if (priors.empty()) { - LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured"; - return ret; - } - } - if (params.contains("modes")) { - for (const auto &[key, value] : params["modes"].asDict()) { - ret = modes[key].read(value); - if (ret) - return ret; - if (defaultMode == nullptr) - defaultMode = &modes[key]; - } - if (defaultMode == nullptr) { - LOG(RPiAwb, Error) << "AwbConfig: no AWB modes configured"; - return -EINVAL; - } - } - - minPixels = params["min_pixels"].get<double>(16.0); - minG = params["min_G"].get<uint16_t>(32); - minRegions = params["min_regions"].get<uint32_t>(10); - deltaLimit = params["delta_limit"].get<double>(0.2); - coarseStep = params["coarse_step"].get<double>(0.2); - transversePos = params["transverse_pos"].get<double>(0.01); - transverseNeg = params["transverse_neg"].get<double>(0.01); - if (transversePos <= 0 || transverseNeg <= 0) { - LOG(RPiAwb, Error) << "AwbConfig: transverse_pos/neg must be > 0"; - return -EINVAL; - } - - sensitivityR = params["sensitivity_r"].get<double>(1.0); - sensitivityB = params["sensitivity_b"].get<double>(1.0); - - if (bayes) { - if (ctR.empty() || ctB.empty() || priors.empty() || - defaultMode == nullptr) { - LOG(RPiAwb, Warning) - << "Bayesian AWB mis-configured - switch to Grey method"; - bayes = false; - } - } - fast = params[fast].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */ - whitepointR = params["whitepoint_r"].get<double>(0.0); - whitepointB = params["whitepoint_b"].get<double>(0.0); - if (bayes == false) - sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */ - return 0; -} - -Awb::Awb(Controller *controller) - : AwbAlgorithm(controller) -{ - asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false; - mode_ = nullptr; - manualR_ = manualB_ = 0.0; - asyncThread_ = std::thread(std::bind(&Awb::asyncFunc, this)); -} - -Awb::~Awb() -{ - { - std::lock_guard<std::mutex> lock(mutex_); - asyncAbort_ = true; - } - asyncSignal_.notify_one(); - asyncThread_.join(); -} - -char const *Awb::name() const -{ - return NAME; -} - -int Awb::read(const libcamera::YamlObject ¶ms) -{ - return config_.read(params); -} - -void Awb::initialise() -{ - frameCount_ = framePhase_ = 0; - /* - * Put something sane into the status that we are filtering towards, - * just in case the first few frames don't have anything meaningful in - * them. 
- */ - if (!config_.ctR.empty() && !config_.ctB.empty()) { - syncResults_.temperatureK = config_.ctR.domain().clip(4000); - syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK); - syncResults_.gainG = 1.0; - syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK); - } else { - /* random values just to stop the world blowing up */ - syncResults_.temperatureK = 4500; - syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0; - } - prevSyncResults_ = syncResults_; - asyncResults_ = syncResults_; -} - -void Awb::disableAuto() -{ - /* Freeze the most recent values, and treat them as manual gains */ - manualR_ = syncResults_.gainR = prevSyncResults_.gainR; - manualB_ = syncResults_.gainB = prevSyncResults_.gainB; - syncResults_.gainG = prevSyncResults_.gainG; - syncResults_.temperatureK = prevSyncResults_.temperatureK; -} - -void Awb::enableAuto() -{ - manualR_ = 0.0; - manualB_ = 0.0; -} - -unsigned int Awb::getConvergenceFrames() const -{ - /* - * If not in auto mode, there is no convergence - * to happen, so no need to drop any frames - return zero. - */ - if (!isAutoEnabled()) - return 0; - else - return config_.convergenceFrames; -} - -void Awb::setMode(std::string const &modeName) -{ - modeName_ = modeName; -} - -void Awb::setManualGains(double manualR, double manualB) -{ - /* If any of these are 0.0, we swich back to auto. */ - manualR_ = manualR; - manualB_ = manualB; - /* - * If not in auto mode, set these values into the syncResults which - * means that Prepare() will adopt them immediately. - */ - if (!isAutoEnabled()) { - syncResults_.gainR = prevSyncResults_.gainR = manualR_; - syncResults_.gainG = prevSyncResults_.gainG = 1.0; - syncResults_.gainB = prevSyncResults_.gainB = manualB_; - if (config_.bayes) { - /* Also estimate the best corresponding colour temperature from the curves. */ - double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clip(1 / manualR_)); - double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clip(1 / manualB_)); - prevSyncResults_.temperatureK = (ctR + ctB) / 2; - syncResults_.temperatureK = prevSyncResults_.temperatureK; - } - } -} - -void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode, - Metadata *metadata) -{ - /* Let other algorithms know the current white balance values. */ - metadata->set("awb.status", prevSyncResults_); -} - -bool Awb::isAutoEnabled() const -{ - return manualR_ == 0.0 || manualB_ == 0.0; -} - -void Awb::fetchAsyncResults() -{ - LOG(RPiAwb, Debug) << "Fetch AWB results"; - asyncFinished_ = false; - asyncStarted_ = false; - /* - * It's possible manual gains could be set even while the async - * thread was running, so only copy the results if still in auto mode. - */ - if (isAutoEnabled()) - syncResults_ = asyncResults_; -} - -void Awb::restartAsync(StatisticsPtr &stats, double lux) -{ - LOG(RPiAwb, Debug) << "Starting AWB calculation"; - /* this makes a new reference which belongs to the asynchronous thread */ - statistics_ = stats; - /* store the mode as it could technically change */ - auto m = config_.modes.find(modeName_); - mode_ = m != config_.modes.end() - ? &m->second - : (mode_ == nullptr ? 
config_.defaultMode : mode_); - lux_ = lux; - framePhase_ = 0; - asyncStarted_ = true; - size_t len = modeName_.copy(asyncResults_.mode, - sizeof(asyncResults_.mode) - 1); - asyncResults_.mode[len] = '\0'; - { - std::lock_guard<std::mutex> lock(mutex_); - asyncStart_ = true; - } - asyncSignal_.notify_one(); -} - -void Awb::prepare(Metadata *imageMetadata) -{ - if (frameCount_ < (int)config_.startupFrames) - frameCount_++; - double speed = frameCount_ < (int)config_.startupFrames - ? 1.0 - : config_.speed; - LOG(RPiAwb, Debug) - << "frame_count " << frameCount_ << " speed " << speed; - { - std::unique_lock<std::mutex> lock(mutex_); - if (asyncStarted_ && asyncFinished_) - fetchAsyncResults(); - } - /* Finally apply IIR filter to results and put into metadata. */ - memcpy(prevSyncResults_.mode, syncResults_.mode, - sizeof(prevSyncResults_.mode)); - prevSyncResults_.temperatureK = speed * syncResults_.temperatureK + - (1.0 - speed) * prevSyncResults_.temperatureK; - prevSyncResults_.gainR = speed * syncResults_.gainR + - (1.0 - speed) * prevSyncResults_.gainR; - prevSyncResults_.gainG = speed * syncResults_.gainG + - (1.0 - speed) * prevSyncResults_.gainG; - prevSyncResults_.gainB = speed * syncResults_.gainB + - (1.0 - speed) * prevSyncResults_.gainB; - imageMetadata->set("awb.status", prevSyncResults_); - LOG(RPiAwb, Debug) - << "Using AWB gains r " << prevSyncResults_.gainR << " g " - << prevSyncResults_.gainG << " b " - << prevSyncResults_.gainB; -} - -void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata) -{ - /* Count frames since we last poked the async thread. */ - if (framePhase_ < (int)config_.framePeriod) - framePhase_++; - LOG(RPiAwb, Debug) << "frame_phase " << framePhase_; - /* We do not restart the async thread if we're not in auto mode. */ - if (isAutoEnabled() && - (framePhase_ >= (int)config_.framePeriod || - frameCount_ < (int)config_.startupFrames)) { - /* Update any settings and any image metadata that we need. */ - struct LuxStatus luxStatus = {}; - luxStatus.lux = 400; /* in case no metadata */ - if (imageMetadata->get("lux.status", luxStatus) != 0) - LOG(RPiAwb, Debug) << "No lux metadata found"; - LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux; - - if (asyncStarted_ == false) - restartAsync(stats, luxStatus.lux); - } -} - -void Awb::asyncFunc() -{ - while (true) { - { - std::unique_lock<std::mutex> lock(mutex_); - asyncSignal_.wait(lock, [&] { - return asyncStart_ || asyncAbort_; - }); - asyncStart_ = false; - if (asyncAbort_) - break; - } - doAwb(); - { - std::lock_guard<std::mutex> lock(mutex_); - asyncFinished_ = true; - } - syncSignal_.notify_one(); - } -} - -static void generateStats(std::vector<Awb::RGB> &zones, - RgbyRegions &stats, double minPixels, - double minG) -{ - for (auto const ®ion : stats) { - Awb::RGB zone; - if (region.counted >= minPixels) { - zone.G = region.val.gSum / region.counted; - if (zone.G >= minG) { - zone.R = region.val.rSum / region.counted; - zone.B = region.val.bSum / region.counted; - zones.push_back(zone); - } - } - } -} - -void Awb::prepareStats() -{ - zones_.clear(); - /* - * LSC has already been applied to the stats in this pipeline, so stop - * any LSC compensation. We also ignore config_.fast in this version. - */ - generateStats(zones_, statistics_->awbRegions, config_.minPixels, - config_.minG); - /* - * apply sensitivities, so values appear to come from our "canonical" - * sensor. 
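sensitivity_r and sensitivity_b are defined (see awb.h below) as the canonical sensor's
R/G and B/G divided by this sensor's, so after this scaling the CT curves and priors,
which describe the canonical sensor, can be applied unchanged; awbBayes() later converts
the resulting gains back for this sensor by multiplying 1/r and 1/b by the same
sensitivities.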
- */ - for (auto &zone : zones_) { - zone.R *= config_.sensitivityR; - zone.B *= config_.sensitivityB; - } -} - -double Awb::computeDelta2Sum(double gainR, double gainB) -{ - /* - * Compute the sum of the squared colour error (non-greyness) as it - * appears in the log likelihood equation. - */ - double delta2Sum = 0; - for (auto &z : zones_) { - double deltaR = gainR * z.R - 1 - config_.whitepointR; - double deltaB = gainB * z.B - 1 - config_.whitepointB; - double delta2 = deltaR * deltaR + deltaB * deltaB; - /* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */ - delta2 = std::min(delta2, config_.deltaLimit); - delta2Sum += delta2; - } - return delta2Sum; -} - -Pwl Awb::interpolatePrior() -{ - /* - * Interpolate the prior log likelihood function for our current lux - * value. - */ - if (lux_ <= config_.priors.front().lux) - return config_.priors.front().prior; - else if (lux_ >= config_.priors.back().lux) - return config_.priors.back().prior; - else { - int idx = 0; - /* find which two we lie between */ - while (config_.priors[idx + 1].lux < lux_) - idx++; - double lux0 = config_.priors[idx].lux, - lux1 = config_.priors[idx + 1].lux; - return Pwl::combine(config_.priors[idx].prior, - config_.priors[idx + 1].prior, - [&](double /*x*/, double y0, double y1) { - return y0 + (y1 - y0) * - (lux_ - lux0) / (lux1 - lux0); - }); - } -} - -static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b, - Pwl::Point const &c) -{ - /* - * Given 3 points on a curve, find the extremum of the function in that - * interval by fitting a quadratic. - */ - const double eps = 1e-3; - Pwl::Point ca = c - a, ba = b - a; - double denominator = 2 * (ba.y * ca.x - ca.y * ba.x); - if (abs(denominator) > eps) { - double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x; - double result = numerator / denominator + a.x; - return std::max(a.x, std::min(c.x, result)); - } - /* has degenerated to straight line segment */ - return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x); -} - -double Awb::coarseSearch(Pwl const &prior) -{ - points_.clear(); /* assume doesn't deallocate memory */ - size_t bestPoint = 0; - double t = mode_->ctLo; - int spanR = 0, spanB = 0; - /* Step down the CT curve evaluating log likelihood. */ - while (true) { - double r = config_.ctR.eval(t, &spanR); - double b = config_.ctB.eval(t, &spanB); - double gainR = 1 / r, gainB = 1 / b; - double delta2Sum = computeDelta2Sum(gainR, gainB); - double priorLogLikelihood = prior.eval(prior.domain().clip(t)); - double finalLogLikelihood = delta2Sum - priorLogLikelihood; - LOG(RPiAwb, Debug) - << "t: " << t << " gain R " << gainR << " gain B " - << gainB << " delta2_sum " << delta2Sum - << " prior " << priorLogLikelihood << " final " - << finalLogLikelihood; - points_.push_back(Pwl::Point(t, finalLogLikelihood)); - if (points_.back().y < points_[bestPoint].y) - bestPoint = points_.size() - 1; - if (t == mode_->ctHi) - break; - /* for even steps along the r/b curve scale them by the current t */ - t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi); - } - t = points_[bestPoint].x; - LOG(RPiAwb, Debug) << "Coarse search found CT " << t; - /* - * We have the best point of the search, but refine it with a quadratic - * interpolation around its neighbours. 
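interpolateQuadatric() fits a parabola through the three points (a, b, c) and returns
its stationary point clamped to [a.x, c.x]: with ba = b - a and ca = c - a,
x* = a.x + (ba.y * ca.x^2 - ca.y * ba.x^2) / (2 * (ba.y * ca.x - ca.y * ba.x)),
falling back to whichever endpoint has the lower value (or to b.x if they are level)
when the denominator is too small, i.e. the three points are nearly collinear.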
- */ - if (points_.size() > 2) { - unsigned long bp = std::min(bestPoint, points_.size() - 2); - bestPoint = std::max(1UL, bp); - t = interpolateQuadatric(points_[bestPoint - 1], - points_[bestPoint], - points_[bestPoint + 1]); - LOG(RPiAwb, Debug) - << "After quadratic refinement, coarse search has CT " - << t; - } - return t; -} - -void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior) -{ - int spanR = -1, spanB = -1; - config_.ctR.eval(t, &spanR); - config_.ctB.eval(t, &spanB); - double step = t / 10 * config_.coarseStep * 0.1; - int nsteps = 5; - double rDiff = config_.ctR.eval(t + nsteps * step, &spanR) - - config_.ctR.eval(t - nsteps * step, &spanR); - double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) - - config_.ctB.eval(t - nsteps * step, &spanB); - Pwl::Point transverse(bDiff, -rDiff); - if (transverse.len2() < 1e-6) - return; - /* - * unit vector orthogonal to the b vs. r function (pointing outwards - * with r and b increasing) - */ - transverse = transverse / transverse.len(); - double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0; - double transverseRange = config_.transverseNeg + config_.transversePos; - const int maxNumDeltas = 12; - /* a transverse step approximately every 0.01 r/b units */ - int numDeltas = floor(transverseRange * 100 + 0.5) + 1; - numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas); - /* - * Step down CT curve. March a bit further if the transverse range is - * large. - */ - nsteps += numDeltas; - for (int i = -nsteps; i <= nsteps; i++) { - double tTest = t + i * step; - double priorLogLikelihood = - prior.eval(prior.domain().clip(tTest)); - double rCurve = config_.ctR.eval(tTest, &spanR); - double bCurve = config_.ctB.eval(tTest, &spanB); - /* x will be distance off the curve, y the log likelihood there */ - Pwl::Point points[maxNumDeltas]; - int bestPoint = 0; - /* Take some measurements transversely *off* the CT curve. */ - for (int j = 0; j < numDeltas; j++) { - points[j].x = -config_.transverseNeg + - (transverseRange * j) / (numDeltas - 1); - Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) + - transverse * points[j].x; - double rTest = rbTest.x, bTest = rbTest.y; - double gainR = 1 / rTest, gainB = 1 / bTest; - double delta2Sum = computeDelta2Sum(gainR, gainB); - points[j].y = delta2Sum - priorLogLikelihood; - LOG(RPiAwb, Debug) - << "At t " << tTest << " r " << rTest << " b " - << bTest << ": " << points[j].y; - if (points[j].y < points[bestPoint].y) - bestPoint = j; - } - /* - * We have NUM_DELTAS points transversely across the CT curve, - * now let's do a quadratic interpolation for the best result. - */ - bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2)); - Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) + - transverse * interpolateQuadatric(points[bestPoint - 1], - points[bestPoint], - points[bestPoint + 1]); - double rTest = rbTest.x, bTest = rbTest.y; - double gainR = 1 / rTest, gainB = 1 / bTest; - double delta2Sum = computeDelta2Sum(gainR, gainB); - double finalLogLikelihood = delta2Sum - priorLogLikelihood; - LOG(RPiAwb, Debug) - << "Finally " - << tTest << " r " << rTest << " b " << bTest << ": " - << finalLogLikelihood - << (finalLogLikelihood < bestLogLikelihood ? 
" BEST" : ""); - if (bestT == 0 || finalLogLikelihood < bestLogLikelihood) - bestLogLikelihood = finalLogLikelihood, - bestT = tTest, bestR = rTest, bestB = bTest; - } - t = bestT, r = bestR, b = bestB; - LOG(RPiAwb, Debug) - << "Fine search found t " << t << " r " << r << " b " << b; -} - -void Awb::awbBayes() -{ - /* - * May as well divide out G to save computeDelta2Sum from doing it over - * and over. - */ - for (auto &z : zones_) - z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1); - /* - * Get the current prior, and scale according to how many zones are - * valid... not entirely sure about this. - */ - Pwl prior = interpolatePrior(); - prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions()); - prior.map([](double x, double y) { - LOG(RPiAwb, Debug) << "(" << x << "," << y << ")"; - }); - double t = coarseSearch(prior); - double r = config_.ctR.eval(t); - double b = config_.ctB.eval(t); - LOG(RPiAwb, Debug) - << "After coarse search: r " << r << " b " << b << " (gains r " - << 1 / r << " b " << 1 / b << ")"; - /* - * Not entirely sure how to handle the fine search yet. Mostly the - * estimated CT is already good enough, but the fine search allows us to - * wander transverely off the CT curve. Under some illuminants, where - * there may be more or less green light, this may prove beneficial, - * though I probably need more real datasets before deciding exactly how - * this should be controlled and tuned. - */ - fineSearch(t, r, b, prior); - LOG(RPiAwb, Debug) - << "After fine search: r " << r << " b " << b << " (gains r " - << 1 / r << " b " << 1 / b << ")"; - /* - * Write results out for the main thread to pick up. Remember to adjust - * the gains from the ones that the "canonical sensor" would require to - * the ones needed by *this* sensor. - */ - asyncResults_.temperatureK = t; - asyncResults_.gainR = 1.0 / r * config_.sensitivityR; - asyncResults_.gainG = 1.0; - asyncResults_.gainB = 1.0 / b * config_.sensitivityB; -} - -void Awb::awbGrey() -{ - LOG(RPiAwb, Debug) << "Grey world AWB"; - /* - * Make a separate list of the derivatives for each of red and blue, so - * that we can sort them to exclude the extreme gains. We could - * consider some variations, such as normalising all the zones first, or - * doing an L2 average etc. - */ - std::vector<RGB> &derivsR(zones_); - std::vector<RGB> derivsB(derivsR); - std::sort(derivsR.begin(), derivsR.end(), - [](RGB const &a, RGB const &b) { - return a.G * b.R < b.G * a.R; - }); - std::sort(derivsB.begin(), derivsB.end(), - [](RGB const &a, RGB const &b) { - return a.G * b.B < b.G * a.B; - }); - /* Average the middle half of the values. */ - int discard = derivsR.size() / 4; - RGB sumR(0, 0, 0), sumB(0, 0, 0); - for (auto ri = derivsR.begin() + discard, - bi = derivsB.begin() + discard; - ri != derivsR.end() - discard; ri++, bi++) - sumR += *ri, sumB += *bi; - double gainR = sumR.G / (sumR.R + 1), - gainB = sumB.G / (sumB.B + 1); - asyncResults_.temperatureK = 4500; /* don't know what it is */ - asyncResults_.gainR = gainR; - asyncResults_.gainG = 1.0; - asyncResults_.gainB = gainB; -} - -void Awb::doAwb() -{ - prepareStats(); - LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size(); - if (zones_.size() > config_.minRegions) { - if (config_.bayes) - awbBayes(); - else - awbGrey(); - LOG(RPiAwb, Debug) - << "CT found is " - << asyncResults_.temperatureK - << " with gains r " << asyncResults_.gainR - << " and b " << asyncResults_.gainB; - } - /* - * we're done with these; we may as well relinquish our hold on the - * pointer. 
- */ - statistics_.reset(); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Awb(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/awb.h b/src/ipa/raspberrypi/controller/rpi/awb.h deleted file mode 100644 index e7d49cd8..00000000 --- a/src/ipa/raspberrypi/controller/rpi/awb.h +++ /dev/null @@ -1,191 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * awb.h - AWB control algorithm - */ -#pragma once - -#include <mutex> -#include <condition_variable> -#include <thread> - -#include "../awb_algorithm.h" -#include "../pwl.h" -#include "../awb_status.h" -#include "../statistics.h" - -namespace RPiController { - -/* Control algorithm to perform AWB calculations. */ - -struct AwbMode { - int read(const libcamera::YamlObject ¶ms); - double ctLo; /* low CT value for search */ - double ctHi; /* high CT value for search */ -}; - -struct AwbPrior { - int read(const libcamera::YamlObject ¶ms); - double lux; /* lux level */ - Pwl prior; /* maps CT to prior log likelihood for this lux level */ -}; - -struct AwbConfig { - AwbConfig() : defaultMode(nullptr) {} - int read(const libcamera::YamlObject ¶ms); - /* Only repeat the AWB calculation every "this many" frames */ - uint16_t framePeriod; - /* number of initial frames for which speed taken as 1.0 (maximum) */ - uint16_t startupFrames; - unsigned int convergenceFrames; /* approx number of frames to converge */ - double speed; /* IIR filter speed applied to algorithm results */ - bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */ - Pwl ctR; /* function maps CT to r (= R/G) */ - Pwl ctB; /* function maps CT to b (= B/G) */ - Pwl ctRInverse; /* inverse of ctR */ - Pwl ctBInverse; /* inverse of ctB */ - /* table of illuminant priors at different lux levels */ - std::vector<AwbPrior> priors; - /* AWB "modes" (determines the search range) */ - std::map<std::string, AwbMode> modes; - AwbMode *defaultMode; /* mode used if no mode selected */ - /* - * minimum proportion of pixels counted within AWB region for it to be - * "useful" - */ - double minPixels; - /* minimum G value of those pixels, to be regarded a "useful" */ - uint16_t minG; - /* - * number of AWB regions that must be "useful" in order to do the AWB - * calculation - */ - uint32_t minRegions; - /* clamp on colour error term (so as not to penalise non-grey excessively) */ - double deltaLimit; - /* step size control in coarse search */ - double coarseStep; - /* how far to wander off CT curve towards "more purple" */ - double transversePos; - /* how far to wander off CT curve towards "more green" */ - double transverseNeg; - /* - * red sensitivity ratio (set to canonical sensor's R/G divided by this - * sensor's R/G) - */ - double sensitivityR; - /* - * blue sensitivity ratio (set to canonical sensor's B/G divided by this - * sensor's B/G) - */ - double sensitivityB; - /* The whitepoint (which we normally "aim" for) can be moved. 
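Non-zero values shift the target used by computeDelta2Sum(), which scores each zone with
deltaR = gainR * R/G - 1 - whitepoint_r (and likewise for blue), so the search settles on
gains that leave R/G at 1 + whitepoint_r rather than exactly 1.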
*/ - double whitepointR; - double whitepointB; - bool bayes; /* use Bayesian algorithm */ -}; - -class Awb : public AwbAlgorithm -{ -public: - Awb(Controller *controller = NULL); - ~Awb(); - char const *name() const override; - void initialise() override; - int read(const libcamera::YamlObject ¶ms) override; - unsigned int getConvergenceFrames() const override; - void setMode(std::string const &name) override; - void setManualGains(double manualR, double manualB) override; - void enableAuto() override; - void disableAuto() override; - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - struct RGB { - RGB(double r = 0, double g = 0, double b = 0) - : R(r), G(g), B(b) - { - } - double R, G, B; - RGB &operator+=(RGB const &other) - { - R += other.R, G += other.G, B += other.B; - return *this; - } - }; - -private: - bool isAutoEnabled() const; - /* configuration is read-only, and available to both threads */ - AwbConfig config_; - std::thread asyncThread_; - void asyncFunc(); /* asynchronous thread function */ - std::mutex mutex_; - /* condvar for async thread to wait on */ - std::condition_variable asyncSignal_; - /* condvar for synchronous thread to wait on */ - std::condition_variable syncSignal_; - /* for sync thread to check if async thread finished (requires mutex) */ - bool asyncFinished_; - /* for async thread to check if it's been told to run (requires mutex) */ - bool asyncStart_; - /* for async thread to check if it's been told to quit (requires mutex) */ - bool asyncAbort_; - - /* - * The following are only for the synchronous thread to use: - * for sync thread to note its has asked async thread to run - */ - bool asyncStarted_; - /* counts up to framePeriod before restarting the async thread */ - int framePhase_; - int frameCount_; /* counts up to startup_frames */ - AwbStatus syncResults_; - AwbStatus prevSyncResults_; - std::string modeName_; - /* - * The following are for the asynchronous thread to use, though the main - * thread can set/reset them if the async thread is known to be idle: - */ - void restartAsync(StatisticsPtr &stats, double lux); - /* copy out the results from the async thread so that it can be restarted */ - void fetchAsyncResults(); - StatisticsPtr statistics_; - AwbMode *mode_; - double lux_; - AwbStatus asyncResults_; - void doAwb(); - void awbBayes(); - void awbGrey(); - void prepareStats(); - double computeDelta2Sum(double gainR, double gainB); - Pwl interpolatePrior(); - double coarseSearch(Pwl const &prior); - void fineSearch(double &t, double &r, double &b, Pwl const &prior); - std::vector<RGB> zones_; - std::vector<Pwl::Point> points_; - /* manual r setting */ - double manualR_; - /* manual b setting */ - double manualB_; -}; - -static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b) -{ - return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B); -} -static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b) -{ - return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B); -} -static inline Awb::RGB operator*(double d, Awb::RGB const &rgb) -{ - return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B); -} -static inline Awb::RGB operator*(Awb::RGB const &rgb, double d) -{ - return d * rgb; -} - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.cpp b/src/ipa/raspberrypi/controller/rpi/black_level.cpp deleted file mode 100644 index 85baec3f..00000000 --- 
a/src/ipa/raspberrypi/controller/rpi/black_level.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * black_level.cpp - black level control algorithm - */ - -#include <math.h> -#include <stdint.h> - -#include <libcamera/base/log.h> - -#include "../black_level_status.h" - -#include "black_level.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiBlackLevel) - -#define NAME "rpi.black_level" - -BlackLevel::BlackLevel(Controller *controller) - : Algorithm(controller) -{ -} - -char const *BlackLevel::name() const -{ - return NAME; -} - -int BlackLevel::read(const libcamera::YamlObject ¶ms) -{ - /* 64 in 10 bits scaled to 16 bits */ - uint16_t blackLevel = params["black_level"].get<uint16_t>(4096); - blackLevelR_ = params["black_level_r"].get<uint16_t>(blackLevel); - blackLevelG_ = params["black_level_g"].get<uint16_t>(blackLevel); - blackLevelB_ = params["black_level_b"].get<uint16_t>(blackLevel); - LOG(RPiBlackLevel, Debug) - << " Read black levels red " << blackLevelR_ - << " green " << blackLevelG_ - << " blue " << blackLevelB_; - return 0; -} - -void BlackLevel::prepare(Metadata *imageMetadata) -{ - /* - * Possibly we should think about doing this in a switchMode or - * something? - */ - struct BlackLevelStatus status; - status.blackLevelR = blackLevelR_; - status.blackLevelG = blackLevelG_; - status.blackLevelB = blackLevelB_; - imageMetadata->set("black_level.status", status); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return new BlackLevel(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.h b/src/ipa/raspberrypi/controller/rpi/black_level.h deleted file mode 100644 index 2403f7f7..00000000 --- a/src/ipa/raspberrypi/controller/rpi/black_level.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * black_level.h - black level control algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../black_level_status.h" - -/* This is our implementation of the "black level algorithm". */ - -namespace RPiController { - -class BlackLevel : public Algorithm -{ -public: - BlackLevel(Controller *controller); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void prepare(Metadata *imageMetadata) override; - -private: - double blackLevelR_; - double blackLevelG_; - double blackLevelB_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.cpp b/src/ipa/raspberrypi/controller/rpi/ccm.cpp deleted file mode 100644 index 2e2e6664..00000000 --- a/src/ipa/raspberrypi/controller/rpi/ccm.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * ccm.cpp - CCM (colour correction matrix) control algorithm - */ - -#include <libcamera/base/log.h> - -#include "../awb_status.h" -#include "../ccm_status.h" -#include "../lux_status.h" -#include "../metadata.h" - -#include "ccm.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiCcm) - -/* - * This algorithm selects a CCM (Colour Correction Matrix) according to the - * colour temperature estimated by AWB (interpolating between known matricies as - * necessary). 
Additionally the amount of colour saturation can be controlled - * both according to the current estimated lux level and according to a - * saturation setting that is exposed to applications. - */ - -#define NAME "rpi.ccm" - -Matrix::Matrix() -{ - memset(m, 0, sizeof(m)); -} -Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8) -{ - m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4, - m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8; -} -int Matrix::read(const libcamera::YamlObject ¶ms) -{ - double *ptr = (double *)m; - - if (params.size() != 9) { - LOG(RPiCcm, Error) << "Wrong number of values in CCM"; - return -EINVAL; - } - - for (const auto ¶m : params.asList()) { - auto value = param.get<double>(); - if (!value) - return -EINVAL; - *ptr++ = *value; - } - - return 0; -} - -Ccm::Ccm(Controller *controller) - : CcmAlgorithm(controller), saturation_(1.0) {} - -char const *Ccm::name() const -{ - return NAME; -} - -int Ccm::read(const libcamera::YamlObject ¶ms) -{ - int ret; - - if (params.contains("saturation")) { - ret = config_.saturation.read(params["saturation"]); - if (ret) - return ret; - } - - for (auto &p : params["ccms"].asList()) { - auto value = p["ct"].get<double>(); - if (!value) - return -EINVAL; - - CtCcm ctCcm; - ctCcm.ct = *value; - ret = ctCcm.ccm.read(p["ccm"]); - if (ret) - return ret; - - if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) { - LOG(RPiCcm, Error) - << "CCM not in increasing colour temperature order"; - return -EINVAL; - } - - config_.ccms.push_back(std::move(ctCcm)); - } - - if (config_.ccms.empty()) { - LOG(RPiCcm, Error) << "No CCMs specified"; - return -EINVAL; - } - - return 0; -} - -void Ccm::setSaturation(double saturation) -{ - saturation_ = saturation; -} - -void Ccm::initialise() -{ -} - -template<typename T> -static bool getLocked(Metadata *metadata, std::string const &tag, T &value) -{ - T *ptr = metadata->getLocked<T>(tag); - if (ptr == nullptr) - return false; - value = *ptr; - return true; -} - -Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) -{ - if (ct <= ccms.front().ct) - return ccms.front().ccm; - else if (ct >= ccms.back().ct) - return ccms.back().ccm; - else { - int i = 0; - for (; ct > ccms[i].ct; i++) - ; - double lambda = - (ct - ccms[i - 1].ct) / (ccms[i].ct - ccms[i - 1].ct); - return lambda * ccms[i].ccm + (1.0 - lambda) * ccms[i - 1].ccm; - } -} - -Matrix applySaturation(Matrix const &ccm, double saturation) -{ - Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419, - -0.081); - Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771, - 0.000); - Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation); - return Y2RGB * S * RGB2Y * ccm; -} - -void Ccm::prepare(Metadata *imageMetadata) -{ - bool awbOk = false, luxOk = false; - struct AwbStatus awb = {}; - awb.temperatureK = 4000; /* in case no metadata */ - struct LuxStatus lux = {}; - lux.lux = 400; /* in case no metadata */ - { - /* grab mutex just once to get everything */ - std::lock_guard<Metadata> lock(*imageMetadata); - awbOk = getLocked(imageMetadata, "awb.status", awb); - luxOk = getLocked(imageMetadata, "lux.status", lux); - } - if (!awbOk) - LOG(RPiCcm, Warning) << "no colour temperature found"; - if (!luxOk) - LOG(RPiCcm, Warning) << "no lux value found"; - Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK); - double saturation = saturation_; - struct CcmStatus ccmStatus; - ccmStatus.saturation = saturation; - if 
(!config_.saturation.empty()) - saturation *= config_.saturation.eval( - config_.saturation.domain().clip(lux.lux)); - ccm = applySaturation(ccm, saturation); - for (int j = 0; j < 3; j++) - for (int i = 0; i < 3; i++) - ccmStatus.matrix[j * 3 + i] = - std::max(-8.0, std::min(7.9999, ccm.m[j][i])); - LOG(RPiCcm, Debug) - << "colour temperature " << awb.temperatureK << "K"; - LOG(RPiCcm, Debug) - << "CCM: " << ccmStatus.matrix[0] << " " << ccmStatus.matrix[1] - << " " << ccmStatus.matrix[2] << " " - << ccmStatus.matrix[3] << " " << ccmStatus.matrix[4] - << " " << ccmStatus.matrix[5] << " " - << ccmStatus.matrix[6] << " " << ccmStatus.matrix[7] - << " " << ccmStatus.matrix[8]; - imageMetadata->set("ccm.status", ccmStatus); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Ccm(controller); - ; -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.h b/src/ipa/raspberrypi/controller/rpi/ccm.h deleted file mode 100644 index 286d0b33..00000000 --- a/src/ipa/raspberrypi/controller/rpi/ccm.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * ccm.h - CCM (colour correction matrix) control algorithm - */ -#pragma once - -#include <vector> - -#include "../ccm_algorithm.h" -#include "../pwl.h" - -namespace RPiController { - -/* Algorithm to calculate colour matrix. Should be placed after AWB. */ - -struct Matrix { - Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8); - Matrix(); - double m[3][3]; - int read(const libcamera::YamlObject ¶ms); -}; -static inline Matrix operator*(double d, Matrix const &m) -{ - return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d, - m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d, - m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d); -} -static inline Matrix operator*(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][0] * m2.m[0][j] + - m1.m[i][1] * m2.m[1][j] + - m1.m[i][2] * m2.m[2][j]; - return m; -} -static inline Matrix operator+(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][j] + m2.m[i][j]; - return m; -} - -struct CtCcm { - double ct; - Matrix ccm; -}; - -struct CcmConfig { - std::vector<CtCcm> ccms; - Pwl saturation; -}; - -class Ccm : public CcmAlgorithm -{ -public: - Ccm(Controller *controller = NULL); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void setSaturation(double saturation) override; - void initialise() override; - void prepare(Metadata *imageMetadata) override; - -private: - CcmConfig config_; - double saturation_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.cpp b/src/ipa/raspberrypi/controller/rpi/contrast.cpp deleted file mode 100644 index bee1eadd..00000000 --- a/src/ipa/raspberrypi/controller/rpi/contrast.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * contrast.cpp - contrast (gamma) control algorithm - */ -#include <stdint.h> - -#include <libcamera/base/log.h> - -#include "../contrast_status.h" -#include "../histogram.h" - -#include "contrast.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiContrast) - -/* - 
* This is a very simple control algorithm which simply retrieves the results of - * AGC and AWB via their "status" metadata, and applies digital gain to the - * colour channels in accordance with those instructions. We take care never to - * apply less than unity gains, as that would cause fully saturated pixels to go - * off-white. - */ - -#define NAME "rpi.contrast" - -Contrast::Contrast(Controller *controller) - : ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0) -{ -} - -char const *Contrast::name() const -{ - return NAME; -} - -int Contrast::read(const libcamera::YamlObject &params) -{ - // enable adaptive enhancement by default - config_.ceEnable = params["ce_enable"].get<int>(1); - // the point near the bottom of the histogram to move - config_.loHistogram = params["lo_histogram"].get<double>(0.01); - // where in the range to try and move it to - config_.loLevel = params["lo_level"].get<double>(0.015); - // but don't move by more than this - config_.loMax = params["lo_max"].get<double>(500); - // equivalent values for the top of the histogram... - config_.hiHistogram = params["hi_histogram"].get<double>(0.95); - config_.hiLevel = params["hi_level"].get<double>(0.95); - config_.hiMax = params["hi_max"].get<double>(2000); - return config_.gammaCurve.read(params["gamma_curve"]); -} - -void Contrast::setBrightness(double brightness) -{ - brightness_ = brightness; -} - -void Contrast::setContrast(double contrast) -{ - contrast_ = contrast; -} - -void Contrast::initialise() -{ - /* - * Fill in some default values as Prepare will run before Process gets - * called. - */ - status_.brightness = brightness_; - status_.contrast = contrast_; - status_.gammaCurve = config_.gammaCurve; -} - -void Contrast::prepare(Metadata *imageMetadata) -{ - imageMetadata->set("contrast.status", status_); -} - -Pwl computeStretchCurve(Histogram const &histogram, - ContrastConfig const &config) -{ - Pwl enhance; - enhance.append(0, 0); - /* - * If the start of the histogram is rather empty, try to pull it down a - * bit. - */ - double histLo = histogram.quantile(config.loHistogram) * - (65536 / histogram.bins()); - double levelLo = config.loLevel * 65536; - LOG(RPiContrast, Debug) - << "Move histogram point " << histLo << " to " << levelLo; - histLo = std::max(levelLo, - std::min(65535.0, std::min(histLo, levelLo + config.loMax))); - LOG(RPiContrast, Debug) - << "Final values " << histLo << " -> " << levelLo; - enhance.append(histLo, levelLo); - /* - * Keep the mid-point (median) in the same place, though, to limit the - * apparent amount of global brightness shift. - */ - double mid = histogram.quantile(0.5) * (65536 / histogram.bins()); - enhance.append(mid, mid); - - /* - * If the top of the histogram is empty, try to pull the pixel values - * there up.
- */ - double histHi = histogram.quantile(config.hiHistogram) * - (65536 / histogram.bins()); - double levelHi = config.hiLevel * 65536; - LOG(RPiContrast, Debug) - << "Move histogram point " << histHi << " to " << levelHi; - histHi = std::min(levelHi, - std::max(0.0, std::max(histHi, levelHi - config.hiMax))); - LOG(RPiContrast, Debug) - << "Final values " << histHi << " -> " << levelHi; - enhance.append(histHi, levelHi); - enhance.append(65535, 65535); - return enhance; -} - -Pwl applyManualContrast(Pwl const &gammaCurve, double brightness, - double contrast) -{ - Pwl newGammaCurve; - LOG(RPiContrast, Debug) - << "Manual brightness " << brightness << " contrast " << contrast; - gammaCurve.map([&](double x, double y) { - newGammaCurve.append( - x, std::max(0.0, std::min(65535.0, - (y - 32768) * contrast + - 32768 + brightness))); - }); - return newGammaCurve; -} - -void Contrast::process(StatisticsPtr &stats, - [[maybe_unused]] Metadata *imageMetadata) -{ - Histogram &histogram = stats->yHist; - /* - * We look at the histogram and adjust the gamma curve in the following - * ways: 1. Adjust the gamma curve so as to pull the start of the - * histogram down, and possibly push the end up. - */ - Pwl gammaCurve = config_.gammaCurve; - if (config_.ceEnable) { - if (config_.loMax != 0 || config_.hiMax != 0) - gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve); - /* - * We could apply other adjustments (e.g. partial equalisation) - * based on the histogram...? - */ - } - /* - * 2. Finally apply any manually selected brightness/contrast - * adjustment. - */ - if (brightness_ != 0 || contrast_ != 1.0) - gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_); - /* - * And fill in the status for output. Use more points towards the bottom - * of the curve. - */ - status_.brightness = brightness_; - status_.contrast = contrast_; - status_.gammaCurve = std::move(gammaCurve); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Contrast(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.h b/src/ipa/raspberrypi/controller/rpi/contrast.h deleted file mode 100644 index 9c81277a..00000000 --- a/src/ipa/raspberrypi/controller/rpi/contrast.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * contrast.h - contrast (gamma) control algorithm - */ -#pragma once - -#include <mutex> - -#include "../contrast_algorithm.h" -#include "../pwl.h" - -namespace RPiController { - -/* - * Back End algorithm to apply correct digital gain. Should be placed after - * Back End AWB.
- */ - -struct ContrastConfig { - bool ceEnable; - double loHistogram; - double loLevel; - double loMax; - double hiHistogram; - double hiLevel; - double hiMax; - Pwl gammaCurve; -}; - -class Contrast : public ContrastAlgorithm -{ -public: - Contrast(Controller *controller = NULL); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void setBrightness(double brightness) override; - void setContrast(double contrast) override; - void initialise() override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - -private: - ContrastConfig config_; - double brightness_; - double contrast_; - ContrastStatus status_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.cpp b/src/ipa/raspberrypi/controller/rpi/dpc.cpp deleted file mode 100644 index be3871df..00000000 --- a/src/ipa/raspberrypi/controller/rpi/dpc.cpp +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * dpc.cpp - DPC (defective pixel correction) control algorithm - */ - -#include <libcamera/base/log.h> - -#include "dpc.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiDpc) - -/* - * We use the lux status so that we can apply stronger settings in darkness (if - * necessary). - */ - -#define NAME "rpi.dpc" - -Dpc::Dpc(Controller *controller) - : Algorithm(controller) -{ -} - -char const *Dpc::name() const -{ - return NAME; -} - -int Dpc::read(const libcamera::YamlObject ¶ms) -{ - config_.strength = params["strength"].get<int>(1); - if (config_.strength < 0 || config_.strength > 2) { - LOG(RPiDpc, Error) << "Bad strength value"; - return -EINVAL; - } - - return 0; -} - -void Dpc::prepare(Metadata *imageMetadata) -{ - DpcStatus dpcStatus = {}; - /* Should we vary this with lux level or analogue gain? TBD. */ - dpcStatus.strength = config_.strength; - LOG(RPiDpc, Debug) << "strength " << dpcStatus.strength; - imageMetadata->set("dpc.status", dpcStatus); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Dpc(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.h b/src/ipa/raspberrypi/controller/rpi/dpc.h deleted file mode 100644 index 84a05604..00000000 --- a/src/ipa/raspberrypi/controller/rpi/dpc.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * dpc.h - DPC (defective pixel correction) control algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../dpc_status.h" - -namespace RPiController { - -/* Back End algorithm to apply appropriate GEQ settings. 
*/ - -struct DpcConfig { - int strength; -}; - -class Dpc : public Algorithm -{ -public: - Dpc(Controller *controller); - char const *name() const override; - int read(const libcamera::YamlObject &params) override; - void prepare(Metadata *imageMetadata) override; - -private: - DpcConfig config_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/focus.h b/src/ipa/raspberrypi/controller/rpi/focus.h deleted file mode 100644 index 8556039d..00000000 --- a/src/ipa/raspberrypi/controller/rpi/focus.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2020, Raspberry Pi Ltd - * - * focus.h - focus algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../metadata.h" - -/* - * The "focus" algorithm. All it does is print out a version of the - * focus contrast measure; there is no actual auto-focus mechanism to - * control. - */ - -namespace RPiController { - -class Focus : public Algorithm -{ -public: - Focus(Controller *controller); - char const *name() const override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/geq.cpp b/src/ipa/raspberrypi/controller/rpi/geq.cpp deleted file mode 100644 index 510870e9..00000000 --- a/src/ipa/raspberrypi/controller/rpi/geq.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * geq.cpp - GEQ (green equalisation) control algorithm - */ - -#include <libcamera/base/log.h> - -#include "../device_status.h" -#include "../lux_status.h" -#include "../pwl.h" - -#include "geq.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiGeq) - -/* - * We use the lux status so that we can apply stronger settings in darkness (if - * necessary). - */ - -#define NAME "rpi.geq" - -Geq::Geq(Controller *controller) - : Algorithm(controller) -{ -} - -char const *Geq::name() const -{ - return NAME; -} - -int Geq::read(const libcamera::YamlObject &params) -{ - config_.offset = params["offset"].get<uint16_t>(0); - config_.slope = params["slope"].get<double>(0.0); - if (config_.slope < 0.0 || config_.slope >= 1.0) { - LOG(RPiGeq, Error) << "Bad slope value"; - return -EINVAL; - } - - if (params.contains("strength")) { - int ret = config_.strength.read(params["strength"]); - if (ret) - return ret; - } - - return 0; -} - -void Geq::prepare(Metadata *imageMetadata) -{ - LuxStatus luxStatus = {}; - luxStatus.lux = 400; - if (imageMetadata->get("lux.status", luxStatus)) - LOG(RPiGeq, Warning) << "no lux data found"; - DeviceStatus deviceStatus; - deviceStatus.analogueGain = 1.0; /* in case not found */ - if (imageMetadata->get("device.status", deviceStatus)) - LOG(RPiGeq, Warning) - << "no device metadata - use analogue gain of 1x"; - GeqStatus geqStatus = {}; - double strength = config_.strength.empty() - ?
1.0 - : config_.strength.eval(config_.strength.domain().clip(luxStatus.lux)); - strength *= deviceStatus.analogueGain; - double offset = config_.offset * strength; - double slope = config_.slope * strength; - geqStatus.offset = std::min(65535.0, std::max(0.0, offset)); - geqStatus.slope = std::min(.99999, std::max(0.0, slope)); - LOG(RPiGeq, Debug) - << "offset " << geqStatus.offset << " slope " - << geqStatus.slope << " (analogue gain " - << deviceStatus.analogueGain << " lux " - << luxStatus.lux << ")"; - imageMetadata->set("geq.status", geqStatus); -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Geq(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/geq.h b/src/ipa/raspberrypi/controller/rpi/geq.h deleted file mode 100644 index ee3a52ff..00000000 --- a/src/ipa/raspberrypi/controller/rpi/geq.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * geq.h - GEQ (green equalisation) control algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../geq_status.h" - -namespace RPiController { - -/* Back End algorithm to apply appropriate GEQ settings. */ - -struct GeqConfig { - uint16_t offset; - double slope; - Pwl strength; /* lux to strength factor */ -}; - -class Geq : public Algorithm -{ -public: - Geq(Controller *controller); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void prepare(Metadata *imageMetadata) override; - -private: - GeqConfig config_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/lux.cpp b/src/ipa/raspberrypi/controller/rpi/lux.cpp deleted file mode 100644 index 06625f3a..00000000 --- a/src/ipa/raspberrypi/controller/rpi/lux.cpp +++ /dev/null @@ -1,115 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * lux.cpp - Lux control algorithm - */ -#include <math.h> - -#include <libcamera/base/log.h> - -#include "../device_status.h" - -#include "lux.h" - -using namespace RPiController; -using namespace libcamera; -using namespace std::literals::chrono_literals; - -LOG_DEFINE_CATEGORY(RPiLux) - -#define NAME "rpi.lux" - -Lux::Lux(Controller *controller) - : Algorithm(controller) -{ - /* - * Put in some defaults as there will be no meaningful values until - * Process has run. 
- */ - status_.aperture = 1.0; - status_.lux = 400; -} - -char const *Lux::name() const -{ - return NAME; -} - -int Lux::read(const libcamera::YamlObject ¶ms) -{ - auto value = params["reference_shutter_speed"].get<double>(); - if (!value) - return -EINVAL; - referenceShutterSpeed_ = *value * 1.0us; - - value = params["reference_gain"].get<double>(); - if (!value) - return -EINVAL; - referenceGain_ = *value; - - referenceAperture_ = params["reference_aperture"].get<double>(1.0); - - value = params["reference_Y"].get<double>(); - if (!value) - return -EINVAL; - referenceY_ = *value; - - value = params["reference_lux"].get<double>(); - if (!value) - return -EINVAL; - referenceLux_ = *value; - - currentAperture_ = referenceAperture_; - return 0; -} - -void Lux::setCurrentAperture(double aperture) -{ - currentAperture_ = aperture; -} - -void Lux::prepare(Metadata *imageMetadata) -{ - std::unique_lock<std::mutex> lock(mutex_); - imageMetadata->set("lux.status", status_); -} - -void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata) -{ - DeviceStatus deviceStatus; - if (imageMetadata->get("device.status", deviceStatus) == 0) { - double currentGain = deviceStatus.analogueGain; - double currentAperture = deviceStatus.aperture.value_or(currentAperture_); - double currentY = stats->yHist.interQuantileMean(0, 1); - double gainRatio = referenceGain_ / currentGain; - double shutterSpeedRatio = - referenceShutterSpeed_ / deviceStatus.shutterSpeed; - double apertureRatio = referenceAperture_ / currentAperture; - double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_; - double estimatedLux = shutterSpeedRatio * gainRatio * - apertureRatio * apertureRatio * - yRatio * referenceLux_; - LuxStatus status; - status.lux = estimatedLux; - status.aperture = currentAperture; - LOG(RPiLux, Debug) << ": estimated lux " << estimatedLux; - { - std::unique_lock<std::mutex> lock(mutex_); - status_ = status; - } - /* - * Overwrite the metadata here as well, so that downstream - * algorithms get the latest value. - */ - imageMetadata->set("lux.status", status); - } else - LOG(RPiLux, Warning) << ": no device metadata"; -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Lux(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/lux.h b/src/ipa/raspberrypi/controller/rpi/lux.h deleted file mode 100644 index 89411a54..00000000 --- a/src/ipa/raspberrypi/controller/rpi/lux.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * lux.h - Lux control algorithm - */ -#pragma once - -#include <mutex> - -#include <libcamera/base/utils.h> - -#include "../lux_status.h" -#include "../algorithm.h" - -/* This is our implementation of the "lux control algorithm". */ - -namespace RPiController { - -class Lux : public Algorithm -{ -public: - Lux(Controller *controller); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void prepare(Metadata *imageMetadata) override; - void process(StatisticsPtr &stats, Metadata *imageMetadata) override; - void setCurrentAperture(double aperture); - -private: - /* - * These values define the conditions of the reference image, against - * which we compare the new image. 
- */ - libcamera::utils::Duration referenceShutterSpeed_; - double referenceGain_; - double referenceAperture_; /* units of 1/f */ - double referenceY_; /* out of 65536 */ - double referenceLux_; - double currentAperture_; - LuxStatus status_; - std::mutex mutex_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/noise.cpp b/src/ipa/raspberrypi/controller/rpi/noise.cpp deleted file mode 100644 index bcd8b9ed..00000000 --- a/src/ipa/raspberrypi/controller/rpi/noise.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * noise.cpp - Noise control algorithm - */ - -#include <math.h> - -#include <libcamera/base/log.h> - -#include "../device_status.h" -#include "../noise_status.h" - -#include "noise.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiNoise) - -#define NAME "rpi.noise" - -Noise::Noise(Controller *controller) - : Algorithm(controller), modeFactor_(1.0) -{ -} - -char const *Noise::name() const -{ - return NAME; -} - -void Noise::switchMode(CameraMode const &cameraMode, - [[maybe_unused]] Metadata *metadata) -{ - /* - * For example, we would expect a 2x2 binned mode to have a "noise - * factor" of sqrt(2x2) = 2. (can't be less than one, right?) - */ - modeFactor_ = std::max(1.0, cameraMode.noiseFactor); -} - -int Noise::read(const libcamera::YamlObject ¶ms) -{ - auto value = params["reference_constant"].get<double>(); - if (!value) - return -EINVAL; - referenceConstant_ = *value; - - value = params["reference_slope"].get<double>(); - if (!value) - return -EINVAL; - referenceSlope_ = *value; - - return 0; -} - -void Noise::prepare(Metadata *imageMetadata) -{ - struct DeviceStatus deviceStatus; - deviceStatus.analogueGain = 1.0; /* keep compiler calm */ - if (imageMetadata->get("device.status", deviceStatus) == 0) { - /* - * There is a slight question as to exactly how the noise - * profile, specifically the constant part of it, scales. For - * now we assume it all scales the same, and we'll revisit this - * if it proves substantially wrong. NOTE: we may also want to - * make some adjustments based on the camera mode (such as - * binning), if we knew how to discover it... - */ - double factor = sqrt(deviceStatus.analogueGain) / modeFactor_; - struct NoiseStatus status; - status.noiseConstant = referenceConstant_ * factor; - status.noiseSlope = referenceSlope_ * factor; - imageMetadata->set("noise.status", status); - LOG(RPiNoise, Debug) - << "constant " << status.noiseConstant - << " slope " << status.noiseSlope; - } else - LOG(RPiNoise, Warning) << " no metadata"; -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return new Noise(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/noise.h b/src/ipa/raspberrypi/controller/rpi/noise.h deleted file mode 100644 index 74c31e64..00000000 --- a/src/ipa/raspberrypi/controller/rpi/noise.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * noise.h - Noise control algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../noise_status.h" - -/* This is our implementation of the "noise algorithm". 
*/ - -namespace RPiController { - -class Noise : public Algorithm -{ -public: - Noise(Controller *controller); - char const *name() const override; - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - int read(const libcamera::YamlObject ¶ms) override; - void prepare(Metadata *imageMetadata) override; - -private: - /* the noise profile for analogue gain of 1.0 */ - double referenceConstant_; - double referenceSlope_; - double modeFactor_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.cpp b/src/ipa/raspberrypi/controller/rpi/sdn.cpp deleted file mode 100644 index b6b66251..00000000 --- a/src/ipa/raspberrypi/controller/rpi/sdn.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019-2021, Raspberry Pi Ltd - * - * sdn.cpp - SDN (spatial denoise) control algorithm - */ - -#include <libcamera/base/log.h> - -#include "../denoise_status.h" -#include "../noise_status.h" - -#include "sdn.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiSdn) - -/* - * Calculate settings for the spatial denoise block using the noise profile in - * the image metadata. - */ - -#define NAME "rpi.sdn" - -Sdn::Sdn(Controller *controller) - : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourOff) -{ -} - -char const *Sdn::name() const -{ - return NAME; -} - -int Sdn::read(const libcamera::YamlObject ¶ms) -{ - deviation_ = params["deviation"].get<double>(3.2); - strength_ = params["strength"].get<double>(0.75); - return 0; -} - -void Sdn::initialise() -{ -} - -void Sdn::prepare(Metadata *imageMetadata) -{ - struct NoiseStatus noiseStatus = {}; - noiseStatus.noiseSlope = 3.0; /* in case no metadata */ - if (imageMetadata->get("noise.status", noiseStatus) != 0) - LOG(RPiSdn, Warning) << "no noise profile found"; - LOG(RPiSdn, Debug) - << "Noise profile: constant " << noiseStatus.noiseConstant - << " slope " << noiseStatus.noiseSlope; - struct DenoiseStatus status; - status.noiseConstant = noiseStatus.noiseConstant * deviation_; - status.noiseSlope = noiseStatus.noiseSlope * deviation_; - status.strength = strength_; - status.mode = static_cast<std::underlying_type_t<DenoiseMode>>(mode_); - imageMetadata->set("denoise.status", status); - LOG(RPiSdn, Debug) - << "programmed constant " << status.noiseConstant - << " slope " << status.noiseSlope - << " strength " << status.strength; -} - -void Sdn::setMode(DenoiseMode mode) -{ - /* We only distinguish between off and all other modes. */ - mode_ = mode; -} - -/* Register algorithm with the system. */ -static Algorithm *create(Controller *controller) -{ - return (Algorithm *)new Sdn(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.h b/src/ipa/raspberrypi/controller/rpi/sdn.h deleted file mode 100644 index 9dd73c38..00000000 --- a/src/ipa/raspberrypi/controller/rpi/sdn.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * sdn.h - SDN (spatial denoise) control algorithm - */ -#pragma once - -#include "../algorithm.h" -#include "../denoise_algorithm.h" - -namespace RPiController { - -/* Algorithm to calculate correct spatial denoise (SDN) settings. 
*/ - -class Sdn : public DenoiseAlgorithm -{ -public: - Sdn(Controller *controller = NULL); - char const *name() const override; - int read(const libcamera::YamlObject ¶ms) override; - void initialise() override; - void prepare(Metadata *imageMetadata) override; - void setMode(DenoiseMode mode) override; - -private: - double deviation_; - double strength_; - DenoiseMode mode_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp deleted file mode 100644 index 4f6f020a..00000000 --- a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp +++ /dev/null @@ -1,92 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * sharpen.cpp - sharpening control algorithm - */ - -#include <math.h> - -#include <libcamera/base/log.h> - -#include "../sharpen_status.h" - -#include "sharpen.h" - -using namespace RPiController; -using namespace libcamera; - -LOG_DEFINE_CATEGORY(RPiSharpen) - -#define NAME "rpi.sharpen" - -Sharpen::Sharpen(Controller *controller) - : SharpenAlgorithm(controller), userStrength_(1.0) -{ -} - -char const *Sharpen::name() const -{ - return NAME; -} - -void Sharpen::switchMode(CameraMode const &cameraMode, - [[maybe_unused]] Metadata *metadata) -{ - /* can't be less than one, right? */ - modeFactor_ = std::max(1.0, cameraMode.noiseFactor); -} - -int Sharpen::read(const libcamera::YamlObject ¶ms) -{ - threshold_ = params["threshold"].get<double>(1.0); - strength_ = params["strength"].get<double>(1.0); - limit_ = params["limit"].get<double>(1.0); - LOG(RPiSharpen, Debug) - << "Read threshold " << threshold_ - << " strength " << strength_ - << " limit " << limit_; - return 0; -} - -void Sharpen::setStrength(double strength) -{ - /* - * Note that this function is how an application sets the overall - * sharpening "strength". We call this the "user strength" field - * as there already is a strength_ field - being an internal gain - * parameter that gets passed to the ISP control code. Negative - * values are not allowed - coerce them to zero (no sharpening). - */ - userStrength_ = std::max(0.0, strength); -} - -void Sharpen::prepare(Metadata *imageMetadata) -{ - /* - * The userStrength_ affects the algorithm's internal gain directly, but - * we adjust the limit and threshold less aggressively. Using a sqrt - * function is an arbitrary but gentle way of accomplishing this. - */ - double userStrengthSqrt = sqrt(userStrength_); - struct SharpenStatus status; - /* - * Binned modes seem to need the sharpening toned down with this - * pipeline, thus we use the modeFactor_ here. Also avoid - * divide-by-zero with the userStrengthSqrt. - */ - status.threshold = threshold_ * modeFactor_ / - std::max(0.01, userStrengthSqrt); - status.strength = strength_ / modeFactor_ * userStrength_; - status.limit = limit_ / modeFactor_ * userStrengthSqrt; - /* Finally, report any application-supplied parameters that were used. */ - status.userStrength = userStrength_; - imageMetadata->set("sharpen.status", status); -} - -/* Register algorithm with the system. 
*/ -static Algorithm *create(Controller *controller) -{ - return new Sharpen(controller); -} -static RegisterAlgorithm reg(NAME, &create); diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.h b/src/ipa/raspberrypi/controller/rpi/sharpen.h deleted file mode 100644 index 8bb7631e..00000000 --- a/src/ipa/raspberrypi/controller/rpi/sharpen.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * sharpen.h - sharpening control algorithm - */ -#pragma once - -#include "../sharpen_algorithm.h" -#include "../sharpen_status.h" - -/* This is our implementation of the "sharpen algorithm". */ - -namespace RPiController { - -class Sharpen : public SharpenAlgorithm -{ -public: - Sharpen(Controller *controller); - char const *name() const override; - void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; - int read(const libcamera::YamlObject ¶ms) override; - void setStrength(double strength) override; - void prepare(Metadata *imageMetadata) override; - -private: - double threshold_; - double strength_; - double limit_; - double modeFactor_; - double userStrength_; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/sharpen_algorithm.h b/src/ipa/raspberrypi/controller/sharpen_algorithm.h deleted file mode 100644 index 3be21c32..00000000 --- a/src/ipa/raspberrypi/controller/sharpen_algorithm.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2020, Raspberry Pi Ltd - * - * sharpen_algorithm.h - sharpness control algorithm interface - */ -#pragma once - -#include "algorithm.h" - -namespace RPiController { - -class SharpenAlgorithm : public Algorithm -{ -public: - SharpenAlgorithm(Controller *controller) : Algorithm(controller) {} - /* A sharpness control algorithm must provide the following: */ - virtual void setStrength(double strength) = 0; -}; - -} /* namespace RPiController */ diff --git a/src/ipa/raspberrypi/controller/sharpen_status.h b/src/ipa/raspberrypi/controller/sharpen_status.h deleted file mode 100644 index 106166db..00000000 --- a/src/ipa/raspberrypi/controller/sharpen_status.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Copyright (C) 2019, Raspberry Pi Ltd - * - * sharpen_status.h - Sharpen control algorithm status - */ -#pragma once - -/* The "sharpen" algorithm stores the strength to use. */ - -struct SharpenStatus { - /* controls the smallest level of detail (or noise!) that sharpening will pick up */ - double threshold; - /* the rate at which the sharpening response ramps once above the threshold */ - double strength; - /* upper limit of the allowed sharpening response */ - double limit; - /* The sharpening strength requested by the user or application. */ - double userStrength; -}; |
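The lux estimate in the deleted lux.cpp above reduces to a product of ratios against a reference image, with the aperture ratio applied twice because the light gathered scales with the aperture area. A minimal standalone sketch of that arithmetic, using hypothetical reference and per-frame numbers rather than values from any real tuning file:

#include <iostream>

int main()
{
	/* Hypothetical reference conditions (normally read from the tuning file). */
	const double referenceShutterUs = 10000.0; /* reference_shutter_speed */
	const double referenceGain = 1.0;          /* reference_gain */
	const double referenceAperture = 1.0;      /* reference_aperture */
	const double referenceY = 12000.0;         /* reference_Y, out of 65536 */
	const double referenceLux = 900.0;         /* reference_lux */

	/* Hypothetical conditions for the current frame. */
	const double currentShutterUs = 5000.0;
	const double currentGain = 2.0;
	const double currentAperture = 1.0;
	const double currentY = 12000.0; /* already scaled to the 0..65535 range */

	const double shutterSpeedRatio = referenceShutterUs / currentShutterUs;
	const double gainRatio = referenceGain / currentGain;
	const double apertureRatio = referenceAperture / currentAperture;
	const double yRatio = currentY / referenceY;

	/* Same expression as Lux::process(): aperture ratio enters squared. */
	const double estimatedLux = shutterSpeedRatio * gainRatio *
				    apertureRatio * apertureRatio *
				    yRatio * referenceLux;

	std::cout << "estimated lux " << estimatedLux << std::endl; /* 900 here */
	return 0;
}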
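The grey-world fallback in Awb::awbGrey() above sorts the zone statistics by R/G and by B/G, discards the top and bottom quarters, and chooses gains so that the averaged middle half comes out grey. A rough standalone sketch of the same idea with hypothetical zone values (the deleted code cross-multiplies in its comparators to avoid the divisions and sorts zones_ in place, details omitted here):

#include <algorithm>
#include <iostream>
#include <vector>

struct RGB {
	double R, G, B;
};

int main()
{
	/* Hypothetical per-zone sums; the real values come from the ISP's AWB statistics. */
	std::vector<RGB> zones = {
		{ 100, 200, 150 }, { 80, 160, 120 }, { 90, 170, 140 }, { 120, 220, 160 },
		{ 60, 140, 100 }, { 110, 210, 170 }, { 70, 150, 110 }, { 95, 180, 130 },
	};

	/* Sort copies of the zones by R/G and by B/G respectively. */
	std::vector<RGB> derivsR = zones, derivsB = zones;
	std::sort(derivsR.begin(), derivsR.end(),
		  [](RGB const &a, RGB const &b) { return a.R / a.G < b.R / b.G; });
	std::sort(derivsB.begin(), derivsB.end(),
		  [](RGB const &a, RGB const &b) { return a.B / a.G < b.B / b.G; });

	/* Average only the middle half, discarding the extreme quarter at each end. */
	const size_t discard = derivsR.size() / 4;
	RGB sumR{ 0, 0, 0 }, sumB{ 0, 0, 0 };
	for (size_t i = discard; i < derivsR.size() - discard; i++) {
		sumR.R += derivsR[i].R, sumR.G += derivsR[i].G;
		sumB.B += derivsB[i].B, sumB.G += derivsB[i].G;
	}

	/* Grey-world assumption: after the gains, average R and B should match average G. */
	const double gainR = sumR.G / (sumR.R + 1);
	const double gainB = sumB.G / (sumB.B + 1);
	std::cout << "gain r " << gainR << " gain b " << gainB << std::endl;
	return 0;
}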
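The user-facing sharpening control in Sharpen::prepare() above scales the internal gain linearly with the requested strength, but moves the threshold and limit only by its square root, and tones binned modes down via the mode factor. A small sketch of that scaling with hypothetical tuning values:

#include <algorithm>
#include <cmath>
#include <iostream>

int main()
{
	/* Hypothetical tuning values and camera mode factor (e.g. a 2x2 binned mode). */
	const double threshold = 1.0, strength = 1.0, limit = 1.0;
	const double modeFactor = 2.0;

	/* Application-requested strength: 0 disables sharpening, 1 is the default. */
	const double userStrength = 0.5; /* negative requests are coerced to 0 */
	const double userStrengthSqrt = std::sqrt(userStrength);

	/* Same scaling as Sharpen::prepare(). */
	const double outThreshold = threshold * modeFactor / std::max(0.01, userStrengthSqrt);
	const double outStrength = strength / modeFactor * userStrength;
	const double outLimit = limit / modeFactor * userStrengthSqrt;

	std::cout << "threshold " << outThreshold
		  << " strength " << outStrength
		  << " limit " << outLimit << std::endl;
	return 0;
}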