path: root/src/ipa/raspberrypi/controller/rpi
author     Naushir Patuck <naush@raspberrypi.com>    2020-05-03 16:48:42 +0100
committer  Laurent Pinchart <laurent.pinchart@ideasonboard.com>    2020-05-11 23:54:40 +0300
commit     0db2c8dc75e466e7648dc1b95380495c6a126349 (patch)
tree       fc723a251981ded749c900947a2f510ed56e60da /src/ipa/raspberrypi/controller/rpi
parent     740fd1b62f670bd1ad4965ef0866ef5d51bdf947 (diff)
libcamera: ipa: Raspberry Pi IPA
Initial implementation of the Raspberry Pi (BCM2835) libcamera IPA and
associated libraries. All code is licensed under the BSD-2-Clause terms.

Copyright (c) 2019-2020 Raspberry Pi Trading Ltd.

Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Diffstat (limited to 'src/ipa/raspberrypi/controller/rpi')
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.cpp          642
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.hpp          123
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.cpp         705
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.hpp         104
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.cpp          608
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.hpp          178
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.cpp   56
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.hpp   30
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.cpp          163
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.hpp           76
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.cpp     176
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.hpp      51
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.cpp           49
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.hpp           32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.cpp           75
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.hpp           34
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.cpp          104
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.hpp           42
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.cpp         71
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.hpp         32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.cpp           63
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.hpp           29
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.cpp       60
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.hpp       32
24 files changed, 3535 insertions, 0 deletions
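
All of the algorithms added by this patch read their tuning parameters through boost::property_tree in their Read() methods. As a minimal, hypothetical sketch of that pattern (the JSON fragment and values below are made up for illustration, not taken from any real tuning file):

// Sketch only: shows the boost::property_tree pattern used by the Read()
// methods in this patch, with defaults supplied where a key is absent.
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <iostream>
#include <sstream>

int main()
{
        // Hypothetical tuning fragment, not a real libcamera tuning file.
        std::istringstream json(R"({ "speed": 0.1, "base_ev": 1.25 })");
        boost::property_tree::ptree params;
        boost::property_tree::read_json(json, params);
        // Same pattern as AgcConfig::Read(): optional keys fall back to defaults.
        double speed = params.get<double>("speed", 0.2);
        double base_ev = params.get<double>("base_ev", 1.0);
        double fast_reduce = params.get<double>("fast_reduce_threshold", 0.4);
        std::cout << speed << " " << base_ev << " " << fast_reduce << "\n";
        return 0;
}
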
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp
new file mode 100644
index 00000000..a4742872
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/agc.cpp
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * agc.cpp - AGC/AEC control algorithm
+ */
+
+#include <map>
+
+#include "linux/bcm2835-isp.h"
+
+#include "../awb_status.h"
+#include "../device_status.h"
+#include "../histogram.hpp"
+#include "../logging.hpp"
+#include "../lux_status.h"
+#include "../metadata.hpp"
+
+#include "agc.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.agc"
+
+#define PIPELINE_BITS 13 // seems to be a 13-bit pipeline
+
+void AgcMeteringMode::Read(boost::property_tree::ptree const &params)
+{
+ int num = 0;
+ for (auto &p : params.get_child("weights")) {
+ if (num == AGC_STATS_SIZE)
+ throw std::runtime_error("AgcConfig: too many weights");
+ weights[num++] = p.second.get_value<double>();
+ }
+ if (num != AGC_STATS_SIZE)
+ throw std::runtime_error("AgcConfig: insufficient weights");
+}
+
+static std::string
+read_metering_modes(std::map<std::string, AgcMeteringMode> &metering_modes,
+ boost::property_tree::ptree const &params)
+{
+ std::string first;
+ for (auto &p : params) {
+ AgcMeteringMode metering_mode;
+ metering_mode.Read(p.second);
+ metering_modes[p.first] = std::move(metering_mode);
+ if (first.empty())
+ first = p.first;
+ }
+ return first;
+}
+
+static int read_double_list(std::vector<double> &list,
+ boost::property_tree::ptree const &params)
+{
+ for (auto &p : params)
+ list.push_back(p.second.get_value<double>());
+ return list.size();
+}
+
+void AgcExposureMode::Read(boost::property_tree::ptree const &params)
+{
+ int num_shutters =
+ read_double_list(shutter, params.get_child("shutter"));
+ int num_ags = read_double_list(gain, params.get_child("gain"));
+ if (num_shutters < 2 || num_ags < 2)
+ throw std::runtime_error(
+ "AgcConfig: must have at least two entries in exposure profile");
+ if (num_shutters != num_ags)
+ throw std::runtime_error(
+ "AgcConfig: expect same number of exposure and gain entries in exposure profile");
+}
+
+static std::string
+read_exposure_modes(std::map<std::string, AgcExposureMode> &exposure_modes,
+ boost::property_tree::ptree const &params)
+{
+ std::string first;
+ for (auto &p : params) {
+ AgcExposureMode exposure_mode;
+ exposure_mode.Read(p.second);
+ exposure_modes[p.first] = std::move(exposure_mode);
+ if (first.empty())
+ first = p.first;
+ }
+ return first;
+}
+
+void AgcConstraint::Read(boost::property_tree::ptree const &params)
+{
+ std::string bound_string = params.get<std::string>("bound", "");
+ transform(bound_string.begin(), bound_string.end(),
+ bound_string.begin(), ::toupper);
+ if (bound_string != "UPPER" && bound_string != "LOWER")
+ throw std::runtime_error(
+ "AGC constraint type should be UPPER or LOWER");
+ bound = bound_string == "UPPER" ? Bound::UPPER : Bound::LOWER;
+ q_lo = params.get<double>("q_lo");
+ q_hi = params.get<double>("q_hi");
+ Y_target.Read(params.get_child("y_target"));
+}
+
+static AgcConstraintMode
+read_constraint_mode(boost::property_tree::ptree const &params)
+{
+ AgcConstraintMode mode;
+ for (auto &p : params) {
+ AgcConstraint constraint;
+ constraint.Read(p.second);
+ mode.push_back(std::move(constraint));
+ }
+ return mode;
+}
+
+static std::string read_constraint_modes(
+ std::map<std::string, AgcConstraintMode> &constraint_modes,
+ boost::property_tree::ptree const &params)
+{
+ std::string first;
+ for (auto &p : params) {
+ constraint_modes[p.first] = read_constraint_mode(p.second);
+ if (first.empty())
+ first = p.first;
+ }
+ return first;
+}
+
+void AgcConfig::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG("AgcConfig");
+ default_metering_mode = read_metering_modes(
+ metering_modes, params.get_child("metering_modes"));
+ default_exposure_mode = read_exposure_modes(
+ exposure_modes, params.get_child("exposure_modes"));
+ default_constraint_mode = read_constraint_modes(
+ constraint_modes, params.get_child("constraint_modes"));
+ Y_target.Read(params.get_child("y_target"));
+ speed = params.get<double>("speed", 0.2);
+ startup_frames = params.get<uint16_t>("startup_frames", 10);
+ fast_reduce_threshold =
+ params.get<double>("fast_reduce_threshold", 0.4);
+ base_ev = params.get<double>("base_ev", 1.0);
+}
+
+Agc::Agc(Controller *controller)
+ : AgcAlgorithm(controller), metering_mode_(nullptr),
+ exposure_mode_(nullptr), constraint_mode_(nullptr),
+ frame_count_(0), lock_count_(0)
+{
+ ev_ = status_.ev = 1.0;
+ flicker_period_ = status_.flicker_period = 0.0;
+ fixed_shutter_ = status_.fixed_shutter = 0;
+ fixed_analogue_gain_ = status_.fixed_analogue_gain = 0.0;
+ // set to zero initially, so we can tell it's not been calculated
+ status_.total_exposure_value = 0.0;
+ status_.target_exposure_value = 0.0;
+ status_.locked = false;
+ output_status_ = status_;
+}
+
+char const *Agc::Name() const
+{
+ return NAME;
+}
+
+void Agc::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG("Agc");
+ config_.Read(params);
+ // Set the config's defaults (which are the first ones it read) as our
+ // current modes, until someone changes them. (they're all known to
+ // exist at this point)
+ metering_mode_name_ = config_.default_metering_mode;
+ metering_mode_ = &config_.metering_modes[metering_mode_name_];
+ exposure_mode_name_ = config_.default_exposure_mode;
+ exposure_mode_ = &config_.exposure_modes[exposure_mode_name_];
+ constraint_mode_name_ = config_.default_constraint_mode;
+ constraint_mode_ = &config_.constraint_modes[constraint_mode_name_];
+}
+
+void Agc::SetEv(double ev)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ ev_ = ev;
+}
+
+void Agc::SetFlickerPeriod(double flicker_period)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ flicker_period_ = flicker_period;
+}
+
+void Agc::SetFixedShutter(double fixed_shutter)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ fixed_shutter_ = fixed_shutter;
+}
+
+void Agc::SetFixedAnalogueGain(double fixed_analogue_gain)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ fixed_analogue_gain_ = fixed_analogue_gain;
+}
+
+void Agc::SetMeteringMode(std::string const &metering_mode_name)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ metering_mode_name_ = metering_mode_name;
+}
+
+void Agc::SetExposureMode(std::string const &exposure_mode_name)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ exposure_mode_name_ = exposure_mode_name;
+}
+
+void Agc::SetConstraintMode(std::string const &constraint_mode_name)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ constraint_mode_name_ = constraint_mode_name;
+}
+
+void Agc::Prepare(Metadata *image_metadata)
+{
+ AgcStatus status;
+ {
+ std::unique_lock<std::mutex> lock(output_mutex_);
+ status = output_status_;
+ }
+ int lock_count = lock_count_;
+ lock_count_ = 0;
+ status.digital_gain = 1.0;
+ if (status_.total_exposure_value) {
+ // Process has run, so we have meaningful values.
+ DeviceStatus device_status;
+ if (image_metadata->Get("device.status", device_status) == 0) {
+ double actual_exposure = device_status.shutter_speed *
+ device_status.analogue_gain;
+ if (actual_exposure) {
+ status.digital_gain =
+ status_.total_exposure_value /
+ actual_exposure;
+ RPI_LOG("Want total exposure " << status_.total_exposure_value);
+ // Never ask for a gain < 1.0, and also impose
+ // some upper limit. Make it customisable?
+ status.digital_gain = std::max(
+ 1.0,
+ std::min(status.digital_gain, 4.0));
+ RPI_LOG("Actual exposure " << actual_exposure);
+ RPI_LOG("Use digital_gain " << status.digital_gain);
+ RPI_LOG("Effective exposure " << actual_exposure * status.digital_gain);
+ // Decide whether AEC/AGC has converged.
+ // Insist AGC is steady for MAX_LOCK_COUNT
+ // frames before we say we are "locked".
+ // (The hard-coded constants may need to
+ // become customisable.)
+ if (status.target_exposure_value) {
+#define MAX_LOCK_COUNT 3
+ double err = 0.10 * status.target_exposure_value + 200;
+ if (actual_exposure <
+ status.target_exposure_value + err
+ && actual_exposure >
+ status.target_exposure_value - err)
+ lock_count_ =
+ std::min(lock_count + 1,
+ MAX_LOCK_COUNT);
+ else if (actual_exposure <
+ status.target_exposure_value
+ + 1.5 * err &&
+ actual_exposure >
+ status.target_exposure_value
+ - 1.5 * err)
+ lock_count_ = lock_count;
+ RPI_LOG("Lock count: " << lock_count_);
+ }
+ }
+ } else
+ RPI_LOG(Name() << ": no device metadata");
+ status.locked = lock_count_ >= MAX_LOCK_COUNT;
+ //printf("%s\n", status.locked ? "+++++++++" : "-");
+ image_metadata->Set("agc.status", status);
+ }
+}
+
+void Agc::Process(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ frame_count_++;
+ // First a little bit of housekeeping, fetching up-to-date settings and
+ // configuration, that kind of thing.
+ housekeepConfig();
+ // Get the current exposure values for the frame that's just arrived.
+ fetchCurrentExposure(image_metadata);
+ // Compute the total gain we require relative to the current exposure.
+ double gain, target_Y;
+ computeGain(stats.get(), image_metadata, gain, target_Y);
+ // Now compute the target (final) exposure which we think we want.
+ computeTargetExposure(gain);
+ // Some of the exposure has to be applied as digital gain, so work out
+ // what that is. This function also tells us whether it's decided to
+ // "desaturate" the image more quickly.
+ bool desaturate = applyDigitalGain(image_metadata, gain, target_Y);
+ // The results have to be filtered so as not to change too rapidly.
+ filterExposure(desaturate);
+ // The last thing is to divvy up the exposure value into a shutter time
+ // and analogue_gain, according to the current exposure mode.
+ divvyupExposure();
+ // Finally advertise what we've done.
+ writeAndFinish(image_metadata, desaturate);
+}
+
+static void copy_string(std::string const &s, char *d, size_t size)
+{
+ size_t length = s.copy(d, size - 1);
+ d[length] = '\0';
+}
+
+void Agc::housekeepConfig()
+{
+ // First fetch all the up-to-date settings, so no one else has to do it.
+ std::string new_exposure_mode_name, new_constraint_mode_name,
+ new_metering_mode_name;
+ {
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ new_metering_mode_name = metering_mode_name_;
+ new_exposure_mode_name = exposure_mode_name_;
+ new_constraint_mode_name = constraint_mode_name_;
+ status_.ev = ev_;
+ status_.fixed_shutter = fixed_shutter_;
+ status_.fixed_analogue_gain = fixed_analogue_gain_;
+ status_.flicker_period = flicker_period_;
+ }
+ RPI_LOG("ev " << status_.ev << " fixed_shutter "
+ << status_.fixed_shutter << " fixed_analogue_gain "
+ << status_.fixed_analogue_gain);
+ // Make sure the "mode" pointers point to the up-to-date things, if
+ // they've changed.
+ if (strcmp(new_metering_mode_name.c_str(), status_.metering_mode)) {
+ auto it = config_.metering_modes.find(new_metering_mode_name);
+ if (it == config_.metering_modes.end())
+ throw std::runtime_error("Agc: no metering mode " +
+ new_metering_mode_name);
+ metering_mode_ = &it->second;
+ copy_string(new_metering_mode_name, status_.metering_mode,
+ sizeof(status_.metering_mode));
+ }
+ if (strcmp(new_exposure_mode_name.c_str(), status_.exposure_mode)) {
+ auto it = config_.exposure_modes.find(new_exposure_mode_name);
+ if (it == config_.exposure_modes.end())
+ throw std::runtime_error("Agc: no exposure profile " +
+ new_exposure_mode_name);
+ exposure_mode_ = &it->second;
+ copy_string(new_exposure_mode_name, status_.exposure_mode,
+ sizeof(status_.exposure_mode));
+ }
+ if (strcmp(new_constraint_mode_name.c_str(), status_.constraint_mode)) {
+ auto it =
+ config_.constraint_modes.find(new_constraint_mode_name);
+ if (it == config_.constraint_modes.end())
+ throw std::runtime_error("Agc: no constraint list " +
+ new_constraint_mode_name);
+ constraint_mode_ = &it->second;
+ copy_string(new_constraint_mode_name, status_.constraint_mode,
+ sizeof(status_.constraint_mode));
+ }
+ RPI_LOG("exposure_mode "
+ << new_exposure_mode_name << " constraint_mode "
+ << new_constraint_mode_name << " metering_mode "
+ << new_metering_mode_name);
+}
+
+void Agc::fetchCurrentExposure(Metadata *image_metadata)
+{
+ std::unique_lock<Metadata> lock(*image_metadata);
+ DeviceStatus *device_status =
+ image_metadata->GetLocked<DeviceStatus>("device.status");
+ if (!device_status)
+ throw std::runtime_error("Agc: no device metadata");
+ current_.shutter = device_status->shutter_speed;
+ current_.analogue_gain = device_status->analogue_gain;
+ AgcStatus *agc_status =
+ image_metadata->GetLocked<AgcStatus>("agc.status");
+ current_.total_exposure = agc_status ? agc_status->total_exposure_value : 0;
+ current_.total_exposure_no_dg = current_.shutter * current_.analogue_gain;
+}
+
+static double compute_initial_Y(bcm2835_isp_stats *stats, Metadata *image_metadata,
+ double weights[])
+{
+ bcm2835_isp_stats_region *regions = stats->agc_stats;
+ struct AwbStatus awb;
+ awb.gain_r = awb.gain_g = awb.gain_b = 1.0; // in case no metadata
+ if (image_metadata->Get("awb.status", awb) != 0)
+ RPI_WARN("Agc: no AWB status found");
+ double Y_sum = 0, weight_sum = 0;
+ for (int i = 0; i < AGC_STATS_SIZE; i++) {
+ if (regions[i].counted == 0)
+ continue;
+ weight_sum += weights[i];
+ double Y = regions[i].r_sum * awb.gain_r * .299 +
+ regions[i].g_sum * awb.gain_g * .587 +
+ regions[i].b_sum * awb.gain_b * .114;
+ Y /= regions[i].counted;
+ Y_sum += Y * weights[i];
+ }
+ return Y_sum / weight_sum / (1 << PIPELINE_BITS);
+}
+
+// We handle extra gain through EV by adjusting our Y targets. However, you
+// simply can't monitor histograms once they get very close to (or beyond!)
+// saturation, so we clamp the Y targets to this value. It does mean that EV
+// increases don't necessarily do quite what you might expect in certain
+// (contrived) cases.
+
+#define EV_GAIN_Y_TARGET_LIMIT 0.9
+
+static double constraint_compute_gain(AgcConstraint &c, Histogram &h,
+ double lux, double ev_gain,
+ double &target_Y)
+{
+ target_Y = c.Y_target.Eval(c.Y_target.Domain().Clip(lux));
+ target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
+ double iqm = h.InterQuantileMean(c.q_lo, c.q_hi);
+ return (target_Y * NUM_HISTOGRAM_BINS) / iqm;
+}
+
+void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
+ double &gain, double &target_Y)
+{
+ struct LuxStatus lux = {};
+ lux.lux = 400; // default lux level to 400 in case no metadata found
+ if (image_metadata->Get("lux.status", lux) != 0)
+ RPI_WARN("Agc: no lux level found");
+ Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS);
+ double ev_gain = status_.ev * config_.base_ev;
+ // The initial gain and target_Y come from some of the regions. After
+ // that we consider the histogram constraints.
+ target_Y =
+ config_.Y_target.Eval(config_.Y_target.Domain().Clip(lux.lux));
+ target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
+ double initial_Y = compute_initial_Y(statistics, image_metadata,
+ metering_mode_->weights);
+ gain = std::min(10.0, target_Y / (initial_Y + .001));
+ RPI_LOG("Initially Y " << initial_Y << " target " << target_Y
+ << " gives gain " << gain);
+ for (auto &c : *constraint_mode_) {
+ double new_target_Y;
+ double new_gain =
+ constraint_compute_gain(c, h, lux.lux, ev_gain,
+ new_target_Y);
+ RPI_LOG("Constraint has target_Y "
+ << new_target_Y << " giving gain " << new_gain);
+ if (c.bound == AgcConstraint::Bound::LOWER &&
+ new_gain > gain) {
+ RPI_LOG("Lower bound constraint adopted");
+ gain = new_gain, target_Y = new_target_Y;
+ } else if (c.bound == AgcConstraint::Bound::UPPER &&
+ new_gain < gain) {
+ RPI_LOG("Upper bound constraint adopted");
+ gain = new_gain, target_Y = new_target_Y;
+ }
+ }
+ RPI_LOG("Final gain " << gain << " (target_Y " << target_Y << " ev "
+ << status_.ev << " base_ev " << config_.base_ev
+ << ")");
+}
+
+void Agc::computeTargetExposure(double gain)
+{
+ // The statistics reflect the image without digital gain, so the final
+ // total exposure we're aiming for is:
+ target_.total_exposure = current_.total_exposure_no_dg * gain;
+ // The final target exposure is also limited to what the exposure
+ // mode allows.
+ double max_total_exposure =
+ (status_.fixed_shutter != 0.0
+ ? status_.fixed_shutter
+ : exposure_mode_->shutter.back()) *
+ (status_.fixed_analogue_gain != 0.0
+ ? status_.fixed_analogue_gain
+ : exposure_mode_->gain.back());
+ target_.total_exposure = std::min(target_.total_exposure,
+ max_total_exposure);
+ RPI_LOG("Target total_exposure " << target_.total_exposure);
+}
+
+bool Agc::applyDigitalGain(Metadata *image_metadata, double gain,
+ double target_Y)
+{
+ double dg = 1.0;
+ // I think this pipeline subtracts black level and rescales before we
+ // get the stats, so no need to worry about it.
+ struct AwbStatus awb;
+ if (image_metadata->Get("awb.status", awb) == 0) {
+ double min_gain = std::min(awb.gain_r,
+ std::min(awb.gain_g, awb.gain_b));
+ dg *= std::max(1.0, 1.0 / min_gain);
+ } else
+ RPI_WARN("Agc: no AWB status found");
+ RPI_LOG("after AWB, target dg " << dg << " gain " << gain
+ << " target_Y " << target_Y);
+ // Finally, if we're trying to reduce exposure but the target_Y is
+ // "close" to 1.0, then the gain computed for that constraint will be
+ // only slightly less than one, because the measured Y can never be
+ // larger than 1.0. When this happens, demand a large digital gain so
+ // that the exposure can be reduced, de-saturating the image much more
+ // quickly (and we then approach the correct value more quickly from
+ // below).
+ bool desaturate = target_Y > config_.fast_reduce_threshold &&
+ gain < sqrt(target_Y);
+ if (desaturate)
+ dg /= config_.fast_reduce_threshold;
+ RPI_LOG("Digital gain " << dg << " desaturate? " << desaturate);
+ target_.total_exposure_no_dg = target_.total_exposure / dg;
+ RPI_LOG("Target total_exposure_no_dg " << target_.total_exposure_no_dg);
+ return desaturate;
+}
+
+void Agc::filterExposure(bool desaturate)
+{
+ double speed = frame_count_ <= config_.startup_frames ? 1.0 : config_.speed;
+ if (filtered_.total_exposure == 0.0) {
+ filtered_.total_exposure = target_.total_exposure;
+ filtered_.total_exposure_no_dg = target_.total_exposure_no_dg;
+ } else {
+ // If close to the result go faster, to save making so many
+ // micro-adjustments on the way. (Make this customisable?)
+ if (filtered_.total_exposure < 1.2 * target_.total_exposure &&
+ filtered_.total_exposure > 0.8 * target_.total_exposure)
+ speed = sqrt(speed);
+ filtered_.total_exposure = speed * target_.total_exposure +
+ filtered_.total_exposure * (1.0 - speed);
+ // When desaturating, take a big jump down in exposure_no_dg,
+ // which we'll hide with digital gain.
+ if (desaturate)
+ filtered_.total_exposure_no_dg =
+ target_.total_exposure_no_dg;
+ else
+ filtered_.total_exposure_no_dg =
+ speed * target_.total_exposure_no_dg +
+ filtered_.total_exposure_no_dg * (1.0 - speed);
+ }
+ // We can't let the no_dg exposure deviate too far below the
+ // total exposure, as there might not be enough digital gain available
+ // in the ISP to hide it (which will cause nasty oscillation).
+ if (filtered_.total_exposure_no_dg <
+ filtered_.total_exposure * config_.fast_reduce_threshold)
+ filtered_.total_exposure_no_dg = filtered_.total_exposure *
+ config_.fast_reduce_threshold;
+ RPI_LOG("After filtering, total_exposure " << filtered_.total_exposure <<
+ " no dg " << filtered_.total_exposure_no_dg);
+}
+
+void Agc::divvyupExposure()
+{
+ // Sending the fixed shutter/gain cases through the same code may seem
+ // unnecessary, but it will make more sense when we extend this to cover
+ // variable aperture.
+ double exposure_value = filtered_.total_exposure_no_dg;
+ double shutter_time, analogue_gain;
+ shutter_time = status_.fixed_shutter != 0.0
+ ? status_.fixed_shutter
+ : exposure_mode_->shutter[0];
+ analogue_gain = status_.fixed_analogue_gain != 0.0
+ ? status_.fixed_analogue_gain
+ : exposure_mode_->gain[0];
+ if (shutter_time * analogue_gain < exposure_value) {
+ for (unsigned int stage = 1;
+ stage < exposure_mode_->gain.size(); stage++) {
+ if (status_.fixed_shutter == 0.0) {
+ if (exposure_mode_->shutter[stage] *
+ analogue_gain >=
+ exposure_value) {
+ shutter_time =
+ exposure_value / analogue_gain;
+ break;
+ }
+ shutter_time = exposure_mode_->shutter[stage];
+ }
+ if (status_.fixed_analogue_gain == 0.0) {
+ if (exposure_mode_->gain[stage] *
+ shutter_time >=
+ exposure_value) {
+ analogue_gain =
+ exposure_value / shutter_time;
+ break;
+ }
+ analogue_gain = exposure_mode_->gain[stage];
+ }
+ }
+ }
+ RPI_LOG("Divided up shutter and gain are " << shutter_time << " and "
+ << analogue_gain);
+ // Finally adjust shutter time for flicker avoidance (require both
+ // shutter and gain not to be fixed).
+ if (status_.fixed_shutter == 0.0 &&
+ status_.fixed_analogue_gain == 0.0 &&
+ status_.flicker_period != 0.0) {
+ int flicker_periods = shutter_time / status_.flicker_period;
+ if (flicker_periods > 0) {
+ double new_shutter_time = flicker_periods * status_.flicker_period;
+ analogue_gain *= shutter_time / new_shutter_time;
+ // We should still not allow the ag to go over the
+ // largest value in the exposure mode. Note that this
+ // may force more of the total exposure into the digital
+ // gain as a side-effect.
+ analogue_gain = std::min(analogue_gain,
+ exposure_mode_->gain.back());
+ shutter_time = new_shutter_time;
+ }
+ RPI_LOG("After flicker avoidance, shutter "
+ << shutter_time << " gain " << analogue_gain);
+ }
+ filtered_.shutter = shutter_time;
+ filtered_.analogue_gain = analogue_gain;
+}
+
+void Agc::writeAndFinish(Metadata *image_metadata, bool desaturate)
+{
+ status_.total_exposure_value = filtered_.total_exposure;
+ status_.target_exposure_value = desaturate ? 0 : target_.total_exposure_no_dg;
+ status_.shutter_time = filtered_.shutter;
+ status_.analogue_gain = filtered_.analogue_gain;
+ {
+ std::unique_lock<std::mutex> lock(output_mutex_);
+ output_status_ = status_;
+ }
+ // Write to metadata as well, in case anyone wants to update the camera
+ // immediately.
+ image_metadata->Set("agc.status", status_);
+ RPI_LOG("Output written, total exposure requested is "
+ << filtered_.total_exposure);
+ RPI_LOG("Camera exposure update: shutter time " << filtered_.shutter <<
+ " analogue gain " << filtered_.analogue_gain);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Agc(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
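
The staged shutter/gain split in Agc::divvyupExposure() above can be seen in isolation in the following minimal sketch; the exposure profile values are illustrative assumptions, not taken from any tuning file:

// Standalone sketch (not part of the patch) of the staged exposure split:
// walk the shutter/gain profile, preferring longer shutter times first,
// until shutter * gain covers the requested exposure value.
#include <cstdio>
#include <vector>

int main()
{
        std::vector<double> shutter = { 100, 10000, 30000, 60000 }; // microseconds
        std::vector<double> gain = { 1.0, 2.0, 4.0, 8.0 };
        double exposure_value = 150000; // shutter * gain product we want
        double shutter_time = shutter[0], analogue_gain = gain[0];
        if (shutter_time * analogue_gain < exposure_value) {
                for (unsigned int stage = 1; stage < gain.size(); stage++) {
                        // At each stage, try raising the shutter first, then the gain.
                        if (shutter[stage] * analogue_gain >= exposure_value) {
                                shutter_time = exposure_value / analogue_gain;
                                break;
                        }
                        shutter_time = shutter[stage];
                        if (gain[stage] * shutter_time >= exposure_value) {
                                analogue_gain = exposure_value / shutter_time;
                                break;
                        }
                        analogue_gain = gain[stage];
                }
        }
        printf("shutter %.0fus gain %.2f\n", shutter_time, analogue_gain);
        return 0;
}

With the values above this prints "shutter 37500us gain 4.00", i.e. the shutter is stretched as far as the profile allows before more analogue gain is taken.
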
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.hpp b/src/ipa/raspberrypi/controller/rpi/agc.hpp
new file mode 100644
index 00000000..dbcefba6
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/agc.hpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * agc.hpp - AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <vector>
+#include <mutex>
+
+#include "../agc_algorithm.hpp"
+#include "../agc_status.h"
+#include "../pwl.hpp"
+
+// This is our implementation of AGC.
+
+// This is the number actually set up by the firmware, not the maximum possible
+// number (which is 16).
+
+#define AGC_STATS_SIZE 15
+
+namespace RPi {
+
+struct AgcMeteringMode {
+ double weights[AGC_STATS_SIZE];
+ void Read(boost::property_tree::ptree const &params);
+};
+
+struct AgcExposureMode {
+ std::vector<double> shutter;
+ std::vector<double> gain;
+ void Read(boost::property_tree::ptree const &params);
+};
+
+struct AgcConstraint {
+ enum class Bound { LOWER = 0, UPPER = 1 };
+ Bound bound;
+ double q_lo;
+ double q_hi;
+ Pwl Y_target;
+ void Read(boost::property_tree::ptree const &params);
+};
+
+typedef std::vector<AgcConstraint> AgcConstraintMode;
+
+struct AgcConfig {
+ void Read(boost::property_tree::ptree const &params);
+ std::map<std::string, AgcMeteringMode> metering_modes;
+ std::map<std::string, AgcExposureMode> exposure_modes;
+ std::map<std::string, AgcConstraintMode> constraint_modes;
+ Pwl Y_target;
+ double speed;
+ uint16_t startup_frames;
+ double max_change;
+ double min_change;
+ double fast_reduce_threshold;
+ double speed_up_threshold;
+ std::string default_metering_mode;
+ std::string default_exposure_mode;
+ std::string default_constraint_mode;
+ double base_ev;
+};
+
+class Agc : public AgcAlgorithm
+{
+public:
+ Agc(Controller *controller);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void SetEv(double ev) override;
+ void SetFlickerPeriod(double flicker_period) override;
+ void SetFixedShutter(double fixed_shutter) override; // microseconds
+ void SetFixedAnalogueGain(double fixed_analogue_gain) override;
+ void SetMeteringMode(std::string const &metering_mode_name) override;
+ void SetExposureMode(std::string const &exposure_mode_name) override;
+ void SetConstraintMode(std::string const &contraint_mode_name) override;
+ void Prepare(Metadata *image_metadata) override;
+ void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+
+private:
+ AgcConfig config_;
+ void housekeepConfig();
+ void fetchCurrentExposure(Metadata *image_metadata);
+ void computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
+ double &gain, double &target_Y);
+ void computeTargetExposure(double gain);
+ bool applyDigitalGain(Metadata *image_metadata, double gain,
+ double target_Y);
+ void filterExposure(bool desaturate);
+ void divvyupExposure();
+ void writeAndFinish(Metadata *image_metadata, bool desaturate);
+ AgcMeteringMode *metering_mode_;
+ AgcExposureMode *exposure_mode_;
+ AgcConstraintMode *constraint_mode_;
+ uint64_t frame_count_;
+ struct ExposureValues {
+ ExposureValues() : shutter(0), analogue_gain(0),
+ total_exposure(0), total_exposure_no_dg(0) {}
+ double shutter;
+ double analogue_gain;
+ double total_exposure;
+ double total_exposure_no_dg; // without digital gain
+ };
+ ExposureValues current_; // values for the current frame
+ ExposureValues target_; // calculate the values we want here
+ ExposureValues filtered_; // these values are filtered towards target
+ AgcStatus status_; // to "latch" settings so they can't change
+ AgcStatus output_status_; // the status we will write out
+ std::mutex output_mutex_;
+ int lock_count_;
+ // Below here the "settings" that applications can change.
+ std::mutex settings_mutex_;
+ std::string metering_mode_name_;
+ std::string exposure_mode_name_;
+ std::string constraint_mode_name_;
+ double ev_;
+ double flicker_period_;
+ double fixed_shutter_;
+ double fixed_analogue_gain_;
+};
+
+} // namespace RPi
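
The exposure filtering controlled by AgcConfig::speed (see Agc::filterExposure() above) is a simple IIR filter. The sketch below, with assumed starting values, shows how the filtered exposure approaches a new target and how the sqrt(speed) boost near the target finishes the convergence quickly:

// Minimal sketch (assumed values, not from the patch) of the IIR exposure
// filter: each frame moves a fraction "speed" of the way to the target, and
// the fraction is boosted to sqrt(speed) once within 20% of the target.
#include <cmath>
#include <cstdio>

int main()
{
        double speed = 0.2;        // matches the "speed" default in AgcConfig
        double filtered = 10000.0; // current filtered total exposure
        double target = 40000.0;   // newly computed target total exposure
        for (int frame = 0; frame < 12; frame++) {
                double s = speed;
                if (filtered < 1.2 * target && filtered > 0.8 * target)
                        s = std::sqrt(s); // close to the target: converge faster
                filtered = s * target + (1.0 - s) * filtered;
                printf("frame %2d filtered exposure %.0f\n", frame, filtered);
        }
        return 0;
}
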
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.cpp b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
new file mode 100644
index 00000000..821a0ca3
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
@@ -0,0 +1,705 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * alsc.cpp - ALSC (auto lens shading correction) control algorithm
+ */
+#include <math.h>
+
+#include "../awb_status.h"
+#include "alsc.hpp"
+
+// Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm.
+
+using namespace RPi;
+
+#define NAME "rpi.alsc"
+
+static const int X = ALSC_CELLS_X;
+static const int Y = ALSC_CELLS_Y;
+static const int XY = X * Y;
+static const double INSUFFICIENT_DATA = -1.0;
+
+Alsc::Alsc(Controller *controller)
+ : Algorithm(controller)
+{
+ async_abort_ = async_start_ = async_started_ = async_finished_ = false;
+ async_thread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
+}
+
+Alsc::~Alsc()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ async_abort_ = true;
+ async_signal_.notify_one();
+ }
+ async_thread_.join();
+}
+
+char const *Alsc::Name() const
+{
+ return NAME;
+}
+
+static void generate_lut(double *lut, boost::property_tree::ptree const &params)
+{
+ double cstrength = params.get<double>("corner_strength", 2.0);
+ if (cstrength <= 1.0)
+ throw std::runtime_error("Alsc: corner_strength must be > 1.0");
+ double asymmetry = params.get<double>("asymmetry", 1.0);
+ if (asymmetry < 0)
+ throw std::runtime_error("Alsc: asymmetry must be >= 0");
+ double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
+ double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
+ int num = 0;
+ for (int y = 0; y < Y; y++) {
+ for (int x = 0; x < X; x++) {
+ double dy = y - Y / 2 + 0.5,
+ dx = (x - X / 2 + 0.5) * asymmetry;
+ double r2 = (dx * dx + dy * dy) / R2;
+ lut[num++] =
+ (f1 * r2 + f2) * (f1 * r2 + f2) /
+ (f2 * f2); // this reproduces the cos^4 rule
+ }
+ }
+}
+
+static void read_lut(double *lut, boost::property_tree::ptree const &params)
+{
+ int num = 0;
+ const int max_num = XY;
+ for (auto &p : params) {
+ if (num == max_num)
+ throw std::runtime_error(
+ "Alsc: too many entries in LSC table");
+ lut[num++] = p.second.get_value<double>();
+ }
+ if (num < max_num)
+ throw std::runtime_error("Alsc: too few entries in LSC table");
+}
+
+static void read_calibrations(std::vector<AlscCalibration> &calibrations,
+ boost::property_tree::ptree const &params,
+ std::string const &name)
+{
+ if (params.get_child_optional(name)) {
+ double last_ct = 0;
+ for (auto &p : params.get_child(name)) {
+ double ct = p.second.get<double>("ct");
+ if (ct <= last_ct)
+ throw std::runtime_error(
+ "Alsc: entries in " + name +
+ " must be in increasing ct order");
+ AlscCalibration calibration;
+ calibration.ct = last_ct = ct;
+ boost::property_tree::ptree const &table =
+ p.second.get_child("table");
+ int num = 0;
+ for (auto it = table.begin(); it != table.end(); it++) {
+ if (num == XY)
+ throw std::runtime_error(
+ "Alsc: too many values for ct " +
+ std::to_string(ct) + " in " +
+ name);
+ calibration.table[num++] =
+ it->second.get_value<double>();
+ }
+ if (num != XY)
+ throw std::runtime_error(
+ "Alsc: too few values for ct " +
+ std::to_string(ct) + " in " + name);
+ calibrations.push_back(calibration);
+ RPI_LOG("Read " << name << " calibration for ct "
+ << ct);
+ }
+ }
+}
+
+void Alsc::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG("Alsc");
+ config_.frame_period = params.get<uint16_t>("frame_period", 12);
+ config_.startup_frames = params.get<uint16_t>("startup_frames", 10);
+ config_.speed = params.get<double>("speed", 0.05);
+ double sigma = params.get<double>("sigma", 0.01);
+ config_.sigma_Cr = params.get<double>("sigma_Cr", sigma);
+ config_.sigma_Cb = params.get<double>("sigma_Cb", sigma);
+ config_.min_count = params.get<double>("min_count", 10.0);
+ config_.min_G = params.get<uint16_t>("min_G", 50);
+ config_.omega = params.get<double>("omega", 1.3);
+ config_.n_iter = params.get<uint32_t>("n_iter", X + Y);
+ config_.luminance_strength =
+ params.get<double>("luminance_strength", 1.0);
+ for (int i = 0; i < XY; i++)
+ config_.luminance_lut[i] = 1.0;
+ if (params.get_child_optional("corner_strength"))
+ generate_lut(config_.luminance_lut, params);
+ else if (params.get_child_optional("luminance_lut"))
+ read_lut(config_.luminance_lut,
+ params.get_child("luminance_lut"));
+ else
+ RPI_WARN("Alsc: no luminance table - assume unity everywhere");
+ read_calibrations(config_.calibrations_Cr, params, "calibrations_Cr");
+ read_calibrations(config_.calibrations_Cb, params, "calibrations_Cb");
+ config_.default_ct = params.get<double>("default_ct", 4500.0);
+ config_.threshold = params.get<double>("threshold", 1e-3);
+}
+
+static void get_cal_table(double ct,
+ std::vector<AlscCalibration> const &calibrations,
+ double cal_table[XY]);
+static void resample_cal_table(double const cal_table_in[XY],
+ CameraMode const &camera_mode,
+ double cal_table_out[XY]);
+static void compensate_lambdas_for_cal(double const cal_table[XY],
+ double const old_lambdas[XY],
+ double new_lambdas[XY]);
+static void add_luminance_to_tables(double results[3][Y][X],
+ double const lambda_r[XY], double lambda_g,
+ double const lambda_b[XY],
+ double const luminance_lut[XY],
+ double luminance_strength);
+
+void Alsc::Initialise()
+{
+ RPI_LOG("Alsc");
+ frame_count2_ = frame_count_ = frame_phase_ = 0;
+ first_time_ = true;
+ // Initialise the lambdas. Each call to Process then restarts from the
+ // previous results. Also initialise the previous frame tables to the
+ // same harmless values.
+ for (int i = 0; i < XY; i++)
+ lambda_r_[i] = lambda_b_[i] = 1.0;
+}
+
+void Alsc::SwitchMode(CameraMode const &camera_mode)
+{
+ // There's a bit of a question what we should do if the "crop" of the
+ // camera mode has changed. Any calculation currently in flight would
+ // not be useful to the new mode, so arguably we should abort it, and
+ // generate a new table (like the "first_time" code already here). When
+ // the crop doesn't change, we can presumably just leave things
+ // alone. For now, I think we'll just wait and see. When the crop does
+ // change, any effects should be transient, and if they're not transient
+ // enough, we'll revisit the question then.
+ camera_mode_ = camera_mode;
+ if (first_time_) {
+ // On the first time, arrange for something sensible in the
+ // initial tables. Construct the tables for some default colour
+ // temperature. This echoes the code in doAlsc, without the
+ // adaptive algorithm.
+ double cal_table_r[XY], cal_table_b[XY], cal_table_tmp[XY];
+ get_cal_table(4000, config_.calibrations_Cr, cal_table_tmp);
+ resample_cal_table(cal_table_tmp, camera_mode_, cal_table_r);
+ get_cal_table(4000, config_.calibrations_Cb, cal_table_tmp);
+ resample_cal_table(cal_table_tmp, camera_mode_, cal_table_b);
+ compensate_lambdas_for_cal(cal_table_r, lambda_r_,
+ async_lambda_r_);
+ compensate_lambdas_for_cal(cal_table_b, lambda_b_,
+ async_lambda_b_);
+ add_luminance_to_tables(sync_results_, async_lambda_r_, 1.0,
+ async_lambda_b_, config_.luminance_lut,
+ config_.luminance_strength);
+ memcpy(prev_sync_results_, sync_results_,
+ sizeof(prev_sync_results_));
+ first_time_ = false;
+ }
+}
+
+void Alsc::fetchAsyncResults()
+{
+ RPI_LOG("Fetch ALSC results");
+ async_finished_ = false;
+ async_started_ = false;
+ memcpy(sync_results_, async_results_, sizeof(sync_results_));
+}
+
+static double get_ct(Metadata *metadata, double default_ct)
+{
+ AwbStatus awb_status;
+ awb_status.temperature_K = default_ct; // in case nothing found
+ if (metadata->Get("awb.status", awb_status) != 0)
+ RPI_WARN("Alsc: no AWB results found, using "
+ << awb_status.temperature_K);
+ else
+ RPI_LOG("Alsc: AWB results found, using "
+ << awb_status.temperature_K);
+ return awb_status.temperature_K;
+}
+
+static void copy_stats(bcm2835_isp_stats_region regions[XY], StatisticsPtr &stats,
+ AlscStatus const &status)
+{
+ bcm2835_isp_stats_region *input_regions = stats->awb_stats;
+ double *r_table = (double *)status.r;
+ double *g_table = (double *)status.g;
+ double *b_table = (double *)status.b;
+ for (int i = 0; i < XY; i++) {
+ regions[i].r_sum = input_regions[i].r_sum / r_table[i];
+ regions[i].g_sum = input_regions[i].g_sum / g_table[i];
+ regions[i].b_sum = input_regions[i].b_sum / b_table[i];
+ regions[i].counted = input_regions[i].counted;
+ // (don't care about the uncounted value)
+ }
+}
+
+void Alsc::restartAsync(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ RPI_LOG("Starting ALSC thread");
+ // Get the current colour temperature. It's all we need from the
+ // metadata.
+ ct_ = get_ct(image_metadata, config_.default_ct);
+ // We have to copy the statistics here, dividing out our best guess of
+ // the LSC table that the pipeline applied to them.
+ AlscStatus alsc_status;
+ if (image_metadata->Get("alsc.status", alsc_status) != 0) {
+ RPI_WARN("No ALSC status found for applied gains!");
+ for (int y = 0; y < Y; y++)
+ for (int x = 0; x < X; x++) {
+ alsc_status.r[y][x] = 1.0;
+ alsc_status.g[y][x] = 1.0;
+ alsc_status.b[y][x] = 1.0;
+ }
+ }
+ copy_stats(statistics_, stats, alsc_status);
+ frame_phase_ = 0;
+ // copy the camera mode so it won't change during the calculations
+ async_camera_mode_ = camera_mode_;
+ async_start_ = true;
+ async_started_ = true;
+ async_signal_.notify_one();
+}
+
+void Alsc::Prepare(Metadata *image_metadata)
+{
+ // Count frames since we started, and since we last poked the async
+ // thread.
+ if (frame_count_ < (int)config_.startup_frames)
+ frame_count_++;
+ double speed = frame_count_ < (int)config_.startup_frames
+ ? 1.0
+ : config_.speed;
+ RPI_LOG("Alsc: frame_count " << frame_count_ << " speed " << speed);
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (async_started_ && async_finished_) {
+ RPI_LOG("ALSC thread finished");
+ fetchAsyncResults();
+ }
+ }
+ // Apply IIR filter to results and program into the pipeline.
+ double *ptr = (double *)sync_results_,
+ *pptr = (double *)prev_sync_results_;
+ for (unsigned int i = 0;
+ i < sizeof(sync_results_) / sizeof(double); i++)
+ pptr[i] = speed * ptr[i] + (1.0 - speed) * pptr[i];
+ // Put output values into status metadata.
+ AlscStatus status;
+ memcpy(status.r, prev_sync_results_[0], sizeof(status.r));
+ memcpy(status.g, prev_sync_results_[1], sizeof(status.g));
+ memcpy(status.b, prev_sync_results_[2], sizeof(status.b));
+ image_metadata->Set("alsc.status", status);
+}
+
+void Alsc::Process(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ // Count frames since we started, and since we last poked the async
+ // thread.
+ if (frame_phase_ < (int)config_.frame_period)
+ frame_phase_++;
+ if (frame_count2_ < (int)config_.startup_frames)
+ frame_count2_++;
+ RPI_LOG("Alsc: frame_phase " << frame_phase_);
+ if (frame_phase_ >= (int)config_.frame_period ||
+ frame_count2_ < (int)config_.startup_frames) {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (async_started_ == false) {
+ RPI_LOG("ALSC thread starting");
+ restartAsync(stats, image_metadata);
+ }
+ }
+}
+
+void Alsc::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ async_signal_.wait(lock, [&] {
+ return async_start_ || async_abort_;
+ });
+ async_start_ = false;
+ if (async_abort_)
+ break;
+ }
+ doAlsc();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ async_finished_ = true;
+ sync_signal_.notify_one();
+ }
+ }
+}
+
+void get_cal_table(double ct, std::vector<AlscCalibration> const &calibrations,
+ double cal_table[XY])
+{
+ if (calibrations.empty()) {
+ for (int i = 0; i < XY; i++)
+ cal_table[i] = 1.0;
+ RPI_LOG("Alsc: no calibrations found");
+ } else if (ct <= calibrations.front().ct) {
+ memcpy(cal_table, calibrations.front().table,
+ XY * sizeof(double));
+ RPI_LOG("Alsc: using calibration for "
+ << calibrations.front().ct);
+ } else if (ct >= calibrations.back().ct) {
+ memcpy(cal_table, calibrations.back().table,
+ XY * sizeof(double));
+ RPI_LOG("Alsc: using calibration for "
+ << calibrations.back().ct);
+ } else {
+ int idx = 0;
+ while (ct > calibrations[idx + 1].ct)
+ idx++;
+ double ct0 = calibrations[idx].ct,
+ ct1 = calibrations[idx + 1].ct;
+ RPI_LOG("Alsc: ct is " << ct << ", interpolating between "
+ << ct0 << " and " << ct1);
+ for (int i = 0; i < XY; i++)
+ cal_table[i] =
+ (calibrations[idx].table[i] * (ct1 - ct) +
+ calibrations[idx + 1].table[i] * (ct - ct0)) /
+ (ct1 - ct0);
+ }
+}
+
+void resample_cal_table(double const cal_table_in[XY],
+ CameraMode const &camera_mode, double cal_table_out[XY])
+{
+ // Precalculate and cache the x sampling locations and phases to save
+ // recomputing them on every row.
+ int x_lo[X], x_hi[X];
+ double xf[X];
+ double scale_x = camera_mode.sensor_width /
+ (camera_mode.width * camera_mode.scale_x);
+ double x_off = camera_mode.crop_x / (double)camera_mode.sensor_width;
+ double x = .5 / scale_x + x_off * X - .5;
+ double x_inc = 1 / scale_x;
+ for (int i = 0; i < X; i++, x += x_inc) {
+ x_lo[i] = floor(x);
+ xf[i] = x - x_lo[i];
+ x_hi[i] = std::min(x_lo[i] + 1, X - 1);
+ x_lo[i] = std::max(x_lo[i], 0);
+ }
+ // Now march over the output table generating the new values.
+ double scale_y = camera_mode.sensor_height /
+ (camera_mode.height * camera_mode.scale_y);
+ double y_off = camera_mode.crop_y / (double)camera_mode.sensor_height;
+ double y = .5 / scale_y + y_off * Y - .5;
+ double y_inc = 1 / scale_y;
+ for (int j = 0; j < Y; j++, y += y_inc) {
+ int y_lo = floor(y);
+ double yf = y - y_lo;
+ int y_hi = std::min(y_lo + 1, Y - 1);
+ y_lo = std::max(y_lo, 0);
+ double const *row_above = cal_table_in + X * y_lo;
+ double const *row_below = cal_table_in + X * y_hi;
+ for (int i = 0; i < X; i++) {
+ double above = row_above[x_lo[i]] * (1 - xf[i]) +
+ row_above[x_hi[i]] * xf[i];
+ double below = row_below[x_lo[i]] * (1 - xf[i]) +
+ row_below[x_hi[i]] * xf[i];
+ *(cal_table_out++) = above * (1 - yf) + below * yf;
+ }
+ }
+}
+
+// Calculate chrominance statistics (R/G and B/G) for each region.
+static_assert(XY == AWB_REGIONS, "ALSC/AWB statistics region mismatch");
+static void calculate_Cr_Cb(bcm2835_isp_stats_region *awb_region, double Cr[XY],
+ double Cb[XY], uint32_t min_count, uint16_t min_G)
+{
+ for (int i = 0; i < XY; i++) {
+ bcm2835_isp_stats_region &zone = awb_region[i];
+ if (zone.counted <= min_count ||
+ zone.g_sum / zone.counted <= min_G) {
+ Cr[i] = Cb[i] = INSUFFICIENT_DATA;
+ continue;
+ }
+ Cr[i] = zone.r_sum / (double)zone.g_sum;
+ Cb[i] = zone.b_sum / (double)zone.g_sum;
+ }
+}
+
+static void apply_cal_table(double const cal_table[XY], double C[XY])
+{
+ for (int i = 0; i < XY; i++)
+ if (C[i] != INSUFFICIENT_DATA)
+ C[i] *= cal_table[i];
+}
+
+void compensate_lambdas_for_cal(double const cal_table[XY],
+ double const old_lambdas[XY],
+ double new_lambdas[XY])
+{
+ double min_new_lambda = std::numeric_limits<double>::max();
+ for (int i = 0; i < XY; i++) {
+ new_lambdas[i] = old_lambdas[i] * cal_table[i];
+ min_new_lambda = std::min(min_new_lambda, new_lambdas[i]);
+ }
+ for (int i = 0; i < XY; i++)
+ new_lambdas[i] /= min_new_lambda;
+}
+
+static void print_cal_table(double const C[XY])
+{
+ printf("table: [\n");
+ for (int j = 0; j < Y; j++) {
+ for (int i = 0; i < X; i++) {
+ printf("%5.3f", 1.0 / C[j * X + i]);
+ if (i != X - 1 || j != Y - 1)
+ printf(",");
+ }
+ printf("\n");
+ }
+ printf("]\n");
+}
+
+// Compute weight out of 1.0 which reflects how similar we wish to make the
+// colours of these two regions.
+static double compute_weight(double C_i, double C_j, double sigma)
+{
+ if (C_i == INSUFFICIENT_DATA || C_j == INSUFFICIENT_DATA)
+ return 0;
+ double diff = (C_i - C_j) / sigma;
+ return exp(-diff * diff / 2);
+}
+
+// Compute all weights.
+static void compute_W(double const C[XY], double sigma, double W[XY][4])
+{
+ for (int i = 0; i < XY; i++) {
+ // Start with neighbour above and go clockwise.
+ W[i][0] = i >= X ? compute_weight(C[i], C[i - X], sigma) : 0;
+ W[i][1] = i % X < X - 1 ? compute_weight(C[i], C[i + 1], sigma)
+ : 0;
+ W[i][2] =
+ i < XY - X ? compute_weight(C[i], C[i + X], sigma) : 0;
+ W[i][3] = i % X ? compute_weight(C[i], C[i - 1], sigma) : 0;
+ }
+}
+
+// Compute M, the large but sparse matrix such that M * lambdas = 0.
+static void construct_M(double const C[XY], double const W[XY][4],
+ double M[XY][4])
+{
+ double epsilon = 0.001;
+ for (int i = 0; i < XY; i++) {
+ // Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
+ // be zero so the equation is still set up correctly.
+ int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
+ !!(i % X); // total number of neighbours
+ // we'll divide the diagonal out straight away
+ double diagonal =
+ (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) *
+ C[i];
+ M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) /
+ diagonal
+ : 0;
+ M[i][1] = i % X < X - 1
+ ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) /
+ diagonal
+ : 0;
+ M[i][2] = i < XY - X
+ ? (W[i][2] * C[i + X] + epsilon / m * C[i]) /
+ diagonal
+ : 0;
+ M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) /
+ diagonal
+ : 0;
+ }
+}
+
+// In the compute_lambda_ functions, note that the matrix coefficients for the
+ // left/right neighbours are zero down the left/right edges, so we don't
+ // need to test the i value to exclude them.
+static double compute_lambda_bottom(int i, double const M[XY][4],
+ double lambda[XY])
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X] +
+ M[i][3] * lambda[i - 1];
+}
+static double compute_lambda_bottom_start(int i, double const M[XY][4],
+ double lambda[XY])
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + X];
+}
+static double compute_lambda_interior(int i, double const M[XY][4],
+ double lambda[XY])
+{
+ return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
+ M[i][2] * lambda[i + X] + M[i][3] * lambda[i - 1];
+}
+static double compute_lambda_top(int i, double const M[XY][4],
+ double lambda[XY])
+{
+ return M[i][0] * lambda[i - X] + M[i][1] * lambda[i + 1] +
+ M[i][3] * lambda[i - 1];
+}
+static double compute_lambda_top_end(int i, double const M[XY][4],
+ double lambda[XY])
+{
+ return M[i][0] * lambda[i - X] + M[i][3] * lambda[i - 1];
+}
+
+// Gauss-Seidel iteration with over-relaxation.
+static double gauss_seidel2_SOR(double const M[XY][4], double omega,
+ double lambda[XY])
+{
+ double old_lambda[XY];
+ for (int i = 0; i < XY; i++)
+ old_lambda[i] = lambda[i];
+ int i;
+ lambda[0] = compute_lambda_bottom_start(0, M, lambda);
+ for (i = 1; i < X; i++)
+ lambda[i] = compute_lambda_bottom(i, M, lambda);
+ for (; i < XY - X; i++)
+ lambda[i] = compute_lambda_interior(i, M, lambda);
+ for (; i < XY - 1; i++)
+ lambda[i] = compute_lambda_top(i, M, lambda);
+ lambda[i] = compute_lambda_top_end(i, M, lambda);
+ // Also solve the system from bottom to top, to help spread the updates
+ // better.
+ lambda[i] = compute_lambda_top_end(i, M, lambda);
+ for (i = XY - 2; i >= XY - X; i--)
+ lambda[i] = compute_lambda_top(i, M, lambda);
+ for (; i >= X; i--)
+ lambda[i] = compute_lambda_interior(i, M, lambda);
+ for (; i >= 1; i--)
+ lambda[i] = compute_lambda_bottom(i, M, lambda);
+ lambda[0] = compute_lambda_bottom_start(0, M, lambda);
+ double max_diff = 0;
+ for (int i = 0; i < XY; i++) {
+ lambda[i] = old_lambda[i] + (lambda[i] - old_lambda[i]) * omega;
+ if (fabs(lambda[i] - old_lambda[i]) > fabs(max_diff))
+ max_diff = lambda[i] - old_lambda[i];
+ }
+ return max_diff;
+}
+
+// Normalise the values so that the smallest value is 1.
+static void normalise(double *ptr, size_t n)
+{
+ double minval = ptr[0];
+ for (size_t i = 1; i < n; i++)
+ minval = std::min(minval, ptr[i]);
+ for (size_t i = 0; i < n; i++)
+ ptr[i] /= minval;
+}
+
+static void run_matrix_iterations(double const C[XY], double lambda[XY],
+ double const W[XY][4], double omega,
+ int n_iter, double threshold)
+{
+ double M[XY][4];
+ construct_M(C, W, M);
+ double last_max_diff = std::numeric_limits<double>::max();
+ for (int i = 0; i < n_iter; i++) {
+ double max_diff = fabs(gauss_seidel2_SOR(M, omega, lambda));
+ if (max_diff < threshold) {
+ RPI_LOG("Stop after " << i + 1 << " iterations");
+ break;
+ }
+ // this happens very occasionally (so make a note), though
+ // doesn't seem to matter
+ if (max_diff > last_max_diff)
+ RPI_LOG("Iteration " << i << ": max_diff gone up "
+ << last_max_diff << " to "
+ << max_diff);
+ last_max_diff = max_diff;
+ }
+ // We're going to normalise the lambdas so the smallest is 1. Not sure
+ // this is really necessary as they get renormalised later, but I
+ // suppose it does stop these quantities from wandering off...
+ normalise(lambda, XY);
+}
+
+static void add_luminance_rb(double result[XY], double const lambda[XY],
+ double const luminance_lut[XY],
+ double luminance_strength)
+{
+ for (int i = 0; i < XY; i++)
+ result[i] = lambda[i] *
+ ((luminance_lut[i] - 1) * luminance_strength + 1);
+}
+
+static void add_luminance_g(double result[XY], double lambda,
+ double const luminance_lut[XY],
+ double luminance_strength)
+{
+ for (int i = 0; i < XY; i++)
+ result[i] = lambda *
+ ((luminance_lut[i] - 1) * luminance_strength + 1);
+}
+
+void add_luminance_to_tables(double results[3][Y][X], double const lambda_r[XY],
+ double lambda_g, double const lambda_b[XY],
+ double const luminance_lut[XY],
+ double luminance_strength)
+{
+ add_luminance_rb((double *)results[0], lambda_r, luminance_lut,
+ luminance_strength);
+ add_luminance_g((double *)results[1], lambda_g, luminance_lut,
+ luminance_strength);
+ add_luminance_rb((double *)results[2], lambda_b, luminance_lut,
+ luminance_strength);
+ normalise((double *)results, 3 * XY);
+}
+
+void Alsc::doAlsc()
+{
+ double Cr[XY], Cb[XY], Wr[XY][4], Wb[XY][4], cal_table_r[XY],
+ cal_table_b[XY], cal_table_tmp[XY];
+ // Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
+ // usable.
+ calculate_Cr_Cb(statistics_, Cr, Cb, config_.min_count, config_.min_G);
+ // Fetch the new calibrations (if any) for this CT. Resample them in
+ // case the camera mode is not full-frame.
+ get_cal_table(ct_, config_.calibrations_Cr, cal_table_tmp);
+ resample_cal_table(cal_table_tmp, async_camera_mode_, cal_table_r);
+ get_cal_table(ct_, config_.calibrations_Cb, cal_table_tmp);
+ resample_cal_table(cal_table_tmp, async_camera_mode_, cal_table_b);
+ // You could print out the cal tables for this image here, if you're
+ // tuning the algorithm...
+ (void)print_cal_table;
+ // Apply any calibration to the statistics, so the adaptive algorithm
+ // makes only the extra adjustments.
+ apply_cal_table(cal_table_r, Cr);
+ apply_cal_table(cal_table_b, Cb);
+ // Compute weights between zones.
+ compute_W(Cr, config_.sigma_Cr, Wr);
+ compute_W(Cb, config_.sigma_Cb, Wb);
+ // Run Gauss-Seidel iterations over the resulting matrix, for R and B.
+ run_matrix_iterations(Cr, lambda_r_, Wr, config_.omega, config_.n_iter,
+ config_.threshold);
+ run_matrix_iterations(Cb, lambda_b_, Wb, config_.omega, config_.n_iter,
+ config_.threshold);
+ // Fold the calibrated gains into our final lambda values. (Note that on
+ // the next run, we re-start with the lambda values that don't have the
+ // calibration gains included.)
+ compensate_lambdas_for_cal(cal_table_r, lambda_r_, async_lambda_r_);
+ compensate_lambdas_for_cal(cal_table_b, lambda_b_, async_lambda_b_);
+ // Fold in the luminance table at the appropriate strength.
+ add_luminance_to_tables(async_results_, async_lambda_r_, 1.0,
+ async_lambda_b_, config_.luminance_lut,
+ config_.luminance_strength);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Alsc(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
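
The colour-temperature interpolation performed by get_cal_table() above amounts to a per-cell linear blend between the two calibrations that bracket the current CT. A minimal sketch with a made-up four-cell table (the real tables have ALSC_CELLS_X * ALSC_CELLS_Y entries):

// Standalone sketch (illustrative values only) of the CT interpolation:
// each cell is the linear blend of the two bracketing calibration tables,
// weighted by how close the CT is to each calibration point.
#include <cstdio>

int main()
{
        const int cells = 4;           // tiny table for illustration
        double ct0 = 3000, ct1 = 6000; // calibration colour temperatures
        double table0[cells] = { 1.0, 1.1, 1.2, 1.3 };
        double table1[cells] = { 1.0, 1.3, 1.6, 1.9 };
        double ct = 4500;              // current AWB estimate
        double out[cells];
        for (int i = 0; i < cells; i++)
                out[i] = (table0[i] * (ct1 - ct) + table1[i] * (ct - ct0)) /
                         (ct1 - ct0);
        for (int i = 0; i < cells; i++)
                printf("cell %d: %.3f\n", i, out[i]);
        return 0;
}
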
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.hpp b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
new file mode 100644
index 00000000..c8ed3d21
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * alsc.hpp - ALSC (auto lens shading correction) control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include "../algorithm.hpp"
+#include "../alsc_status.h"
+
+namespace RPi {
+
+// Algorithm to generate automagic LSC (Lens Shading Correction) tables.
+
+struct AlscCalibration {
+ double ct;
+ double table[ALSC_CELLS_X * ALSC_CELLS_Y];
+};
+
+struct AlscConfig {
+ // Only repeat the ALSC calculation every "this many" frames
+ uint16_t frame_period;
+ // number of initial frames for which speed is taken as 1.0 (maximum)
+ uint16_t startup_frames;
+ // IIR filter speed applied to algorithm results
+ double speed;
+ double sigma_Cr;
+ double sigma_Cb;
+ double min_count;
+ uint16_t min_G;
+ double omega;
+ uint32_t n_iter;
+ double luminance_lut[ALSC_CELLS_X * ALSC_CELLS_Y];
+ double luminance_strength;
+ std::vector<AlscCalibration> calibrations_Cr;
+ std::vector<AlscCalibration> calibrations_Cb;
+ double default_ct; // colour temperature if no metadata found
+ double threshold; // iteration termination threshold
+};
+
+class Alsc : public Algorithm
+{
+public:
+ Alsc(Controller *controller = NULL);
+ ~Alsc();
+ char const *Name() const override;
+ void Initialise() override;
+ void SwitchMode(CameraMode const &camera_mode) override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+ void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+
+private:
+ // configuration is read-only, and available to both threads
+ AlscConfig config_;
+ bool first_time_;
+ std::atomic<CameraMode> camera_mode_;
+ std::thread async_thread_;
+ void asyncFunc(); // asynchronous thread function
+ std::mutex mutex_;
+ CameraMode async_camera_mode_;
+ // condvar for async thread to wait on
+ std::condition_variable async_signal_;
+ // condvar for synchronous thread to wait on
+ std::condition_variable sync_signal_;
+ // for sync thread to check if async thread finished (requires mutex)
+ bool async_finished_;
+ // for async thread to check if it's been told to run (requires mutex)
+ bool async_start_;
+ // for async thread to check if it's been told to quit (requires mutex)
+ bool async_abort_;
+
+ // The following are only for the synchronous thread to use:
+ // for sync thread to note it has asked async thread to run
+ bool async_started_;
+ // counts up to frame_period before restarting the async thread
+ int frame_phase_;
+ // counts up to startup_frames
+ int frame_count_;
+ // counts up to startup_frames for Process method
+ int frame_count2_;
+ double sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
+ double prev_sync_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
+ // The following are for the asynchronous thread to use, though the main
+ // thread can set/reset them if the async thread is known to be idle:
+ void restartAsync(StatisticsPtr &stats, Metadata *image_metadata);
+ // copy out the results from the async thread so that it can be restarted
+ void fetchAsyncResults();
+ double ct_;
+ bcm2835_isp_stats_region statistics_[ALSC_CELLS_Y * ALSC_CELLS_X];
+ double async_results_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
+ double async_lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
+ double async_lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
+ void doAlsc();
+ double lambda_r_[ALSC_CELLS_X * ALSC_CELLS_Y];
+ double lambda_b_[ALSC_CELLS_X * ALSC_CELLS_Y];
+};
+
+} // namespace RPi
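
The smoothing behaviour of the ALSC solver hinges on compute_weight() in alsc.cpp above: the Gaussian similarity of two regions' chrominance decides how strongly the solver ties their gains together. A small standalone sketch with assumed values:

// Sketch (not part of the patch): regions with similar chrominance get a
// weight near 1, dissimilar regions or regions with no data get ~0, so the
// solver only couples regions that plausibly share a colour.
#include <cmath>
#include <cstdio>

static double compute_weight(double C_i, double C_j, double sigma)
{
        const double INSUFFICIENT_DATA = -1.0;
        if (C_i == INSUFFICIENT_DATA || C_j == INSUFFICIENT_DATA)
                return 0;
        double diff = (C_i - C_j) / sigma;
        return std::exp(-diff * diff / 2);
}

int main()
{
        double sigma = 0.01; // matches the sigma_Cr/sigma_Cb default in AlscConfig
        printf("identical: %.3f\n", compute_weight(0.50, 0.50, sigma));
        printf("close:     %.3f\n", compute_weight(0.50, 0.51, sigma));
        printf("far apart: %.3f\n", compute_weight(0.50, 0.60, sigma));
        printf("no data:   %.3f\n", compute_weight(0.50, -1.0, sigma));
        return 0;
}
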
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.cpp b/src/ipa/raspberrypi/controller/rpi/awb.cpp
new file mode 100644
index 00000000..a58fa11d
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/awb.cpp
@@ -0,0 +1,608 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * awb.cpp - AWB control algorithm
+ */
+
+#include "../logging.hpp"
+#include "../lux_status.h"
+
+#include "awb.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.awb"
+
+#define AWB_STATS_SIZE_X DEFAULT_AWB_REGIONS_X
+#define AWB_STATS_SIZE_Y DEFAULT_AWB_REGIONS_Y
+
+const double Awb::RGB::INVALID = -1.0;
+
+void AwbMode::Read(boost::property_tree::ptree const &params)
+{
+ ct_lo = params.get<double>("lo");
+ ct_hi = params.get<double>("hi");
+}
+
+void AwbPrior::Read(boost::property_tree::ptree const &params)
+{
+ lux = params.get<double>("lux");
+ prior.Read(params.get_child("prior"));
+}
+
+static void read_ct_curve(Pwl &ct_r, Pwl &ct_b,
+ boost::property_tree::ptree const &params)
+{
+ int num = 0;
+ for (auto it = params.begin(); it != params.end(); it++) {
+ double ct = it->second.get_value<double>();
+ assert(it == params.begin() || ct != ct_r.Domain().end);
+ if (++it == params.end())
+ throw std::runtime_error(
+ "AwbConfig: incomplete CT curve entry");
+ ct_r.Append(ct, it->second.get_value<double>());
+ if (++it == params.end())
+ throw std::runtime_error(
+ "AwbConfig: incomplete CT curve entry");
+ ct_b.Append(ct, it->second.get_value<double>());
+ num++;
+ }
+ if (num < 2)
+ throw std::runtime_error(
+ "AwbConfig: insufficient points in CT curve");
+}
+
+void AwbConfig::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG("AwbConfig");
+ bayes = params.get<int>("bayes", 1);
+ frame_period = params.get<uint16_t>("frame_period", 10);
+ startup_frames = params.get<uint16_t>("startup_frames", 10);
+ speed = params.get<double>("speed", 0.05);
+ if (params.get_child_optional("ct_curve"))
+ read_ct_curve(ct_r, ct_b, params.get_child("ct_curve"));
+ if (params.get_child_optional("priors")) {
+ for (auto &p : params.get_child("priors")) {
+ AwbPrior prior;
+ prior.Read(p.second);
+ if (!priors.empty() && prior.lux <= priors.back().lux)
+ throw std::runtime_error(
+ "AwbConfig: Prior must be ordered in increasing lux value");
+ priors.push_back(prior);
+ }
+ if (priors.empty())
+ throw std::runtime_error(
+ "AwbConfig: no AWB priors configured");
+ }
+ if (params.get_child_optional("modes")) {
+ for (auto &p : params.get_child("modes")) {
+ modes[p.first].Read(p.second);
+ if (default_mode == nullptr)
+ default_mode = &modes[p.first];
+ }
+ if (default_mode == nullptr)
+ throw std::runtime_error(
+ "AwbConfig: no AWB modes configured");
+ }
+ min_pixels = params.get<double>("min_pixels", 16.0);
+ min_G = params.get<uint16_t>("min_G", 32);
+ min_regions = params.get<uint32_t>("min_regions", 10);
+ delta_limit = params.get<double>("delta_limit", 0.2);
+ coarse_step = params.get<double>("coarse_step", 0.2);
+ transverse_pos = params.get<double>("transverse_pos", 0.01);
+ transverse_neg = params.get<double>("transverse_neg", 0.01);
+ if (transverse_pos <= 0 || transverse_neg <= 0)
+ throw std::runtime_error(
+ "AwbConfig: transverse_pos/neg must be > 0");
+ sensitivity_r = params.get<double>("sensitivity_r", 1.0);
+ sensitivity_b = params.get<double>("sensitivity_b", 1.0);
+ if (bayes) {
+ if (ct_r.Empty() || ct_b.Empty() || priors.empty() ||
+ default_mode == nullptr) {
+ RPI_WARN(
+ "Bayesian AWB mis-configured - switch to Grey method");
+ bayes = false;
+ }
+ }
+ fast = params.get<int>(
+ "fast", bayes); // default to fast for Bayesian, otherwise slow
+ whitepoint_r = params.get<double>("whitepoint_r", 0.0);
+ whitepoint_b = params.get<double>("whitepoint_b", 0.0);
+ if (bayes == false)
+ sensitivity_r = sensitivity_b =
+			1.0; // sensitivities make no sense in Grey world mode
+}
+
+Awb::Awb(Controller *controller)
+ : AwbAlgorithm(controller)
+{
+ async_abort_ = async_start_ = async_started_ = async_finished_ = false;
+ mode_ = nullptr;
+ manual_r_ = manual_b_ = 0.0;
+ async_thread_ = std::thread(std::bind(&Awb::asyncFunc, this));
+}
+
+Awb::~Awb()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ async_abort_ = true;
+ async_signal_.notify_one();
+ }
+ async_thread_.join();
+}
+
+char const *Awb::Name() const
+{
+ return NAME;
+}
+
+void Awb::Read(boost::property_tree::ptree const &params)
+{
+ config_.Read(params);
+}
+
+void Awb::Initialise()
+{
+ frame_count2_ = frame_count_ = frame_phase_ = 0;
+ // Put something sane into the status that we are filtering towards,
+ // just in case the first few frames don't have anything meaningful in
+ // them.
+ if (!config_.ct_r.Empty() && !config_.ct_b.Empty()) {
+ sync_results_.temperature_K = config_.ct_r.Domain().Clip(4000);
+ sync_results_.gain_r =
+ 1.0 / config_.ct_r.Eval(sync_results_.temperature_K);
+ sync_results_.gain_g = 1.0;
+ sync_results_.gain_b =
+ 1.0 / config_.ct_b.Eval(sync_results_.temperature_K);
+ } else {
+ // random values just to stop the world blowing up
+ sync_results_.temperature_K = 4500;
+ sync_results_.gain_r = sync_results_.gain_g =
+ sync_results_.gain_b = 1.0;
+ }
+ prev_sync_results_ = sync_results_;
+}
+
+void Awb::SetMode(std::string const &mode_name)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ mode_name_ = mode_name;
+}
+
+void Awb::SetManualGains(double manual_r, double manual_b)
+{
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+	// If any of these are 0.0, we switch back to auto.
+ manual_r_ = manual_r;
+ manual_b_ = manual_b;
+}
+
+void Awb::fetchAsyncResults()
+{
+ RPI_LOG("Fetch AWB results");
+ async_finished_ = false;
+ async_started_ = false;
+ sync_results_ = async_results_;
+}
+
+void Awb::restartAsync(StatisticsPtr &stats, std::string const &mode_name,
+ double lux)
+{
+ RPI_LOG("Starting AWB thread");
+ // this makes a new reference which belongs to the asynchronous thread
+ statistics_ = stats;
+ // store the mode as it could technically change
+ auto m = config_.modes.find(mode_name);
+ mode_ = m != config_.modes.end()
+ ? &m->second
+ : (mode_ == nullptr ? config_.default_mode : mode_);
+ lux_ = lux;
+ frame_phase_ = 0;
+ async_start_ = true;
+ async_started_ = true;
+ size_t len = mode_name.copy(async_results_.mode,
+ sizeof(async_results_.mode) - 1);
+ async_results_.mode[len] = '\0';
+ async_signal_.notify_one();
+}
+
+void Awb::Prepare(Metadata *image_metadata)
+{
+ if (frame_count_ < (int)config_.startup_frames)
+ frame_count_++;
+ double speed = frame_count_ < (int)config_.startup_frames
+ ? 1.0
+ : config_.speed;
+ RPI_LOG("Awb: frame_count " << frame_count_ << " speed " << speed);
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (async_started_ && async_finished_) {
+ RPI_LOG("AWB thread finished");
+ fetchAsyncResults();
+ }
+ }
+ // Finally apply IIR filter to results and put into metadata.
+ memcpy(prev_sync_results_.mode, sync_results_.mode,
+ sizeof(prev_sync_results_.mode));
+ prev_sync_results_.temperature_K =
+ speed * sync_results_.temperature_K +
+ (1.0 - speed) * prev_sync_results_.temperature_K;
+ prev_sync_results_.gain_r = speed * sync_results_.gain_r +
+ (1.0 - speed) * prev_sync_results_.gain_r;
+ prev_sync_results_.gain_g = speed * sync_results_.gain_g +
+ (1.0 - speed) * prev_sync_results_.gain_g;
+ prev_sync_results_.gain_b = speed * sync_results_.gain_b +
+ (1.0 - speed) * prev_sync_results_.gain_b;
+ image_metadata->Set("awb.status", prev_sync_results_);
+ RPI_LOG("Using AWB gains r " << prev_sync_results_.gain_r << " g "
+ << prev_sync_results_.gain_g << " b "
+ << prev_sync_results_.gain_b);
+}
+
+void Awb::Process(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ // Count frames since we last poked the async thread.
+ if (frame_phase_ < (int)config_.frame_period)
+ frame_phase_++;
+ if (frame_count2_ < (int)config_.startup_frames)
+ frame_count2_++;
+ RPI_LOG("Awb: frame_phase " << frame_phase_);
+ if (frame_phase_ >= (int)config_.frame_period ||
+ frame_count2_ < (int)config_.startup_frames) {
+ // Update any settings and any image metadata that we need.
+ std::string mode_name;
+ {
+ std::unique_lock<std::mutex> lock(settings_mutex_);
+ mode_name = mode_name_;
+ }
+ struct LuxStatus lux_status = {};
+ lux_status.lux = 400; // in case no metadata
+ if (image_metadata->Get("lux.status", lux_status) != 0)
+ RPI_LOG("No lux metadata found");
+ RPI_LOG("Awb lux value is " << lux_status.lux);
+
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (async_started_ == false) {
+ RPI_LOG("AWB thread starting");
+ restartAsync(stats, mode_name, lux_status.lux);
+ }
+ }
+}
+
+void Awb::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ async_signal_.wait(lock, [&] {
+ return async_start_ || async_abort_;
+ });
+ async_start_ = false;
+ if (async_abort_)
+ break;
+ }
+ doAwb();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ async_finished_ = true;
+ sync_signal_.notify_one();
+ }
+ }
+}
+
+static void generate_stats(std::vector<Awb::RGB> &zones,
+ bcm2835_isp_stats_region *stats, double min_pixels,
+ double min_G)
+{
+ for (int i = 0; i < AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y; i++) {
+ Awb::RGB zone; // this is "invalid", unless R gets overwritten later
+ double counted = stats[i].counted;
+ if (counted >= min_pixels) {
+ zone.G = stats[i].g_sum / counted;
+ if (zone.G >= min_G) {
+ zone.R = stats[i].r_sum / counted;
+ zone.B = stats[i].b_sum / counted;
+ }
+ }
+ zones.push_back(zone);
+ }
+}
+
+void Awb::prepareStats()
+{
+ zones_.clear();
+ // LSC has already been applied to the stats in this pipeline, so stop
+ // any LSC compensation. We also ignore config_.fast in this version.
+ generate_stats(zones_, statistics_->awb_stats, config_.min_pixels,
+ config_.min_G);
+ // we're done with these; we may as well relinquish our hold on the
+ // pointer.
+ statistics_.reset();
+ // apply sensitivities, so values appear to come from our "canonical"
+ // sensor.
+ for (auto &zone : zones_)
+ zone.R *= config_.sensitivity_r,
+ zone.B *= config_.sensitivity_b;
+}
+
+double Awb::computeDelta2Sum(double gain_r, double gain_b)
+{
+ // Compute the sum of the squared colour error (non-greyness) as it
+ // appears in the log likelihood equation.
+ double delta2_sum = 0;
+ for (auto &z : zones_) {
+ double delta_r = gain_r * z.R - 1 - config_.whitepoint_r;
+ double delta_b = gain_b * z.B - 1 - config_.whitepoint_b;
+ double delta2 = delta_r * delta_r + delta_b * delta_b;
+ //RPI_LOG("delta_r " << delta_r << " delta_b " << delta_b << " delta2 " << delta2);
+ delta2 = std::min(delta2, config_.delta_limit);
+ delta2_sum += delta2;
+ }
+ return delta2_sum;
+}
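+// Worked example for computeDelta2Sum() (illustrative numbers): for a zone
+// whose G-normalised values are R = 0.55 and B = 0.48, candidate gains of
+// gain_r = gain_b = 2.0 and a zero whitepoint give delta_r = 2.0 * 0.55 - 1
+// = 0.1 and delta_b = 2.0 * 0.48 - 1 = -0.04, so that zone contributes
+// 0.1^2 + 0.04^2 = 0.0116 to the sum, well under the default delta_limit
+// of 0.2.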
+
+Pwl Awb::interpolatePrior()
+{
+ // Interpolate the prior log likelihood function for our current lux
+ // value.
+ if (lux_ <= config_.priors.front().lux)
+ return config_.priors.front().prior;
+ else if (lux_ >= config_.priors.back().lux)
+ return config_.priors.back().prior;
+ else {
+ int idx = 0;
+ // find which two we lie between
+ while (config_.priors[idx + 1].lux < lux_)
+ idx++;
+ double lux0 = config_.priors[idx].lux,
+ lux1 = config_.priors[idx + 1].lux;
+ return Pwl::Combine(config_.priors[idx].prior,
+ config_.priors[idx + 1].prior,
+ [&](double /*x*/, double y0, double y1) {
+ return y0 + (y1 - y0) *
+ (lux_ - lux0) / (lux1 - lux0);
+ });
+ }
+}
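+// As an illustration of interpolatePrior(): with priors defined at lux 100
+// and lux 1000 and a current lux_ of 400, the two prior curves are combined
+// point-wise with weight (400 - 100) / (1000 - 100) = 1/3 on the lux-1000
+// prior and 2/3 on the lux-100 prior.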
+
+static double interpolate_quadatric(Pwl::Point const &A, Pwl::Point const &B,
+ Pwl::Point const &C)
+{
+ // Given 3 points on a curve, find the extremum of the function in that
+ // interval by fitting a quadratic.
+ const double eps = 1e-3;
+ Pwl::Point CA = C - A, BA = B - A;
+ double denominator = 2 * (BA.y * CA.x - CA.y * BA.x);
+ if (abs(denominator) > eps) {
+ double numerator = BA.y * CA.x * CA.x - CA.y * BA.x * BA.x;
+ double result = numerator / denominator + A.x;
+ return std::max(A.x, std::min(C.x, result));
+ }
+ // has degenerated to straight line segment
+ return A.y < C.y - eps ? A.x : (C.y < A.y - eps ? C.x : B.x);
+}
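+// Worked example for interpolate_quadatric() (illustrative points): with
+// A = (1, 4), B = (2, 1) and C = (3, 2) the fitted parabola is
+// y = 2x^2 - 9x + 11, whose minimum sits at x = 9/4 = 2.25; as that already
+// lies within [A.x, C.x] it is returned unclamped.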
+
+double Awb::coarseSearch(Pwl const &prior)
+{
+ points_.clear(); // assume doesn't deallocate memory
+ size_t best_point = 0;
+ double t = mode_->ct_lo;
+ int span_r = 0, span_b = 0;
+ // Step down the CT curve evaluating log likelihood.
+ while (true) {
+ double r = config_.ct_r.Eval(t, &span_r);
+ double b = config_.ct_b.Eval(t, &span_b);
+ double gain_r = 1 / r, gain_b = 1 / b;
+ double delta2_sum = computeDelta2Sum(gain_r, gain_b);
+ double prior_log_likelihood =
+ prior.Eval(prior.Domain().Clip(t));
+ double final_log_likelihood = delta2_sum - prior_log_likelihood;
+ RPI_LOG("t: " << t << " gain_r " << gain_r << " gain_b "
+ << gain_b << " delta2_sum " << delta2_sum
+ << " prior " << prior_log_likelihood << " final "
+ << final_log_likelihood);
+ points_.push_back(Pwl::Point(t, final_log_likelihood));
+ if (points_.back().y < points_[best_point].y)
+ best_point = points_.size() - 1;
+ if (t == mode_->ct_hi)
+ break;
+ // for even steps along the r/b curve scale them by the current t
+ t = std::min(t + t / 10 * config_.coarse_step,
+ mode_->ct_hi);
+ }
+ t = points_[best_point].x;
+ RPI_LOG("Coarse search found CT " << t);
+ // We have the best point of the search, but refine it with a quadratic
+ // interpolation around its neighbours.
+ if (points_.size() > 2) {
+ unsigned long bp = std::min(best_point, points_.size() - 2);
+ best_point = std::max(1UL, bp);
+ t = interpolate_quadatric(points_[best_point - 1],
+ points_[best_point],
+ points_[best_point + 1]);
+ RPI_LOG("After quadratic refinement, coarse search has CT "
+ << t);
+ }
+ return t;
+}
+
+void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
+{
+ int span_r, span_b;
+ config_.ct_r.Eval(t, &span_r);
+ config_.ct_b.Eval(t, &span_b);
+ double step = t / 10 * config_.coarse_step * 0.1;
+ int nsteps = 5;
+ double r_diff = config_.ct_r.Eval(t + nsteps * step, &span_r) -
+ config_.ct_r.Eval(t - nsteps * step, &span_r);
+ double b_diff = config_.ct_b.Eval(t + nsteps * step, &span_b) -
+ config_.ct_b.Eval(t - nsteps * step, &span_b);
+ Pwl::Point transverse(b_diff, -r_diff);
+ if (transverse.Len2() < 1e-6)
+ return;
+ // unit vector orthogonal to the b vs. r function (pointing outwards
+ // with r and b increasing)
+ transverse = transverse / transverse.Len();
+ double best_log_likelihood = 0, best_t = 0, best_r = 0, best_b = 0;
+ double transverse_range =
+ config_.transverse_neg + config_.transverse_pos;
+ const int MAX_NUM_DELTAS = 12;
+ // a transverse step approximately every 0.01 r/b units
+ int num_deltas = floor(transverse_range * 100 + 0.5) + 1;
+ num_deltas = num_deltas < 3 ? 3 :
+ (num_deltas > MAX_NUM_DELTAS ? MAX_NUM_DELTAS : num_deltas);
+ // Step down CT curve. March a bit further if the transverse range is
+ // large.
+ nsteps += num_deltas;
+ for (int i = -nsteps; i <= nsteps; i++) {
+ double t_test = t + i * step;
+ double prior_log_likelihood =
+ prior.Eval(prior.Domain().Clip(t_test));
+ double r_curve = config_.ct_r.Eval(t_test, &span_r);
+ double b_curve = config_.ct_b.Eval(t_test, &span_b);
+ // x will be distance off the curve, y the log likelihood there
+ Pwl::Point points[MAX_NUM_DELTAS];
+ int best_point = 0;
+ // Take some measurements transversely *off* the CT curve.
+ for (int j = 0; j < num_deltas; j++) {
+ points[j].x = -config_.transverse_neg +
+ (transverse_range * j) / (num_deltas - 1);
+ Pwl::Point rb_test = Pwl::Point(r_curve, b_curve) +
+ transverse * points[j].x;
+ double r_test = rb_test.x, b_test = rb_test.y;
+ double gain_r = 1 / r_test, gain_b = 1 / b_test;
+ double delta2_sum = computeDelta2Sum(gain_r, gain_b);
+ points[j].y = delta2_sum - prior_log_likelihood;
+ RPI_LOG("At t " << t_test << " r " << r_test << " b "
+ << b_test << ": " << points[j].y);
+ if (points[j].y < points[best_point].y)
+ best_point = j;
+ }
+ // We have NUM_DELTAS points transversely across the CT curve,
+ // now let's do a quadratic interpolation for the best result.
+ best_point = std::max(1, std::min(best_point, num_deltas - 2));
+ Pwl::Point rb_test =
+ Pwl::Point(r_curve, b_curve) +
+ transverse *
+ interpolate_quadatric(points[best_point - 1],
+ points[best_point],
+ points[best_point + 1]);
+ double r_test = rb_test.x, b_test = rb_test.y;
+ double gain_r = 1 / r_test, gain_b = 1 / b_test;
+ double delta2_sum = computeDelta2Sum(gain_r, gain_b);
+ double final_log_likelihood = delta2_sum - prior_log_likelihood;
+ RPI_LOG("Finally "
+ << t_test << " r " << r_test << " b " << b_test << ": "
+ << final_log_likelihood
+ << (final_log_likelihood < best_log_likelihood ? " BEST"
+ : ""));
+ if (best_t == 0 || final_log_likelihood < best_log_likelihood)
+ best_log_likelihood = final_log_likelihood,
+ best_t = t_test, best_r = r_test, best_b = b_test;
+ }
+ t = best_t, r = best_r, b = best_b;
+ RPI_LOG("Fine search found t " << t << " r " << r << " b " << b);
+}
+
+void Awb::awbBayes()
+{
+ // May as well divide out G to save computeDelta2Sum from doing it over
+ // and over.
+ for (auto &z : zones_)
+ z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
+ // Get the current prior, and scale according to how many zones are
+ // valid... not entirely sure about this.
+ Pwl prior = interpolatePrior();
+ prior *= zones_.size() / (double)(AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y);
+ prior.Map([](double x, double y) {
+ RPI_LOG("(" << x << "," << y << ")");
+ });
+ double t = coarseSearch(prior);
+ double r = config_.ct_r.Eval(t);
+ double b = config_.ct_b.Eval(t);
+ RPI_LOG("After coarse search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")");
+ // Not entirely sure how to handle the fine search yet. Mostly the
+ // estimated CT is already good enough, but the fine search allows us to
+	// wander transversely off the CT curve. Under some illuminants, where
+ // there may be more or less green light, this may prove beneficial,
+ // though I probably need more real datasets before deciding exactly how
+ // this should be controlled and tuned.
+ fineSearch(t, r, b, prior);
+ RPI_LOG("After fine search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")");
+ // Write results out for the main thread to pick up. Remember to adjust
+ // the gains from the ones that the "canonical sensor" would require to
+ // the ones needed by *this* sensor.
+ async_results_.temperature_K = t;
+ async_results_.gain_r = 1.0 / r * config_.sensitivity_r;
+ async_results_.gain_g = 1.0;
+ async_results_.gain_b = 1.0 / b * config_.sensitivity_b;
+}
+
+void Awb::awbGrey()
+{
+ RPI_LOG("Grey world AWB");
+ // Make a separate list of the derivatives for each of red and blue, so
+ // that we can sort them to exclude the extreme gains. We could
+ // consider some variations, such as normalising all the zones first, or
+ // doing an L2 average etc.
+ std::vector<RGB> &derivs_R(zones_);
+ std::vector<RGB> derivs_B(derivs_R);
+ std::sort(derivs_R.begin(), derivs_R.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.R < b.G * a.R;
+ });
+ std::sort(derivs_B.begin(), derivs_B.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.B < b.G * a.B;
+ });
+ // Average the middle half of the values.
+ int discard = derivs_R.size() / 4;
+ RGB sum_R(0, 0, 0), sum_B(0, 0, 0);
+ for (auto ri = derivs_R.begin() + discard,
+ bi = derivs_B.begin() + discard;
+ ri != derivs_R.end() - discard; ri++, bi++)
+ sum_R += *ri, sum_B += *bi;
+ double gain_r = sum_R.G / (sum_R.R + 1),
+ gain_b = sum_B.G / (sum_B.B + 1);
+ async_results_.temperature_K = 4500; // don't know what it is
+ async_results_.gain_r = gain_r;
+ async_results_.gain_g = 1.0;
+ async_results_.gain_b = gain_b;
+}
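+// Illustration of the grey world arithmetic above: if the middle-half sums
+// come out as sum_R = (R 1200, G 2400) and sum_B = (B 3200, G 2400), the
+// gains become gain_r = 2400 / 1201 ~ 2.0 and gain_b = 2400 / 3201 ~ 0.75.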
+
+void Awb::doAwb()
+{
+ if (manual_r_ != 0.0 && manual_b_ != 0.0) {
+ async_results_.temperature_K = 4500; // don't know what it is
+ async_results_.gain_r = manual_r_;
+ async_results_.gain_g = 1.0;
+ async_results_.gain_b = manual_b_;
+ RPI_LOG("Using manual white balance: gain_r "
+ << async_results_.gain_r << " gain_b "
+ << async_results_.gain_b);
+ } else {
+ prepareStats();
+ RPI_LOG("Valid zones: " << zones_.size());
+ if (zones_.size() > config_.min_regions) {
+ if (config_.bayes)
+ awbBayes();
+ else
+ awbGrey();
+ RPI_LOG("CT found is "
+ << async_results_.temperature_K
+ << " with gains r " << async_results_.gain_r
+ << " and b " << async_results_.gain_b);
+ }
+ }
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Awb(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.hpp b/src/ipa/raspberrypi/controller/rpi/awb.hpp
new file mode 100644
index 00000000..36925252
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/awb.hpp
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * awb.hpp - AWB control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include "../awb_algorithm.hpp"
+#include "../pwl.hpp"
+#include "../awb_status.h"
+
+namespace RPi {
+
+// Control algorithm to perform AWB calculations.
+
+struct AwbMode {
+ void Read(boost::property_tree::ptree const &params);
+ double ct_lo; // low CT value for search
+ double ct_hi; // high CT value for search
+};
+
+struct AwbPrior {
+ void Read(boost::property_tree::ptree const &params);
+ double lux; // lux level
+ Pwl prior; // maps CT to prior log likelihood for this lux level
+};
+
+struct AwbConfig {
+ AwbConfig() : default_mode(nullptr) {}
+ void Read(boost::property_tree::ptree const &params);
+ // Only repeat the AWB calculation every "this many" frames
+ uint16_t frame_period;
+ // number of initial frames for which speed taken as 1.0 (maximum)
+ uint16_t startup_frames;
+ double speed; // IIR filter speed applied to algorithm results
+ bool fast; // "fast" mode uses a 16x16 rather than 32x32 grid
+ Pwl ct_r; // function maps CT to r (= R/G)
+ Pwl ct_b; // function maps CT to b (= B/G)
+ // table of illuminant priors at different lux levels
+ std::vector<AwbPrior> priors;
+ // AWB "modes" (determines the search range)
+ std::map<std::string, AwbMode> modes;
+ AwbMode *default_mode; // mode used if no mode selected
+ // minimum proportion of pixels counted within AWB region for it to be
+ // "useful"
+ double min_pixels;
+	// minimum G value of those pixels, for them to be regarded as "useful"
+ uint16_t min_G;
+ // number of AWB regions that must be "useful" in order to do the AWB
+ // calculation
+ uint32_t min_regions;
+ // clamp on colour error term (so as not to penalise non-grey excessively)
+ double delta_limit;
+ // step size control in coarse search
+ double coarse_step;
+ // how far to wander off CT curve towards "more purple"
+ double transverse_pos;
+ // how far to wander off CT curve towards "more green"
+ double transverse_neg;
+ // red sensitivity ratio (set to canonical sensor's R/G divided by this
+ // sensor's R/G)
+ double sensitivity_r;
+ // blue sensitivity ratio (set to canonical sensor's B/G divided by this
+ // sensor's B/G)
+ double sensitivity_b;
+ // The whitepoint (which we normally "aim" for) can be moved.
+ double whitepoint_r;
+ double whitepoint_b;
+ bool bayes; // use Bayesian algorithm
+};
+
+class Awb : public AwbAlgorithm
+{
+public:
+ Awb(Controller *controller = NULL);
+ ~Awb();
+ char const *Name() const override;
+ void Initialise() override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void SetMode(std::string const &name) override;
+ void SetManualGains(double manual_r, double manual_b) override;
+ void Prepare(Metadata *image_metadata) override;
+ void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+ struct RGB {
+ RGB(double _R = INVALID, double _G = INVALID,
+ double _B = INVALID)
+ : R(_R), G(_G), B(_B)
+ {
+ }
+ double R, G, B;
+ static const double INVALID;
+ bool Valid() const { return G != INVALID; }
+ bool Invalid() const { return G == INVALID; }
+ RGB &operator+=(RGB const &other)
+ {
+ R += other.R, G += other.G, B += other.B;
+ return *this;
+ }
+ RGB Square() const { return RGB(R * R, G * G, B * B); }
+ };
+
+private:
+ // configuration is read-only, and available to both threads
+ AwbConfig config_;
+ std::thread async_thread_;
+ void asyncFunc(); // asynchronous thread function
+ std::mutex mutex_;
+ // condvar for async thread to wait on
+ std::condition_variable async_signal_;
+ // condvar for synchronous thread to wait on
+ std::condition_variable sync_signal_;
+ // for sync thread to check if async thread finished (requires mutex)
+ bool async_finished_;
+ // for async thread to check if it's been told to run (requires mutex)
+ bool async_start_;
+ // for async thread to check if it's been told to quit (requires mutex)
+ bool async_abort_;
+
+ // The following are only for the synchronous thread to use:
+	// for sync thread to note it has asked the async thread to run
+ bool async_started_;
+ // counts up to frame_period before restarting the async thread
+ int frame_phase_;
+ int frame_count_; // counts up to startup_frames
+ int frame_count2_; // counts up to startup_frames for Process method
+ AwbStatus sync_results_;
+ AwbStatus prev_sync_results_;
+ std::string mode_name_;
+ std::mutex settings_mutex_;
+ // The following are for the asynchronous thread to use, though the main
+ // thread can set/reset them if the async thread is known to be idle:
+ void restartAsync(StatisticsPtr &stats, std::string const &mode_name,
+ double lux);
+ // copy out the results from the async thread so that it can be restarted
+ void fetchAsyncResults();
+ StatisticsPtr statistics_;
+ AwbMode *mode_;
+ double lux_;
+ AwbStatus async_results_;
+ void doAwb();
+ void awbBayes();
+ void awbGrey();
+ void prepareStats();
+ double computeDelta2Sum(double gain_r, double gain_b);
+ Pwl interpolatePrior();
+ double coarseSearch(Pwl const &prior);
+ void fineSearch(double &t, double &r, double &b, Pwl const &prior);
+ std::vector<RGB> zones_;
+ std::vector<Pwl::Point> points_;
+ // manual r setting
+ double manual_r_;
+ // manual b setting
+ double manual_b_;
+};
+
+static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
+}
+static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
+}
+static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
+{
+ return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
+}
+static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
+{
+ return d * rgb;
+}
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.cpp b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
new file mode 100644
index 00000000..59c9f5a6
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * black_level.cpp - black level control algorithm
+ */
+
+#include <math.h>
+#include <stdint.h>
+
+#include "../black_level_status.h"
+#include "../logging.hpp"
+
+#include "black_level.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.black_level"
+
+BlackLevel::BlackLevel(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *BlackLevel::Name() const
+{
+ return NAME;
+}
+
+void BlackLevel::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG(Name());
+ uint16_t black_level = params.get<uint16_t>(
+ "black_level", 4096); // 64 in 10 bits scaled to 16 bits
+ black_level_r_ = params.get<uint16_t>("black_level_r", black_level);
+ black_level_g_ = params.get<uint16_t>("black_level_g", black_level);
+ black_level_b_ = params.get<uint16_t>("black_level_b", black_level);
+}
+
+void BlackLevel::Prepare(Metadata *image_metadata)
+{
+ // Possibly we should think about doing this in a switch_mode or
+ // something?
+ struct BlackLevelStatus status;
+ status.black_level_r = black_level_r_;
+ status.black_level_g = black_level_g_;
+ status.black_level_b = black_level_b_;
+ image_metadata->Set("black_level.status", status);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return new BlackLevel(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.hpp b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
new file mode 100644
index 00000000..5d74c6da
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * black_level.hpp - black level control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+#include "../black_level_status.h"
+
+// This is our implementation of the "black level algorithm".
+
+namespace RPi {
+
+class BlackLevel : public Algorithm
+{
+public:
+ BlackLevel(Controller *controller);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ double black_level_r_;
+ double black_level_g_;
+ double black_level_b_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.cpp b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
new file mode 100644
index 00000000..327cb71c
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * ccm.cpp - CCM (colour correction matrix) control algorithm
+ */
+
+#include "../awb_status.h"
+#include "../ccm_status.h"
+#include "../logging.hpp"
+#include "../lux_status.h"
+#include "../metadata.hpp"
+
+#include "ccm.hpp"
+
+using namespace RPi;
+
+// This algorithm selects a CCM (Colour Correction Matrix) according to the
+// colour temperature estimated by AWB (interpolating between known matrices as
+// necessary). Additionally the amount of colour saturation can be controlled
+// both according to the current estimated lux level and according to a
+// saturation setting that is exposed to applications.
+
+#define NAME "rpi.ccm"
+
+Matrix::Matrix()
+{
+ memset(m, 0, sizeof(m));
+}
+Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
+ double m6, double m7, double m8)
+{
+ m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4,
+ m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8;
+}
+void Matrix::Read(boost::property_tree::ptree const &params)
+{
+ double *ptr = (double *)m;
+ int n = 0;
+ for (auto it = params.begin(); it != params.end(); it++) {
+ if (n++ == 9)
+ throw std::runtime_error("Ccm: too many values in CCM");
+ *ptr++ = it->second.get_value<double>();
+ }
+ if (n < 9)
+ throw std::runtime_error("Ccm: too few values in CCM");
+}
+
+Ccm::Ccm(Controller *controller)
+ : CcmAlgorithm(controller), saturation_(1.0) {}
+
+char const *Ccm::Name() const
+{
+ return NAME;
+}
+
+void Ccm::Read(boost::property_tree::ptree const &params)
+{
+ if (params.get_child_optional("saturation"))
+ config_.saturation.Read(params.get_child("saturation"));
+ for (auto &p : params.get_child("ccms")) {
+ CtCcm ct_ccm;
+ ct_ccm.ct = p.second.get<double>("ct");
+ ct_ccm.ccm.Read(p.second.get_child("ccm"));
+ if (!config_.ccms.empty() &&
+ ct_ccm.ct <= config_.ccms.back().ct)
+ throw std::runtime_error(
+ "Ccm: CCM not in increasing colour temperature order");
+ config_.ccms.push_back(std::move(ct_ccm));
+ }
+ if (config_.ccms.empty())
+ throw std::runtime_error("Ccm: no CCMs specified");
+}
+
+void Ccm::SetSaturation(double saturation)
+{
+ saturation_ = saturation;
+}
+
+void Ccm::Initialise() {}
+
+template<typename T>
+static bool get_locked(Metadata *metadata, std::string const &tag, T &value)
+{
+ T *ptr = metadata->GetLocked<T>(tag);
+ if (ptr == nullptr)
+ return false;
+ value = *ptr;
+ return true;
+}
+
+Matrix calculate_ccm(std::vector<CtCcm> const &ccms, double ct)
+{
+ if (ct <= ccms.front().ct)
+ return ccms.front().ccm;
+ else if (ct >= ccms.back().ct)
+ return ccms.back().ccm;
+ else {
+ int i = 0;
+ for (; ct > ccms[i].ct; i++)
+ ;
+ double lambda =
+ (ct - ccms[i - 1].ct) / (ccms[i].ct - ccms[i - 1].ct);
+ return lambda * ccms[i].ccm + (1.0 - lambda) * ccms[i - 1].ccm;
+ }
+}
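+// Illustration of calculate_ccm(): with matrices defined at 2800K and 6500K
+// only, an estimated CT of 4000K gives lambda = (4000 - 2800) / (6500 - 2800)
+// ~ 0.32, so the matrix used is 0.32 * ccm(6500K) + 0.68 * ccm(2800K).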
+
+Matrix apply_saturation(Matrix const &ccm, double saturation)
+{
+ Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
+ -0.081);
+ Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771,
+ 0.000);
+ Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation);
+ return Y2RGB * S * RGB2Y * ccm;
+}
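+// Note on apply_saturation(): RGB2Y and Y2RGB are (approximately inverse)
+// BT.601-style conversions, so S scales only the two chroma components. With
+// saturation = 0 the product reduces to a luma-only matrix (monochrome
+// output); with saturation = 1 the CCM is returned essentially unchanged, up
+// to rounding in the two conversion matrices.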
+
+void Ccm::Prepare(Metadata *image_metadata)
+{
+ bool awb_ok = false, lux_ok = false;
+ struct AwbStatus awb = {};
+ awb.temperature_K = 4000; // in case no metadata
+ struct LuxStatus lux = {};
+ lux.lux = 400; // in case no metadata
+ {
+ // grab mutex just once to get everything
+ std::lock_guard<Metadata> lock(*image_metadata);
+ awb_ok = get_locked(image_metadata, "awb.status", awb);
+ lux_ok = get_locked(image_metadata, "lux.status", lux);
+ }
+ if (!awb_ok)
+ RPI_WARN("Ccm: no colour temperature found");
+ if (!lux_ok)
+ RPI_WARN("Ccm: no lux value found");
+ Matrix ccm = calculate_ccm(config_.ccms, awb.temperature_K);
+ double saturation = saturation_;
+ struct CcmStatus ccm_status;
+ ccm_status.saturation = saturation;
+ if (!config_.saturation.Empty())
+ saturation *= config_.saturation.Eval(
+ config_.saturation.Domain().Clip(lux.lux));
+ ccm = apply_saturation(ccm, saturation);
+ for (int j = 0; j < 3; j++)
+ for (int i = 0; i < 3; i++)
+ ccm_status.matrix[j * 3 + i] =
+ std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
+ RPI_LOG("CCM: colour temperature " << awb.temperature_K << "K");
+ RPI_LOG("CCM: " << ccm_status.matrix[0] << " " << ccm_status.matrix[1]
+ << " " << ccm_status.matrix[2] << " "
+ << ccm_status.matrix[3] << " " << ccm_status.matrix[4]
+ << " " << ccm_status.matrix[5] << " "
+ << ccm_status.matrix[6] << " " << ccm_status.matrix[7]
+ << " " << ccm_status.matrix[8]);
+ image_metadata->Set("ccm.status", ccm_status);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+	return (Algorithm *)new Ccm(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.hpp b/src/ipa/raspberrypi/controller/rpi/ccm.hpp
new file mode 100644
index 00000000..f6f4dee1
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.hpp
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * ccm.hpp - CCM (colour correction matrix) control algorithm
+ */
+#pragma once
+
+#include <vector>
+#include <atomic>
+
+#include "../ccm_algorithm.hpp"
+#include "../pwl.hpp"
+
+namespace RPi {
+
+// Algorithm to calculate colour matrix. Should be placed after AWB.
+
+struct Matrix {
+ Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
+ double m6, double m7, double m8);
+ Matrix();
+ double m[3][3];
+ void Read(boost::property_tree::ptree const &params);
+};
+static inline Matrix operator*(double d, Matrix const &m)
+{
+ return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d,
+ m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d,
+ m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d);
+}
+static inline Matrix operator*(Matrix const &m1, Matrix const &m2)
+{
+ Matrix m;
+ for (int i = 0; i < 3; i++)
+ for (int j = 0; j < 3; j++)
+ m.m[i][j] = m1.m[i][0] * m2.m[0][j] +
+ m1.m[i][1] * m2.m[1][j] +
+ m1.m[i][2] * m2.m[2][j];
+ return m;
+}
+static inline Matrix operator+(Matrix const &m1, Matrix const &m2)
+{
+ Matrix m;
+ for (int i = 0; i < 3; i++)
+ for (int j = 0; j < 3; j++)
+ m.m[i][j] = m1.m[i][j] + m2.m[i][j];
+ return m;
+}
+
+struct CtCcm {
+ double ct;
+ Matrix ccm;
+};
+
+struct CcmConfig {
+ std::vector<CtCcm> ccms;
+ Pwl saturation;
+};
+
+class Ccm : public CcmAlgorithm
+{
+public:
+ Ccm(Controller *controller = NULL);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void SetSaturation(double saturation) override;
+ void Initialise() override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ CcmConfig config_;
+ std::atomic<double> saturation_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.cpp b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
new file mode 100644
index 00000000..e4967990
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * contrast.cpp - contrast (gamma) control algorithm
+ */
+#include <stdint.h>
+
+#include "../contrast_status.h"
+#include "../histogram.hpp"
+
+#include "contrast.hpp"
+
+using namespace RPi;
+
+// This is a very simple control algorithm which takes the gamma curve from the
+// tuning file, optionally stretches it to make better use of the histogram
+// range ("adaptive contrast enhancement"), applies any manually requested
+// brightness/contrast adjustment, and passes the resulting curve on to the
+// pipeline via the "contrast.status" metadata.
+
+#define NAME "rpi.contrast"
+
+Contrast::Contrast(Controller *controller)
+ : ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0)
+{
+}
+
+char const *Contrast::Name() const
+{
+ return NAME;
+}
+
+void Contrast::Read(boost::property_tree::ptree const &params)
+{
+ // enable adaptive enhancement by default
+ config_.ce_enable = params.get<int>("ce_enable", 1);
+ // the point near the bottom of the histogram to move
+ config_.lo_histogram = params.get<double>("lo_histogram", 0.01);
+ // where in the range to try and move it to
+ config_.lo_level = params.get<double>("lo_level", 0.015);
+ // but don't move by more than this
+ config_.lo_max = params.get<double>("lo_max", 500);
+ // equivalent values for the top of the histogram...
+ config_.hi_histogram = params.get<double>("hi_histogram", 0.95);
+ config_.hi_level = params.get<double>("hi_level", 0.95);
+ config_.hi_max = params.get<double>("hi_max", 2000);
+ config_.gamma_curve.Read(params.get_child("gamma_curve"));
+}
+
+void Contrast::SetBrightness(double brightness)
+{
+ brightness_ = brightness;
+}
+
+void Contrast::SetContrast(double contrast)
+{
+ contrast_ = contrast;
+}
+
+static void fill_in_status(ContrastStatus &status, double brightness,
+ double contrast, Pwl &gamma_curve)
+{
+ status.brightness = brightness;
+ status.contrast = contrast;
+ for (int i = 0; i < CONTRAST_NUM_POINTS - 1; i++) {
+ int x = i < 16 ? i * 1024
+ : (i < 24 ? (i - 16) * 2048 + 16384
+ : (i - 24) * 4096 + 32768);
+ status.points[i].x = x;
+ status.points[i].y = std::min(65535.0, gamma_curve.Eval(x));
+ }
+ status.points[CONTRAST_NUM_POINTS - 1].x = 65535;
+ status.points[CONTRAST_NUM_POINTS - 1].y = 65535;
+}
+
+void Contrast::Initialise()
+{
+ // Fill in some default values as Prepare will run before Process gets
+ // called.
+ fill_in_status(status_, brightness_, contrast_, config_.gamma_curve);
+}
+
+void Contrast::Prepare(Metadata *image_metadata)
+{
+ std::unique_lock<std::mutex> lock(mutex_);
+ image_metadata->Set("contrast.status", status_);
+}
+
+Pwl compute_stretch_curve(Histogram const &histogram,
+ ContrastConfig const &config)
+{
+ Pwl enhance;
+ enhance.Append(0, 0);
+ // If the start of the histogram is rather empty, try to pull it down a
+ // bit.
+ double hist_lo = histogram.Quantile(config.lo_histogram) *
+ (65536 / NUM_HISTOGRAM_BINS);
+ double level_lo = config.lo_level * 65536;
+ RPI_LOG("Move histogram point " << hist_lo << " to " << level_lo);
+ hist_lo = std::max(
+ level_lo,
+ std::min(65535.0, std::min(hist_lo, level_lo + config.lo_max)));
+ RPI_LOG("Final values " << hist_lo << " -> " << level_lo);
+ enhance.Append(hist_lo, level_lo);
+ // Keep the mid-point (median) in the same place, though, to limit the
+ // apparent amount of global brightness shift.
+ double mid = histogram.Quantile(0.5) * (65536 / NUM_HISTOGRAM_BINS);
+ enhance.Append(mid, mid);
+
+	// If the top of the histogram is empty, try to pull the pixel values
+ // there up.
+ double hist_hi = histogram.Quantile(config.hi_histogram) *
+ (65536 / NUM_HISTOGRAM_BINS);
+ double level_hi = config.hi_level * 65536;
+ RPI_LOG("Move histogram point " << hist_hi << " to " << level_hi);
+ hist_hi = std::min(
+ level_hi,
+ std::max(0.0, std::max(hist_hi, level_hi - config.hi_max)));
+ RPI_LOG("Final values " << hist_hi << " -> " << level_hi);
+ enhance.Append(hist_hi, level_hi);
+ enhance.Append(65535, 65535);
+ return enhance;
+}
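+// Worked example for compute_stretch_curve(), assuming NUM_HISTOGRAM_BINS is
+// 128 (512 16-bit codes per bin): if the 1% quantile falls in bin 4 then
+// hist_lo = 4 * 512 = 2048 while level_lo = 0.015 * 65536 ~ 983; the default
+// lo_max of 500 clamps the move, so the appended point maps 983 + 500 = 1483
+// down to 983, i.e. the shadows are pulled down by at most lo_max codes.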
+
+Pwl apply_manual_contrast(Pwl const &gamma_curve, double brightness,
+ double contrast)
+{
+ Pwl new_gamma_curve;
+ RPI_LOG("Manual brightness " << brightness << " contrast " << contrast);
+ gamma_curve.Map([&](double x, double y) {
+ new_gamma_curve.Append(
+ x, std::max(0.0, std::min(65535.0,
+ (y - 32768) * contrast +
+ 32768 + brightness)));
+ });
+ return new_gamma_curve;
+}
+
+void Contrast::Process(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ (void)image_metadata;
+ double brightness = brightness_, contrast = contrast_;
+ Histogram histogram(stats->hist[0].g_hist, NUM_HISTOGRAM_BINS);
+ // We look at the histogram and adjust the gamma curve in the following
+ // ways: 1. Adjust the gamma curve so as to pull the start of the
+ // histogram down, and possibly push the end up.
+ Pwl gamma_curve = config_.gamma_curve;
+ if (config_.ce_enable) {
+ if (config_.lo_max != 0 || config_.hi_max != 0)
+ gamma_curve = compute_stretch_curve(histogram, config_)
+ .Compose(gamma_curve);
+ // We could apply other adjustments (e.g. partial equalisation)
+ // based on the histogram...?
+ }
+ // 2. Finally apply any manually selected brightness/contrast
+ // adjustment.
+ if (brightness != 0 || contrast != 1.0)
+ gamma_curve = apply_manual_contrast(gamma_curve, brightness,
+ contrast);
+ // And fill in the status for output. Use more points towards the bottom
+ // of the curve.
+ ContrastStatus status;
+ fill_in_status(status, brightness, contrast, gamma_curve);
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ status_ = status;
+ }
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Contrast(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.hpp b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
new file mode 100644
index 00000000..2e38a762
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * contrast.hpp - contrast (gamma) control algorithm
+ */
+#pragma once
+
+#include <atomic>
+#include <mutex>
+
+#include "../contrast_algorithm.hpp"
+#include "../pwl.hpp"
+
+namespace RPi {
+
+// Back End algorithm to apply gamma (contrast) adjustment. Should be placed
+// after Back End AWB.
+
+struct ContrastConfig {
+ bool ce_enable;
+ double lo_histogram;
+ double lo_level;
+ double lo_max;
+ double hi_histogram;
+ double hi_level;
+ double hi_max;
+ Pwl gamma_curve;
+};
+
+class Contrast : public ContrastAlgorithm
+{
+public:
+ Contrast(Controller *controller = NULL);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void SetBrightness(double brightness) override;
+ void SetContrast(double contrast) override;
+ void Initialise() override;
+ void Prepare(Metadata *image_metadata) override;
+ void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+
+private:
+ ContrastConfig config_;
+ std::atomic<double> brightness_;
+ std::atomic<double> contrast_;
+ ContrastStatus status_;
+ std::mutex mutex_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.cpp b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
new file mode 100644
index 00000000..d31fae97
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * dpc.cpp - DPC (defective pixel correction) control algorithm
+ */
+
+#include "../logging.hpp"
+#include "dpc.hpp"
+
+using namespace RPi;
+
+// We could use the lux status to apply stronger settings in darkness, but for
+// now the strength is a fixed configuration value.
+
+#define NAME "rpi.dpc"
+
+Dpc::Dpc(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Dpc::Name() const
+{
+ return NAME;
+}
+
+void Dpc::Read(boost::property_tree::ptree const &params)
+{
+ config_.strength = params.get<int>("strength", 1);
+ if (config_.strength < 0 || config_.strength > 2)
+ throw std::runtime_error("Dpc: bad strength value");
+}
+
+void Dpc::Prepare(Metadata *image_metadata)
+{
+ DpcStatus dpc_status = {};
+ // Should we vary this with lux level or analogue gain? TBD.
+ dpc_status.strength = config_.strength;
+ RPI_LOG("Dpc: strength " << dpc_status.strength);
+ image_metadata->Set("dpc.status", dpc_status);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Dpc(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.hpp b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
new file mode 100644
index 00000000..9fb72867
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * dpc.hpp - DPC (defective pixel correction) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+#include "../dpc_status.h"
+
+namespace RPi {
+
+// Back End algorithm to apply appropriate DPC (defective pixel correction) settings.
+
+struct DpcConfig {
+ int strength;
+};
+
+class Dpc : public Algorithm
+{
+public:
+ Dpc(Controller *controller);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ DpcConfig config_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.cpp b/src/ipa/raspberrypi/controller/rpi/geq.cpp
new file mode 100644
index 00000000..ee0cb95d
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/geq.cpp
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * geq.cpp - GEQ (green equalisation) control algorithm
+ */
+
+#include "../device_status.h"
+#include "../logging.hpp"
+#include "../lux_status.h"
+#include "../pwl.hpp"
+
+#include "geq.hpp"
+
+using namespace RPi;
+
+// We use the lux status so that we can apply stronger settings in darkness (if
+// necessary).
+
+#define NAME "rpi.geq"
+
+Geq::Geq(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Geq::Name() const
+{
+ return NAME;
+}
+
+void Geq::Read(boost::property_tree::ptree const &params)
+{
+ config_.offset = params.get<uint16_t>("offset", 0);
+ config_.slope = params.get<double>("slope", 0.0);
+ if (config_.slope < 0.0 || config_.slope >= 1.0)
+ throw std::runtime_error("Geq: bad slope value");
+ if (params.get_child_optional("strength"))
+ config_.strength.Read(params.get_child("strength"));
+}
+
+void Geq::Prepare(Metadata *image_metadata)
+{
+ LuxStatus lux_status = {};
+ lux_status.lux = 400;
+ if (image_metadata->Get("lux.status", lux_status))
+ RPI_WARN("Geq: no lux data found");
+ DeviceStatus device_status = {};
+ device_status.analogue_gain = 1.0; // in case not found
+ if (image_metadata->Get("device.status", device_status))
+ RPI_WARN("Geq: no device metadata - use analogue gain of 1x");
+ GeqStatus geq_status = {};
+ double strength =
+ config_.strength.Empty()
+ ? 1.0
+ : config_.strength.Eval(config_.strength.Domain().Clip(
+ lux_status.lux));
+ strength *= device_status.analogue_gain;
+ double offset = config_.offset * strength;
+ double slope = config_.slope * strength;
+ geq_status.offset = std::min(65535.0, std::max(0.0, offset));
+ geq_status.slope = std::min(.99999, std::max(0.0, slope));
+ RPI_LOG("Geq: offset " << geq_status.offset << " slope "
+ << geq_status.slope << " (analogue gain "
+ << device_status.analogue_gain << " lux "
+ << lux_status.lux << ")");
+ image_metadata->Set("geq.status", geq_status);
+}
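+// Illustration of the strength calculation above: with offset = 204,
+// slope = 0.01, no strength curve and an analogue gain of 2.0, the strength
+// becomes 1.0 * 2.0 = 2.0, so the programmed values are offset 408 and
+// slope 0.02 (both comfortably inside their clamps).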
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Geq(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.hpp b/src/ipa/raspberrypi/controller/rpi/geq.hpp
new file mode 100644
index 00000000..7d4bd38d
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/geq.hpp
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * geq.hpp - GEQ (green equalisation) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+#include "../geq_status.h"
+
+namespace RPi {
+
+// Back End algorithm to apply appropriate GEQ settings.
+
+struct GeqConfig {
+ uint16_t offset;
+ double slope;
+ Pwl strength; // lux to strength factor
+};
+
+class Geq : public Algorithm
+{
+public:
+ Geq(Controller *controller);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ GeqConfig config_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.cpp b/src/ipa/raspberrypi/controller/rpi/lux.cpp
new file mode 100644
index 00000000..154db153
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/lux.cpp
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * lux.cpp - Lux control algorithm
+ */
+#include <math.h>
+
+#include "linux/bcm2835-isp.h"
+
+#include "../device_status.h"
+#include "../logging.hpp"
+
+#include "lux.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.lux"
+
+Lux::Lux(Controller *controller)
+ : Algorithm(controller)
+{
+ // Put in some defaults as there will be no meaningful values until
+ // Process has run.
+ status_.aperture = 1.0;
+ status_.lux = 400;
+}
+
+char const *Lux::Name() const
+{
+ return NAME;
+}
+
+void Lux::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG(Name());
+ reference_shutter_speed_ =
+ params.get<double>("reference_shutter_speed");
+ reference_gain_ = params.get<double>("reference_gain");
+ reference_aperture_ = params.get<double>("reference_aperture", 1.0);
+ reference_Y_ = params.get<double>("reference_Y");
+ reference_lux_ = params.get<double>("reference_lux");
+ current_aperture_ = reference_aperture_;
+}
+
+void Lux::Prepare(Metadata *image_metadata)
+{
+ std::unique_lock<std::mutex> lock(mutex_);
+ image_metadata->Set("lux.status", status_);
+}
+
+void Lux::Process(StatisticsPtr &stats, Metadata *image_metadata)
+{
+ // set some initial values to shut the compiler up
+ DeviceStatus device_status =
+ { .shutter_speed = 1.0,
+ .analogue_gain = 1.0,
+ .lens_position = 0.0,
+ .aperture = 0.0,
+ .flash_intensity = 0.0 };
+ if (image_metadata->Get("device.status", device_status) == 0) {
+ double current_gain = device_status.analogue_gain;
+ double current_shutter_speed = device_status.shutter_speed;
+ double current_aperture = device_status.aperture;
+ if (current_aperture == 0)
+ current_aperture = current_aperture_;
+ uint64_t sum = 0;
+ uint32_t num = 0;
+ uint32_t *bin = stats->hist[0].g_hist;
+ const int num_bins = sizeof(stats->hist[0].g_hist) /
+ sizeof(stats->hist[0].g_hist[0]);
+ for (int i = 0; i < num_bins; i++)
+ sum += bin[i] * (uint64_t)i, num += bin[i];
+ // add .5 to reflect the mid-points of bins
+ double current_Y = sum / (double)num + .5;
+ double gain_ratio = reference_gain_ / current_gain;
+ double shutter_speed_ratio =
+ reference_shutter_speed_ / current_shutter_speed;
+ double aperture_ratio = reference_aperture_ / current_aperture;
+ double Y_ratio = current_Y * (65536 / num_bins) / reference_Y_;
+ double estimated_lux = shutter_speed_ratio * gain_ratio *
+ aperture_ratio * aperture_ratio *
+ Y_ratio * reference_lux_;
+ LuxStatus status;
+ status.lux = estimated_lux;
+ status.aperture = current_aperture;
+ RPI_LOG(Name() << ": estimated lux " << estimated_lux);
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ status_ = status;
+ }
+ // Overwrite the metadata here as well, so that downstream
+ // algorithms get the latest value.
+ image_metadata->Set("lux.status", status);
+ } else
+ RPI_WARN(Name() << ": no device metadata");
+}
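+// Worked example of the lux estimate (illustrative numbers): if the
+// reference image used a 10000us shutter at gain 1.0 and reference_lux 400,
+// and the current frame uses 20000us at gain 2.0 with the same aperture and
+// the same scaled Y, then shutter_speed_ratio = 0.5, gain_ratio = 0.5 and
+// Y_ratio = 1, giving 0.5 * 0.5 * 1 * 1 * 400 = 100 lux - the scene is a
+// quarter as bright as the reference.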
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Lux(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.hpp b/src/ipa/raspberrypi/controller/rpi/lux.hpp
new file mode 100644
index 00000000..eb935409
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/lux.hpp
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * lux.hpp - Lux control algorithm
+ */
+#pragma once
+
+#include <atomic>
+#include <mutex>
+
+#include "../lux_status.h"
+#include "../algorithm.hpp"
+
+// This is our implementation of the "lux control algorithm".
+
+namespace RPi {
+
+class Lux : public Algorithm
+{
+public:
+ Lux(Controller *controller);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+ void Process(StatisticsPtr &stats, Metadata *image_metadata) override;
+ void SetCurrentAperture(double aperture);
+
+private:
+ // These values define the conditions of the reference image, against
+ // which we compare the new image.
+ double reference_shutter_speed_; // in micro-seconds
+ double reference_gain_;
+ double reference_aperture_; // units of 1/f
+ double reference_Y_; // out of 65536
+ double reference_lux_;
+ std::atomic<double> current_aperture_;
+ LuxStatus status_;
+ std::mutex mutex_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.cpp b/src/ipa/raspberrypi/controller/rpi/noise.cpp
new file mode 100644
index 00000000..2209d791
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/noise.cpp
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * noise.cpp - Noise control algorithm
+ */
+
+#include <math.h>
+
+#include "../device_status.h"
+#include "../logging.hpp"
+#include "../noise_status.h"
+
+#include "noise.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.noise"
+
+Noise::Noise(Controller *controller)
+ : Algorithm(controller), mode_factor_(1.0)
+{
+}
+
+char const *Noise::Name() const
+{
+ return NAME;
+}
+
+void Noise::SwitchMode(CameraMode const &camera_mode)
+{
+ // For example, we would expect a 2x2 binned mode to have a "noise
+ // factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ mode_factor_ = std::max(1.0, camera_mode.noise_factor);
+}
+
+void Noise::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG(Name());
+ reference_constant_ = params.get<double>("reference_constant");
+ reference_slope_ = params.get<double>("reference_slope");
+}
+
+void Noise::Prepare(Metadata *image_metadata)
+{
+ struct DeviceStatus device_status;
+ device_status.analogue_gain = 1.0; // keep compiler calm
+ if (image_metadata->Get("device.status", device_status) == 0) {
+ // There is a slight question as to exactly how the noise
+ // profile, specifically the constant part of it, scales. For
+ // now we assume it all scales the same, and we'll revisit this
+ // if it proves substantially wrong. NOTE: we may also want to
+ // make some adjustments based on the camera mode (such as
+ // binning), if we knew how to discover it...
+ double factor = sqrt(device_status.analogue_gain) / mode_factor_;
+ struct NoiseStatus status;
+ status.noise_constant = reference_constant_ * factor;
+ status.noise_slope = reference_slope_ * factor;
+ image_metadata->Set("noise.status", status);
+ RPI_LOG(Name() << ": constant " << status.noise_constant
+ << " slope " << status.noise_slope);
+ } else
+ RPI_WARN(Name() << " no metadata");
+}
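+// Illustration of the scaling above: in a 2x2 binned mode (mode_factor_ = 2)
+// at an analogue gain of 4.0, factor = sqrt(4.0) / 2 = 1.0 and the
+// programmed profile equals the gain-1.0 reference; at gain 1.0 in the same
+// mode it would instead be halved.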
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return new Noise(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.hpp b/src/ipa/raspberrypi/controller/rpi/noise.hpp
new file mode 100644
index 00000000..51d46a3d
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/noise.hpp
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * noise.hpp - Noise control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+#include "../noise_status.h"
+
+// This is our implementation of the "noise algorithm".
+
+namespace RPi {
+
+class Noise : public Algorithm
+{
+public:
+ Noise(Controller *controller);
+ char const *Name() const override;
+ void SwitchMode(CameraMode const &camera_mode) override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ // the noise profile for analogue gain of 1.0
+ double reference_constant_;
+ double reference_slope_;
+ std::atomic<double> mode_factor_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.cpp b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
new file mode 100644
index 00000000..28d9d983
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * sdn.cpp - SDN (spatial denoise) control algorithm
+ */
+
+#include "../noise_status.h"
+#include "../sdn_status.h"
+
+#include "sdn.hpp"
+
+using namespace RPi;
+
+// Calculate settings for the spatial denoise block using the noise profile in
+// the image metadata.
+
+#define NAME "rpi.sdn"
+
+Sdn::Sdn(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Sdn::Name() const
+{
+ return NAME;
+}
+
+void Sdn::Read(boost::property_tree::ptree const &params)
+{
+ deviation_ = params.get<double>("deviation", 3.2);
+ strength_ = params.get<double>("strength", 0.75);
+}
+
+void Sdn::Initialise() {}
+
+void Sdn::Prepare(Metadata *image_metadata)
+{
+ struct NoiseStatus noise_status = {};
+ noise_status.noise_slope = 3.0; // in case no metadata
+ if (image_metadata->Get("noise.status", noise_status) != 0)
+ RPI_WARN("Sdn: no noise profile found");
+ RPI_LOG("Noise profile: constant " << noise_status.noise_constant
+ << " slope "
+ << noise_status.noise_slope);
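+ // The denoise thresholds are the noise profile scaled up by the
+ // requested number of standard deviations; the strength is passed
+ // through unchanged to control how strongly denoising is applied.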
+ struct SdnStatus status;
+ status.noise_constant = noise_status.noise_constant * deviation_;
+ status.noise_slope = noise_status.noise_slope * deviation_;
+ status.strength = strength_;
+ image_metadata->Set("sdn.status", status);
+ RPI_LOG("Sdn: programmed constant " << status.noise_constant
+ << " slope " << status.noise_slope
+ << " strength "
+ << status.strength);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return new Sdn(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.hpp b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
new file mode 100644
index 00000000..d48aab7e
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * sdn.hpp - SDN (spatial denoise) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+
+namespace RPi {
+
+// Algorithm to calculate correct spatial denoise (SDN) settings.
+
+class Sdn : public Algorithm
+{
+public:
+ Sdn(Controller *controller = nullptr);
+ char const *Name() const override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Initialise() override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ double deviation_;
+ double strength_;
+};
+
+} // namespace RPi
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
new file mode 100644
index 00000000..1f07bb62
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * sharpen.cpp - sharpening control algorithm
+ */
+
+#include <math.h>
+
+#include "../logging.hpp"
+#include "../sharpen_status.h"
+
+#include "sharpen.hpp"
+
+using namespace RPi;
+
+#define NAME "rpi.sharpen"
+
+Sharpen::Sharpen(Controller *controller)
+ : Algorithm(controller), mode_factor_(1.0)
+{
+}
+
+char const *Sharpen::Name() const
+{
+ return NAME;
+}
+
+void Sharpen::SwitchMode(CameraMode const &camera_mode)
+{
+ // The noise factor of a binned mode can never be less than one.
+ mode_factor_ = std::max(1.0, camera_mode.noise_factor);
+}
+
+void Sharpen::Read(boost::property_tree::ptree const &params)
+{
+ RPI_LOG(Name());
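+ // Roughly: threshold sets the level at which sharpening starts to act,
+ // strength the amount of sharpening applied, and limit a cap on the
+ // amount of sharpening added. All default to 1.0.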
+ threshold_ = params.get<double>("threshold", 1.0);
+ strength_ = params.get<double>("strength", 1.0);
+ limit_ = params.get<double>("limit", 1.0);
+}
+
+void Sharpen::Prepare(Metadata *image_metadata)
+{
+ double mode_factor = mode_factor_;
+ struct SharpenStatus status;
+ // Binned modes seem to need the sharpening toned down with this
+ // pipeline.
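+ // Raising the threshold and lowering the strength and limit all act to
+ // reduce the sharpening in proportion to the mode factor.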
+ status.threshold = threshold_ * mode_factor;
+ status.strength = strength_ / mode_factor;
+ status.limit = limit_ / mode_factor;
+ image_metadata->Set("sharpen.status", status);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return new Sharpen(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
new file mode 100644
index 00000000..3b0d6801
--- /dev/null
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi (Trading) Limited
+ *
+ * sharpen.hpp - sharpening control algorithm
+ */
+#pragma once
+
+#include "../algorithm.hpp"
+#include "../sharpen_status.h"
+
+// This is our implementation of the "sharpen algorithm".
+
+namespace RPi {
+
+class Sharpen : public Algorithm
+{
+public:
+ Sharpen(Controller *controller);
+ char const *Name() const override;
+ void SwitchMode(CameraMode const &camera_mode) override;
+ void Read(boost::property_tree::ptree const &params) override;
+ void Prepare(Metadata *image_metadata) override;
+
+private:
+ double threshold_;
+ double strength_;
+ double limit_;
+ std::atomic<double> mode_factor_;
+};
+
+} // namespace RPi