author     Naushir Patuck <naush@raspberrypi.com>  2022-07-27 09:55:18 +0100
committer  Laurent Pinchart <laurent.pinchart@ideasonboard.com>  2022-07-27 18:12:13 +0300
commit     acd5d9979fca93bf7a0ffa6f5d08f5cf43ba0cee
tree       b2fb78d222edac459107da0d54991e7a7f5b4dbb /src/ipa/raspberrypi/controller/rpi
parent     177df04d2b7f357ebe41f1a9809ab68b6f948082
ipa: raspberrypi: Change to C style code comments
As part of the on-going refactor efforts for the source files in
src/ipa/raspberrypi/, switch all C++ style comments to C style comments.

Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
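For illustration, a minimal sketch of the comment-style conversion the patch applies throughout (hypothetical snippet, not taken from the patched sources):

// Before: C++ style single-line comment
static const int exampleBits = 13; // an illustrative value, not from the patch

/* After: C style single-line comment */
static const int exampleBits = 13; /* an illustrative value, not from the patch */

/*
 * Multi-line comments become a block with the opening and closing markers
 * on their own lines and each inner line prefixed with " * ".
 */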
Diffstat (limited to 'src/ipa/raspberrypi/controller/rpi')
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.cpp          269
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.hpp           24
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.cpp         180
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/alsc.hpp          50
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.cpp          192
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/awb.hpp          110
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.cpp   10
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/black_level.hpp    4
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.cpp           20
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/ccm.hpp            4
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.cpp      74
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/contrast.hpp       8
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.cpp           10
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/dpc.hpp            4
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.cpp           10
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/geq.hpp            6
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.cpp           16
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/lux.hpp           14
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.cpp         24
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/noise.hpp          6
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.cpp           12
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sdn.hpp            4
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.cpp       32
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/sharpen.hpp        4
24 files changed, 642 insertions, 445 deletions
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp
index 52a41a55..5a282a42 100644
--- a/src/ipa/raspberrypi/controller/rpi/agc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/agc.cpp
@@ -28,7 +28,7 @@ LOG_DEFINE_CATEGORY(RPiAgc)
#define NAME "rpi.agc"
-#define PIPELINE_BITS 13 // seems to be a 13-bit pipeline
+#define PIPELINE_BITS 13 /* seems to be a 13-bit pipeline */
void AgcMeteringMode::read(boost::property_tree::ptree const &params)
{
@@ -150,7 +150,7 @@ void AgcConfig::read(boost::property_tree::ptree const &params)
convergenceFrames = params.get<unsigned int>("convergence_frames", 6);
fastReduceThreshold = params.get<double>("fast_reduce_threshold", 0.4);
baseEv = params.get<double>("base_ev", 1.0);
- // Start with quite a low value as ramping up is easier than ramping down.
+ /* Start with quite a low value as ramping up is easier than ramping down. */
defaultExposureTime = params.get<double>("default_exposure_time", 1000) * 1us;
defaultAnalogueGain = params.get<double>("default_analogueGain", 1.0);
}
@@ -170,8 +170,10 @@ Agc::Agc(Controller *controller)
maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0)
{
memset(&awb_, 0, sizeof(awb_));
- // Setting status_.totalExposureValue_ to zero initially tells us
- // it's not been calculated yet (i.e. Process hasn't yet run).
+ /*
+ * Setting status_.totalExposureValue_ to zero initially tells us
+ * it's not been calculated yet (i.e. Process hasn't yet run).
+ */
memset(&status_, 0, sizeof(status_));
status_.ev = ev_;
}
@@ -185,16 +187,18 @@ void Agc::read(boost::property_tree::ptree const &params)
{
LOG(RPiAgc, Debug) << "Agc";
config_.read(params);
- // Set the config's defaults (which are the first ones it read) as our
- // current modes, until someone changes them. (they're all known to
- // exist at this point)
+ /*
+ * Set the config's defaults (which are the first ones it read) as our
+ * current modes, until someone changes them. (they're all known to
+ * exist at this point)
+ */
meteringModeName_ = config_.defaultMeteringMode;
meteringMode_ = &config_.meteringModes[meteringModeName_];
exposureModeName_ = config_.defaultExposureMode;
exposureMode_ = &config_.exposureModes[exposureModeName_];
constraintModeName_ = config_.defaultConstraintMode;
constraintMode_ = &config_.constraintModes[constraintModeName_];
- // Set up the "last shutter/gain" values, in case AGC starts "disabled".
+ /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */
status_.shutterTime = config_.defaultExposureTime;
status_.analogueGain = config_.defaultAnalogueGain;
}
@@ -218,8 +222,10 @@ void Agc::resume()
unsigned int Agc::getConvergenceFrames() const
{
- // If shutter and gain have been explicitly set, there is no
- // convergence to happen, so no need to drop any frames - return zero.
+ /*
+ * If shutter and gain have been explicitly set, there is no
+ * convergence to happen, so no need to drop any frames - return zero.
+ */
if (fixedShutter_ && fixedAnalogueGain_)
return 0;
else
@@ -244,14 +250,14 @@ void Agc::setMaxShutter(Duration maxShutter)
void Agc::setFixedShutter(Duration fixedShutter)
{
fixedShutter_ = fixedShutter;
- // Set this in case someone calls Pause() straight after.
+ /* Set this in case someone calls Pause() straight after. */
status_.shutterTime = clipShutter(fixedShutter_);
}
void Agc::setFixedAnalogueGain(double fixedAnalogueGain)
{
fixedAnalogueGain_ = fixedAnalogueGain;
- // Set this in case someone calls Pause() straight after.
+ /* Set this in case someone calls Pause() straight after. */
status_.analogueGain = fixedAnalogueGain;
}
@@ -280,30 +286,32 @@ void Agc::switchMode(CameraMode const &cameraMode,
Duration fixedShutter = clipShutter(fixedShutter_);
if (fixedShutter && fixedAnalogueGain_) {
- // We're going to reset the algorithm here with these fixed values.
+ /* We're going to reset the algorithm here with these fixed values. */
fetchAwbStatus(metadata);
double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
ASSERT(minColourGain != 0.0);
- // This is the equivalent of computeTargetExposure and applyDigitalGain.
+ /* This is the equivalent of computeTargetExposure and applyDigitalGain. */
target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_;
target_.totalExposure = target_.totalExposureNoDG / minColourGain;
- // Equivalent of filterExposure. This resets any "history".
+ /* Equivalent of filterExposure. This resets any "history". */
filtered_ = target_;
- // Equivalent of divideUpExposure.
+ /* Equivalent of divideUpExposure. */
filtered_.shutter = fixedShutter;
filtered_.analogueGain = fixedAnalogueGain_;
} else if (status_.totalExposureValue) {
- // On a mode switch, various things could happen:
- // - the exposure profile might change
- // - a fixed exposure or gain might be set
- // - the new mode's sensitivity might be different
- // We cope with the last of these by scaling the target values. After
- // that we just need to re-divide the exposure/gain according to the
- // current exposure profile, which takes care of everything else.
+ /*
+ * On a mode switch, various things could happen:
+ * - the exposure profile might change
+ * - a fixed exposure or gain might be set
+ * - the new mode's sensitivity might be different
+ * We cope with the last of these by scaling the target values. After
+ * that we just need to re-divide the exposure/gain according to the
+ * current exposure profile, which takes care of everything else.
+ */
double ratio = lastSensitivity_ / cameraMode.sensitivity;
target_.totalExposureNoDG *= ratio;
@@ -313,29 +321,31 @@ void Agc::switchMode(CameraMode const &cameraMode,
divideUpExposure();
} else {
- // We come through here on startup, when at least one of the shutter
- // or gain has not been fixed. We must still write those values out so
- // that they will be applied immediately. We supply some arbitrary defaults
- // for any that weren't set.
-
- // Equivalent of divideUpExposure.
+ /*
+ * We come through here on startup, when at least one of the shutter
+ * or gain has not been fixed. We must still write those values out so
+ * that they will be applied immediately. We supply some arbitrary defaults
+ * for any that weren't set.
+ */
+
+ /* Equivalent of divideUpExposure. */
filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime;
filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain;
}
writeAndFinish(metadata, false);
- // We must remember the sensitivity of this mode for the next SwitchMode.
+ /* We must remember the sensitivity of this mode for the next SwitchMode. */
lastSensitivity_ = cameraMode.sensitivity;
}
void Agc::prepare(Metadata *imageMetadata)
{
status_.digitalGain = 1.0;
- fetchAwbStatus(imageMetadata); // always fetch it so that Process knows it's been done
+ fetchAwbStatus(imageMetadata); /* always fetch it so that Process knows it's been done */
if (status_.totalExposureValue) {
- // Process has run, so we have meaningful values.
+ /* Process has run, so we have meaningful values. */
DeviceStatus deviceStatus;
if (imageMetadata->get("device.status", deviceStatus) == 0) {
Duration actualExposure = deviceStatus.shutterSpeed *
@@ -343,14 +353,16 @@ void Agc::prepare(Metadata *imageMetadata)
if (actualExposure) {
status_.digitalGain = status_.totalExposureValue / actualExposure;
LOG(RPiAgc, Debug) << "Want total exposure " << status_.totalExposureValue;
- // Never ask for a gain < 1.0, and also impose
- // some upper limit. Make it customisable?
+ /*
+ * Never ask for a gain < 1.0, and also impose
+ * some upper limit. Make it customisable?
+ */
status_.digitalGain = std::max(1.0, std::min(status_.digitalGain, 4.0));
LOG(RPiAgc, Debug) << "Actual exposure " << actualExposure;
LOG(RPiAgc, Debug) << "Use digitalGain " << status_.digitalGain;
LOG(RPiAgc, Debug) << "Effective exposure "
<< actualExposure * status_.digitalGain;
- // Decide whether AEC/AGC has converged.
+ /* Decide whether AEC/AGC has converged. */
updateLockStatus(deviceStatus);
}
} else
@@ -362,44 +374,52 @@ void Agc::prepare(Metadata *imageMetadata)
void Agc::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
frameCount_++;
- // First a little bit of housekeeping, fetching up-to-date settings and
- // configuration, that kind of thing.
+ /*
+ * First a little bit of housekeeping, fetching up-to-date settings and
+ * configuration, that kind of thing.
+ */
housekeepConfig();
- // Get the current exposure values for the frame that's just arrived.
+ /* Get the current exposure values for the frame that's just arrived. */
fetchCurrentExposure(imageMetadata);
- // Compute the total gain we require relative to the current exposure.
+ /* Compute the total gain we require relative to the current exposure. */
double gain, targetY;
computeGain(stats.get(), imageMetadata, gain, targetY);
- // Now compute the target (final) exposure which we think we want.
+ /* Now compute the target (final) exposure which we think we want. */
computeTargetExposure(gain);
- // Some of the exposure has to be applied as digital gain, so work out
- // what that is. This function also tells us whether it's decided to
- // "desaturate" the image more quickly.
+ /*
+ * Some of the exposure has to be applied as digital gain, so work out
+ * what that is. This function also tells us whether it's decided to
+ * "desaturate" the image more quickly.
+ */
bool desaturate = applyDigitalGain(gain, targetY);
- // The results have to be filtered so as not to change too rapidly.
+ /* The results have to be filtered so as not to change too rapidly. */
filterExposure(desaturate);
- // The last thing is to divide up the exposure value into a shutter time
- // and analogue gain, according to the current exposure mode.
+ /*
+ * The last thing is to divide up the exposure value into a shutter time
+ * and analogue gain, according to the current exposure mode.
+ */
divideUpExposure();
- // Finally advertise what we've done.
+ /* Finally advertise what we've done. */
writeAndFinish(imageMetadata, desaturate);
}
void Agc::updateLockStatus(DeviceStatus const &deviceStatus)
{
- const double errorFactor = 0.10; // make these customisable?
+ const double errorFactor = 0.10; /* make these customisable? */
const int maxLockCount = 5;
- // Reset "lock count" when we exceed this multiple of errorFactor
+ /* Reset "lock count" when we exceed this multiple of errorFactor */
const double resetMargin = 1.5;
- // Add 200us to the exposure time error to allow for line quantisation.
+ /* Add 200us to the exposure time error to allow for line quantisation. */
Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us;
double gainError = lastDeviceStatus_.analogueGain * errorFactor;
Duration targetError = lastTargetExposure_ * errorFactor;
- // Note that we don't know the exposure/gain limits of the sensor, so
- // the values we keep requesting may be unachievable. For this reason
- // we only insist that we're close to values in the past few frames.
+ /*
+ * Note that we don't know the exposure/gain limits of the sensor, so
+ * the values we keep requesting may be unachievable. For this reason
+ * we only insist that we're close to values in the past few frames.
+ */
if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError &&
deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError &&
deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError &&
@@ -430,7 +450,7 @@ static void copyString(std::string const &s, char *d, size_t size)
void Agc::housekeepConfig()
{
- // First fetch all the up-to-date settings, so no one else has to do it.
+ /* First fetch all the up-to-date settings, so no one else has to do it. */
status_.ev = ev_;
status_.fixedShutter = clipShutter(fixedShutter_);
status_.fixedAnalogueGain = fixedAnalogueGain_;
@@ -438,8 +458,10 @@ void Agc::housekeepConfig()
LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter "
<< status_.fixedShutter << " fixedAnalogueGain "
<< status_.fixedAnalogueGain;
- // Make sure the "mode" pointers point to the up-to-date things, if
- // they've changed.
+ /*
+ * Make sure the "mode" pointers point to the up-to-date things, if
+ * they've changed.
+ */
if (strcmp(meteringModeName_.c_str(), status_.meteringMode)) {
auto it = config_.meteringModes.find(meteringModeName_);
if (it == config_.meteringModes.end())
@@ -491,7 +513,7 @@ void Agc::fetchCurrentExposure(Metadata *imageMetadata)
void Agc::fetchAwbStatus(Metadata *imageMetadata)
{
- awb_.gainR = 1.0; // in case not found in metadata
+ awb_.gainR = 1.0; /* in case not found in metadata */
awb_.gainG = 1.0;
awb_.gainB = 1.0;
if (imageMetadata->get("awb.status", awb_) != 0)
@@ -502,8 +524,10 @@ static double computeInitialY(bcm2835_isp_stats *stats, AwbStatus const &awb,
double weights[], double gain)
{
bcm2835_isp_stats_region *regions = stats->agc_stats;
- // Note how the calculation below means that equal weights give you
- // "average" metering (i.e. all pixels equally important).
+ /*
+ * Note how the calculation below means that equal weights give you
+ * "average" metering (i.e. all pixels equally important).
+ */
double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0;
for (int i = 0; i < AGC_STATS_SIZE; i++) {
double counted = regions[i].counted;
@@ -525,11 +549,13 @@ static double computeInitialY(bcm2835_isp_stats *stats, AwbStatus const &awb,
return ySum / pixelSum / (1 << PIPELINE_BITS);
}
-// We handle extra gain through EV by adjusting our Y targets. However, you
-// simply can't monitor histograms once they get very close to (or beyond!)
-// saturation, so we clamp the Y targets to this value. It does mean that EV
-// increases don't necessarily do quite what you might expect in certain
-// (contrived) cases.
+/*
+ * We handle extra gain through EV by adjusting our Y targets. However, you
+ * simply can't monitor histograms once they get very close to (or beyond!)
+ * saturation, so we clamp the Y targets to this value. It does mean that EV
+ * increases don't necessarily do quite what you might expect in certain
+ * (contrived) cases.
+ */
#define EV_GAIN_Y_TARGET_LIMIT 0.9
@@ -546,18 +572,22 @@ void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *imageMetadata,
double &gain, double &targetY)
{
struct LuxStatus lux = {};
- lux.lux = 400; // default lux level to 400 in case no metadata found
+ lux.lux = 400; /* default lux level to 400 in case no metadata found */
if (imageMetadata->get("lux.status", lux) != 0)
LOG(RPiAgc, Warning) << "Agc: no lux level found";
Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS);
double evGain = status_.ev * config_.baseEv;
- // The initial gain and target_Y come from some of the regions. After
- // that we consider the histogram constraints.
+ /*
+ * The initial gain and target_Y come from some of the regions. After
+ * that we consider the histogram constraints.
+ */
targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux));
targetY = std::min(EV_GAIN_Y_TARGET_LIMIT, targetY * evGain);
- // Do this calculation a few times as brightness increase can be
- // non-linear when there are saturated regions.
+ /*
+ * Do this calculation a few times as brightness increase can be
+ * non-linear when there are saturated regions.
+ */
gain = 1.0;
for (int i = 0; i < 8; i++) {
double initialY = computeInitialY(statistics, awb_, meteringMode_->weights, gain);
@@ -565,7 +595,7 @@ void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *imageMetadata,
gain *= extraGain;
LOG(RPiAgc, Debug) << "Initial Y " << initialY << " target " << targetY
<< " gives gain " << gain;
- if (extraGain < 1.01) // close enough
+ if (extraGain < 1.01) /* close enough */
break;
}
@@ -592,20 +622,23 @@ void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *imageMetadata,
void Agc::computeTargetExposure(double gain)
{
if (status_.fixedShutter && status_.fixedAnalogueGain) {
- // When ag and shutter are both fixed, we need to drive the
- // total exposure so that we end up with a digital gain of at least
- // 1/minColourGain. Otherwise we'd desaturate channels causing
- // white to go cyan or magenta.
+ /*
+ * When ag and shutter are both fixed, we need to drive the
+ * total exposure so that we end up with a digital gain of at least
+ * 1/minColourGain. Otherwise we'd desaturate channels causing
+ * white to go cyan or magenta.
+ */
double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
ASSERT(minColourGain != 0.0);
target_.totalExposure =
status_.fixedShutter * status_.fixedAnalogueGain / minColourGain;
} else {
- // The statistics reflect the image without digital gain, so the final
- // total exposure we're aiming for is:
+ /*
+ * The statistics reflect the image without digital gain, so the final
+ * total exposure we're aiming for is:
+ */
target_.totalExposure = current_.totalExposureNoDG * gain;
- // The final target exposure is also limited to what the exposure
- // mode allows.
+ /* The final target exposure is also limited to what the exposure mode allows. */
Duration maxShutter = status_.fixedShutter
? status_.fixedShutter
: exposureMode_->shutter.back();
@@ -625,17 +658,21 @@ bool Agc::applyDigitalGain(double gain, double targetY)
double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
ASSERT(minColourGain != 0.0);
double dg = 1.0 / minColourGain;
- // I think this pipeline subtracts black level and rescales before we
- // get the stats, so no need to worry about it.
+ /*
+ * I think this pipeline subtracts black level and rescales before we
+ * get the stats, so no need to worry about it.
+ */
LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
<< " target_Y " << targetY;
- // Finally, if we're trying to reduce exposure but the target_Y is
- // "close" to 1.0, then the gain computed for that constraint will be
- // only slightly less than one, because the measured Y can never be
- // larger than 1.0. When this happens, demand a large digital gain so
- // that the exposure can be reduced, de-saturating the image much more
- // quickly (and we then approach the correct value more quickly from
- // below).
+ /*
+ * Finally, if we're trying to reduce exposure but the target_Y is
+ * "close" to 1.0, then the gain computed for that constraint will be
+ * only slightly less than one, because the measured Y can never be
+ * larger than 1.0. When this happens, demand a large digital gain so
+ * that the exposure can be reduced, de-saturating the image much more
+ * quickly (and we then approach the correct value more quickly from
+ * below).
+ */
bool desaturate = targetY > config_.fastReduceThreshold &&
gain < sqrt(targetY);
if (desaturate)
@@ -649,8 +686,10 @@ bool Agc::applyDigitalGain(double gain, double targetY)
void Agc::filterExposure(bool desaturate)
{
double speed = config_.speed;
- // AGC adapts instantly if both shutter and gain are directly specified
- // or we're in the startup phase.
+ /*
+ * AGC adapts instantly if both shutter and gain are directly specified
+ * or we're in the startup phase.
+ */
if ((status_.fixedShutter && status_.fixedAnalogueGain) ||
frameCount_ <= config_.startupFrames)
speed = 1.0;
@@ -658,15 +697,19 @@ void Agc::filterExposure(bool desaturate)
filtered_.totalExposure = target_.totalExposure;
filtered_.totalExposureNoDG = target_.totalExposureNoDG;
} else {
- // If close to the result go faster, to save making so many
- // micro-adjustments on the way. (Make this customisable?)
+ /*
+ * If close to the result go faster, to save making so many
+ * micro-adjustments on the way. (Make this customisable?)
+ */
if (filtered_.totalExposure < 1.2 * target_.totalExposure &&
filtered_.totalExposure > 0.8 * target_.totalExposure)
speed = sqrt(speed);
filtered_.totalExposure = speed * target_.totalExposure +
filtered_.totalExposure * (1.0 - speed);
- // When desaturing, take a big jump down in totalExposureNoDG,
- // which we'll hide with digital gain.
+ /*
+ * When desaturing, take a big jump down in totalExposureNoDG,
+ * which we'll hide with digital gain.
+ */
if (desaturate)
filtered_.totalExposureNoDG =
target_.totalExposureNoDG;
@@ -675,9 +718,11 @@ void Agc::filterExposure(bool desaturate)
speed * target_.totalExposureNoDG +
filtered_.totalExposureNoDG * (1.0 - speed);
}
- // We can't let the totalExposureNoDG exposure deviate too far below the
- // total exposure, as there might not be enough digital gain available
- // in the ISP to hide it (which will cause nasty oscillation).
+ /*
+ * We can't let the totalExposureNoDG exposure deviate too far below the
+ * total exposure, as there might not be enough digital gain available
+ * in the ISP to hide it (which will cause nasty oscillation).
+ */
if (filtered_.totalExposureNoDG <
filtered_.totalExposure * config_.fastReduceThreshold)
filtered_.totalExposureNoDG = filtered_.totalExposure * config_.fastReduceThreshold;
@@ -687,9 +732,11 @@ void Agc::filterExposure(bool desaturate)
void Agc::divideUpExposure()
{
- // Sending the fixed shutter/gain cases through the same code may seem
- // unnecessary, but it will make more sense when extend this to cover
- // variable aperture.
+ /*
+ * Sending the fixed shutter/gain cases through the same code may seem
+ * unnecessary, but it will make more sense when extend this to cover
+ * variable aperture.
+ */
Duration exposureValue = filtered_.totalExposureNoDG;
Duration shutterTime;
double analogueGain;
@@ -721,18 +768,22 @@ void Agc::divideUpExposure()
}
LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and "
<< analogueGain;
- // Finally adjust shutter time for flicker avoidance (require both
- // shutter and gain not to be fixed).
+ /*
+ * Finally adjust shutter time for flicker avoidance (require both
+ * shutter and gain not to be fixed).
+ */
if (!status_.fixedShutter && !status_.fixedAnalogueGain &&
status_.flickerPeriod) {
int flickerPeriods = shutterTime / status_.flickerPeriod;
if (flickerPeriods) {
Duration newShutterTime = flickerPeriods * status_.flickerPeriod;
analogueGain *= shutterTime / newShutterTime;
- // We should still not allow the ag to go over the
- // largest value in the exposure mode. Note that this
- // may force more of the total exposure into the digital
- // gain as a side-effect.
+ /*
+ * We should still not allow the ag to go over the
+ * largest value in the exposure mode. Note that this
+ * may force more of the total exposure into the digital
+ * gain as a side-effect.
+ */
analogueGain = std::min(analogueGain, exposureMode_->gain.back());
shutterTime = newShutterTime;
}
@@ -749,8 +800,10 @@ void Agc::writeAndFinish(Metadata *imageMetadata, bool desaturate)
status_.targetExposureValue = desaturate ? 0s : target_.totalExposureNoDG;
status_.shutterTime = filtered_.shutter;
status_.analogueGain = filtered_.analogueGain;
- // Write to metadata as well, in case anyone wants to update the camera
- // immediately.
+ /*
+ * Write to metadata as well, in case anyone wants to update the camera
+ * immediately.
+ */
imageMetadata->set("agc.status", status_);
LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
<< filtered_.totalExposure;
@@ -765,7 +818,7 @@ Duration Agc::clipShutter(Duration shutter)
return shutter;
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Agc(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.hpp b/src/ipa/raspberrypi/controller/rpi/agc.hpp
index 4ed7293b..c2d68b60 100644
--- a/src/ipa/raspberrypi/controller/rpi/agc.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/agc.hpp
@@ -15,10 +15,12 @@
#include "../agc_status.h"
#include "../pwl.hpp"
-// This is our implementation of AGC.
+/* This is our implementation of AGC. */
-// This is the number actually set up by the firmware, not the maximum possible
-// number (which is 16).
+/*
+ * This is the number actually set up by the firmware, not the maximum possible
+ * number (which is 16).
+ */
#define AGC_STATS_SIZE 15
@@ -73,7 +75,7 @@ public:
Agc(Controller *controller);
char const *name() const override;
void read(boost::property_tree::ptree const &params) override;
- // AGC handles "pausing" for itself.
+ /* AGC handles "pausing" for itself. */
bool isPaused() const override;
void pause() override;
void resume() override;
@@ -115,17 +117,17 @@ private:
libcamera::utils::Duration shutter;
double analogueGain;
libcamera::utils::Duration totalExposure;
- libcamera::utils::Duration totalExposureNoDG; // without digital gain
+ libcamera::utils::Duration totalExposureNoDG; /* without digital gain */
};
- ExposureValues current_; // values for the current frame
- ExposureValues target_; // calculate the values we want here
- ExposureValues filtered_; // these values are filtered towards target
+ ExposureValues current_; /* values for the current frame */
+ ExposureValues target_; /* calculate the values we want here */
+ ExposureValues filtered_; /* these values are filtered towards target */
AgcStatus status_;
int lockCount_;
DeviceStatus lastDeviceStatus_;
libcamera::utils::Duration lastTargetExposure_;
- double lastSensitivity_; // sensitivity of the previous camera mode
- // Below here the "settings" that applications can change.
+ double lastSensitivity_; /* sensitivity of the previous camera mode */
+ /* Below here the "settings" that applications can change. */
std::string meteringModeName_;
std::string exposureModeName_;
std::string constraintModeName_;
@@ -136,4 +138,4 @@ private:
double fixedAnalogueGain_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.cpp b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
index 98b77154..6fd95a31 100644
--- a/src/ipa/raspberrypi/controller/rpi/alsc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.cpp
@@ -14,7 +14,7 @@
#include "../awb_status.h"
#include "alsc.hpp"
-// Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm.
+/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */
using namespace RPiController;
using namespace libcamera;
@@ -68,7 +68,7 @@ static void generateLut(double *lut, boost::property_tree::ptree const &params)
double r2 = (dx * dx + dy * dy) / R2;
lut[num++] =
(f1 * r2 + f2) * (f1 * r2 + f2) /
- (f2 * f2); // this reproduces the cos^4 rule
+ (f2 * f2); /* this reproduces the cos^4 rule */
}
}
}
@@ -171,7 +171,7 @@ void Alsc::initialise()
frameCount2_ = frameCount_ = framePhase_ = 0;
firstTime_ = true;
ct_ = config_.defaultCt;
- // The lambdas are initialised in the SwitchMode.
+ /* The lambdas are initialised in the SwitchMode. */
}
void Alsc::waitForAysncThread()
@@ -188,8 +188,10 @@ void Alsc::waitForAysncThread()
static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
{
- // Return true if the modes crop from the sensor significantly differently,
- // or if the user transform has changed.
+ /*
+ * Return true if the modes crop from the sensor significantly differently,
+ * or if the user transform has changed.
+ */
if (cm0.transform != cm1.transform)
return true;
int leftDiff = abs(cm0.cropX - cm1.cropX);
@@ -198,9 +200,11 @@ static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
cm1.cropX - cm1.scaleX * cm1.width);
int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height -
cm1.cropY - cm1.scaleY * cm1.height);
- // These thresholds are a rather arbitrary amount chosen to trigger
- // when carrying on with the previously calculated tables might be
- // worse than regenerating them (but without the adaptive algorithm).
+ /*
+ * These thresholds are a rather arbitrary amount chosen to trigger
+ * when carrying on with the previously calculated tables might be
+ * worse than regenerating them (but without the adaptive algorithm).
+ */
int thresholdX = cm0.sensorWidth >> 4;
int thresholdY = cm0.sensorHeight >> 4;
return leftDiff > thresholdX || rightDiff > thresholdX ||
@@ -210,28 +214,34 @@ static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
void Alsc::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // We're going to start over with the tables if there's any "significant"
- // change.
+ /*
+ * We're going to start over with the tables if there's any "significant"
+ * change.
+ */
bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode);
- // Believe the colour temperature from the AWB, if there is one.
+ /* Believe the colour temperature from the AWB, if there is one. */
ct_ = getCt(metadata, ct_);
- // Ensure the other thread isn't running while we do this.
+ /* Ensure the other thread isn't running while we do this. */
waitForAysncThread();
cameraMode_ = cameraMode;
- // We must resample the luminance table like we do the others, but it's
- // fixed so we can simply do it up front here.
+ /*
+ * We must resample the luminance table like we do the others, but it's
+ * fixed so we can simply do it up front here.
+ */
resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_);
if (resetTables) {
- // Upon every "table reset", arrange for something sensible to be
- // generated. Construct the tables for the previous recorded colour
- // temperature. In order to start over from scratch we initialise
- // the lambdas, but the rest of this code then echoes the code in
- // doAlsc, without the adaptive algorithm.
+ /*
+ * Upon every "table reset", arrange for something sensible to be
+ * generated. Construct the tables for the previous recorded colour
+ * temperature. In order to start over from scratch we initialise
+ * the lambdas, but the rest of this code then echoes the code in
+ * doAlsc, without the adaptive algorithm.
+ */
for (int i = 0; i < XY; i++)
lambdaR_[i] = lambdaB_[i] = 1.0;
double calTableR[XY], calTableB[XY], calTableTmp[XY];
@@ -244,7 +254,7 @@ void Alsc::switchMode(CameraMode const &cameraMode,
addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_,
luminanceTable_, config_.luminanceStrength);
memcpy(prevSyncResults_, syncResults_, sizeof(prevSyncResults_));
- framePhase_ = config_.framePeriod; // run the algo again asap
+ framePhase_ = config_.framePeriod; /* run the algo again asap */
firstTime_ = false;
}
}
@@ -260,7 +270,7 @@ void Alsc::fetchAsyncResults()
double getCt(Metadata *metadata, double defaultCt)
{
AwbStatus awbStatus;
- awbStatus.temperatureK = defaultCt; // in case nothing found
+ awbStatus.temperatureK = defaultCt; /* in case nothing found */
if (metadata->get("awb.status", awbStatus) != 0)
LOG(RPiAlsc, Debug) << "no AWB results found, using "
<< awbStatus.temperatureK;
@@ -282,18 +292,22 @@ static void copyStats(bcm2835_isp_stats_region regions[XY], StatisticsPtr &stats
regions[i].g_sum = inputRegions[i].g_sum / gTable[i];
regions[i].b_sum = inputRegions[i].b_sum / bTable[i];
regions[i].counted = inputRegions[i].counted;
- // (don't care about the uncounted value)
+ /* (don't care about the uncounted value) */
}
}
void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
{
LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
- // Get the current colour temperature. It's all we need from the
- // metadata. Default to the last CT value (which could be the default).
+ /*
+ * Get the current colour temperature. It's all we need from the
+ * metadata. Default to the last CT value (which could be the default).
+ */
ct_ = getCt(imageMetadata, ct_);
- // We have to copy the statistics here, dividing out our best guess of
- // the LSC table that the pipeline applied to them.
+ /*
+ * We have to copy the statistics here, dividing out our best guess of
+ * the LSC table that the pipeline applied to them.
+ */
AlscStatus alscStatus;
if (imageMetadata->get("alsc.status", alscStatus) != 0) {
LOG(RPiAlsc, Warning)
@@ -317,8 +331,10 @@ void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
void Alsc::prepare(Metadata *imageMetadata)
{
- // Count frames since we started, and since we last poked the async
- // thread.
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
if (frameCount_ < (int)config_.startupFrames)
frameCount_++;
double speed = frameCount_ < (int)config_.startupFrames
@@ -331,12 +347,12 @@ void Alsc::prepare(Metadata *imageMetadata)
if (asyncStarted_ && asyncFinished_)
fetchAsyncResults();
}
- // Apply IIR filter to results and program into the pipeline.
+ /* Apply IIR filter to results and program into the pipeline. */
double *ptr = (double *)syncResults_,
*pptr = (double *)prevSyncResults_;
for (unsigned int i = 0; i < sizeof(syncResults_) / sizeof(double); i++)
pptr[i] = speed * ptr[i] + (1.0 - speed) * pptr[i];
- // Put output values into status metadata.
+ /* Put output values into status metadata. */
AlscStatus status;
memcpy(status.r, prevSyncResults_[0], sizeof(status.r));
memcpy(status.g, prevSyncResults_[1], sizeof(status.g));
@@ -346,8 +362,10 @@ void Alsc::prepare(Metadata *imageMetadata)
void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
- // Count frames since we started, and since we last poked the async
- // thread.
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
if (framePhase_ < (int)config_.framePeriod)
framePhase_++;
if (frameCount2_ < (int)config_.startupFrames)
@@ -415,8 +433,10 @@ void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
void resampleCalTable(double const calTableIn[XY],
CameraMode const &cameraMode, double calTableOut[XY])
{
- // Precalculate and cache the x sampling locations and phases to save
- // recomputing them on every row.
+ /*
+ * Precalculate and cache the x sampling locations and phases to save
+ * recomputing them on every row.
+ */
int xLo[X], xHi[X];
double xf[X];
double scaleX = cameraMode.sensorWidth /
@@ -434,7 +454,7 @@ void resampleCalTable(double const calTableIn[XY],
xHi[i] = X - 1 - xHi[i];
}
}
- // Now march over the output table generating the new values.
+ /* Now march over the output table generating the new values. */
double scaleY = cameraMode.sensorHeight /
(cameraMode.height * cameraMode.scaleY);
double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight;
@@ -461,7 +481,7 @@ void resampleCalTable(double const calTableIn[XY],
}
}
-// Calculate chrominance statistics (R/G and B/G) for each region.
+/* Calculate chrominance statistics (R/G and B/G) for each region. */
static_assert(XY == AWB_REGIONS, "ALSC/AWB statistics region mismatch");
static void calculateCrCb(bcm2835_isp_stats_region *awbRegion, double cr[XY],
double cb[XY], uint32_t minCount, uint16_t minG)
@@ -512,8 +532,10 @@ void compensateLambdasForCal(double const calTable[XY],
printf("]\n");
}
-// Compute weight out of 1.0 which reflects how similar we wish to make the
-// colours of these two regions.
+/*
+ * Compute weight out of 1.0 which reflects how similar we wish to make the
+ * colours of these two regions.
+ */
static double computeWeight(double Ci, double Cj, double sigma)
{
if (Ci == InsufficientData || Cj == InsufficientData)
@@ -522,11 +544,11 @@ static double computeWeight(double Ci, double Cj, double sigma)
return exp(-diff * diff / 2);
}
-// Compute all weights.
+/* Compute all weights. */
static void computeW(double const C[XY], double sigma, double W[XY][4])
{
for (int i = 0; i < XY; i++) {
- // Start with neighbour above and go clockwise.
+ /* Start with neighbour above and go clockwise. */
W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0;
W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0;
W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0;
@@ -534,17 +556,19 @@ static void computeW(double const C[XY], double sigma, double W[XY][4])
}
}
-// Compute M, the large but sparse matrix such that M * lambdas = 0.
+/* Compute M, the large but sparse matrix such that M * lambdas = 0. */
static void constructM(double const C[XY], double const W[XY][4],
double M[XY][4])
{
double epsilon = 0.001;
for (int i = 0; i < XY; i++) {
- // Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
- // be zero so the equation is still set up correctly.
+ /*
+ * Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
+ * be zero so the equation is still set up correctly.
+ */
int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
- !!(i % X); // total number of neighbours
- // we'll divide the diagonal out straight away
+ !!(i % X); /* total number of neighbours */
+ /* we'll divide the diagonal out straight away */
double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i];
M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0;
M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0;
@@ -553,9 +577,11 @@ static void constructM(double const C[XY], double const W[XY][4],
}
}
-// In the compute_lambda_ functions, note that the matrix coefficients for the
-// left/right neighbours are zero down the left/right edges, so we don't need
-// need to test the i value to exclude them.
+/*
+ * In the compute_lambda_ functions, note that the matrix coefficients for the
+ * left/right neighbours are zero down the left/right edges, so we don't need
+ * need to test the i value to exclude them.
+ */
static double computeLambdaBottom(int i, double const M[XY][4],
double lambda[XY])
{
@@ -585,7 +611,7 @@ static double computeLambdaTopEnd(int i, double const M[XY][4],
return M[i][0] * lambda[i - X] + M[i][3] * lambda[i - 1];
}
-// Gauss-Seidel iteration with over-relaxation.
+/* Gauss-Seidel iteration with over-relaxation. */
static double gaussSeidel2Sor(double const M[XY][4], double omega,
double lambda[XY], double lambdaBound)
{
@@ -610,8 +636,10 @@ static double gaussSeidel2Sor(double const M[XY][4], double omega,
}
lambda[i] = computeLambdaTopEnd(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
- // Also solve the system from bottom to top, to help spread the updates
- // better.
+ /*
+ * Also solve the system from bottom to top, to help spread the updates
+ * better.
+ */
lambda[i] = computeLambdaTopEnd(i, M, lambda);
lambda[i] = std::clamp(lambda[i], min, max);
for (i = XY - 2; i >= XY - X; i--) {
@@ -637,7 +665,7 @@ static double gaussSeidel2Sor(double const M[XY][4], double omega,
return maxDiff;
}
-// Normalise the values so that the smallest value is 1.
+/* Normalise the values so that the smallest value is 1. */
static void normalise(double *ptr, size_t n)
{
double minval = ptr[0];
@@ -647,7 +675,7 @@ static void normalise(double *ptr, size_t n)
ptr[i] /= minval;
}
-// Rescale the values so that the average value is 1.
+/* Rescale the values so that the average value is 1. */
static void reaverage(Span<double> data)
{
double sum = std::accumulate(data.begin(), data.end(), 0.0);
@@ -670,15 +698,17 @@ static void runMatrixIterations(double const C[XY], double lambda[XY],
<< "Stop after " << i + 1 << " iterations";
break;
}
- // this happens very occasionally (so make a note), though
- // doesn't seem to matter
+ /*
+ * this happens very occasionally (so make a note), though
+ * doesn't seem to matter
+ */
if (maxDiff > lastMaxDiff)
LOG(RPiAlsc, Debug)
<< "Iteration " << i << ": maxDiff gone up "
<< lastMaxDiff << " to " << maxDiff;
lastMaxDiff = maxDiff;
}
- // We're going to normalise the lambdas so the total average is 1.
+ /* We're going to normalise the lambdas so the total average is 1. */
reaverage({ lambda, XY });
}
@@ -712,41 +742,49 @@ void addLuminanceToTables(double results[3][Y][X], double const lambdaR[XY],
void Alsc::doAlsc()
{
double cr[XY], cb[XY], wr[XY][4], wb[XY][4], calTableR[XY], calTableB[XY], calTableTmp[XY];
- // Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
- // usable.
+ /*
+ * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
+ * usable.
+ */
calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG);
- // Fetch the new calibrations (if any) for this CT. Resample them in
- // case the camera mode is not full-frame.
+ /*
+ * Fetch the new calibrations (if any) for this CT. Resample them in
+ * case the camera mode is not full-frame.
+ */
getCalTable(ct_, config_.calibrationsCr, calTableTmp);
resampleCalTable(calTableTmp, cameraMode_, calTableR);
getCalTable(ct_, config_.calibrationsCb, calTableTmp);
resampleCalTable(calTableTmp, cameraMode_, calTableB);
- // You could print out the cal tables for this image here, if you're
- // tuning the algorithm...
- // Apply any calibration to the statistics, so the adaptive algorithm
- // makes only the extra adjustments.
+ /*
+ * You could print out the cal tables for this image here, if you're
+ * tuning the algorithm...
+ * Apply any calibration to the statistics, so the adaptive algorithm
+ * makes only the extra adjustments.
+ */
applyCalTable(calTableR, cr);
applyCalTable(calTableB, cb);
- // Compute weights between zones.
+ /* Compute weights between zones. */
computeW(cr, config_.sigmaCr, wr);
computeW(cb, config_.sigmaCb, wb);
- // Run Gauss-Seidel iterations over the resulting matrix, for R and B.
+ /* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */
runMatrixIterations(cr, lambdaR_, wr, config_.omega, config_.nIter,
config_.threshold, config_.lambdaBound);
runMatrixIterations(cb, lambdaB_, wb, config_.omega, config_.nIter,
config_.threshold, config_.lambdaBound);
- // Fold the calibrated gains into our final lambda values. (Note that on
- // the next run, we re-start with the lambda values that don't have the
- // calibration gains included.)
+ /*
+ * Fold the calibrated gains into our final lambda values. (Note that on
+ * the next run, we re-start with the lambda values that don't have the
+ * calibration gains included.)
+ */
compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
- // Fold in the luminance table at the appropriate strength.
+ /* Fold in the luminance table at the appropriate strength. */
addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0,
asyncLambdaB_, luminanceTable_,
config_.luminanceStrength);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Alsc(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/alsc.hpp b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
index 7a0949d1..3ffc175d 100644
--- a/src/ipa/raspberrypi/controller/rpi/alsc.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/alsc.hpp
@@ -15,7 +15,7 @@
namespace RPiController {
-// Algorithm to generate automagic LSC (Lens Shading Correction) tables.
+/* Algorithm to generate automagic LSC (Lens Shading Correction) tables. */
struct AlscCalibration {
double ct;
@@ -23,11 +23,11 @@ struct AlscCalibration {
};
struct AlscConfig {
- // Only repeat the ALSC calculation every "this many" frames
+ /* Only repeat the ALSC calculation every "this many" frames */
uint16_t framePeriod;
- // number of initial frames for which speed taken as 1.0 (maximum)
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
uint16_t startupFrames;
- // IIR filter speed applied to algorithm results
+ /* IIR filter speed applied to algorithm results */
double speed;
double sigmaCr;
double sigmaCb;
@@ -39,9 +39,9 @@ struct AlscConfig {
double luminanceStrength;
std::vector<AlscCalibration> calibrationsCr;
std::vector<AlscCalibration> calibrationsCb;
- double defaultCt; // colour temperature if no metadata found
- double threshold; // iteration termination threshold
- double lambdaBound; // upper/lower bound for lambda from a value of 1
+ double defaultCt; /* colour temperature if no metadata found */
+ double threshold; /* iteration termination threshold */
+ double lambdaBound; /* upper/lower bound for lambda from a value of 1 */
};
class Alsc : public Algorithm
@@ -57,41 +57,45 @@ public:
void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
private:
- // configuration is read-only, and available to both threads
+ /* configuration is read-only, and available to both threads */
AlscConfig config_;
bool firstTime_;
CameraMode cameraMode_;
double luminanceTable_[ALSC_CELLS_X * ALSC_CELLS_Y];
std::thread asyncThread_;
- void asyncFunc(); // asynchronous thread function
+ void asyncFunc(); /* asynchronous thread function */
std::mutex mutex_;
- // condvar for async thread to wait on
+ /* condvar for async thread to wait on */
std::condition_variable asyncSignal_;
- // condvar for synchronous thread to wait on
+ /* condvar for synchronous thread to wait on */
std::condition_variable syncSignal_;
- // for sync thread to check if async thread finished (requires mutex)
+ /* for sync thread to check if async thread finished (requires mutex) */
bool asyncFinished_;
- // for async thread to check if it's been told to run (requires mutex)
+ /* for async thread to check if it's been told to run (requires mutex) */
bool asyncStart_;
- // for async thread to check if it's been told to quit (requires mutex)
+ /* for async thread to check if it's been told to quit (requires mutex) */
bool asyncAbort_;
- // The following are only for the synchronous thread to use:
- // for sync thread to note its has asked async thread to run
+ /*
+ * The following are only for the synchronous thread to use:
+ * for sync thread to note its has asked async thread to run
+ */
bool asyncStarted_;
- // counts up to framePeriod before restarting the async thread
+ /* counts up to framePeriod before restarting the async thread */
int framePhase_;
- // counts up to startupFrames
+ /* counts up to startupFrames */
int frameCount_;
- // counts up to startupFrames for Process function
+ /* counts up to startupFrames for Process function */
int frameCount2_;
double syncResults_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
double prevSyncResults_[3][ALSC_CELLS_Y][ALSC_CELLS_X];
void waitForAysncThread();
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
void restartAsync(StatisticsPtr &stats, Metadata *imageMetadata);
- // copy out the results from the async thread so that it can be restarted
+ /* copy out the results from the async thread so that it can be restarted */
void fetchAsyncResults();
double ct_;
bcm2835_isp_stats_region statistics_[ALSC_CELLS_Y * ALSC_CELLS_X];
@@ -103,4 +107,4 @@ private:
double lambdaB_[ALSC_CELLS_X * ALSC_CELLS_Y];
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.cpp b/src/ipa/raspberrypi/controller/rpi/awb.cpp
index 07791e8b..e4ed114d 100644
--- a/src/ipa/raspberrypi/controller/rpi/awb.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/awb.cpp
@@ -21,8 +21,10 @@ LOG_DEFINE_CATEGORY(RPiAwb)
#define AWB_STATS_SIZE_X DEFAULT_AWB_REGIONS_X
#define AWB_STATS_SIZE_Y DEFAULT_AWB_REGIONS_Y
-// todo - the locking in this algorithm needs some tidying up as has been done
-// elsewhere (ALSC and AGC).
+/*
+ * todo - the locking in this algorithm needs some tidying up as has been done
+ * elsewhere (ALSC and AGC).
+ */
void AwbMode::read(boost::property_tree::ptree const &params)
{
@@ -107,11 +109,11 @@ void AwbConfig::read(boost::property_tree::ptree const &params)
bayes = false;
}
}
- fast = params.get<int>("fast", bayes); // default to fast for Bayesian, otherwise slow
+ fast = params.get<int>("fast", bayes); /* default to fast for Bayesian, otherwise slow */
whitepointR = params.get<double>("whitepoint_r", 0.0);
whitepointB = params.get<double>("whitepoint_b", 0.0);
if (bayes == false)
- sensitivityR = sensitivityB = 1.0; // nor do sensitivities make any sense
+ sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */
}
Awb::Awb(Controller *controller)
@@ -147,16 +149,18 @@ void Awb::read(boost::property_tree::ptree const &params)
void Awb::initialise()
{
frameCount_ = framePhase_ = 0;
- // Put something sane into the status that we are filtering towards,
- // just in case the first few frames don't have anything meaningful in
- // them.
+ /*
+ * Put something sane into the status that we are filtering towards,
+ * just in case the first few frames don't have anything meaningful in
+ * them.
+ */
if (!config_.ctR.empty() && !config_.ctB.empty()) {
syncResults_.temperatureK = config_.ctR.domain().clip(4000);
syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
syncResults_.gainG = 1.0;
syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
} else {
- // random values just to stop the world blowing up
+ /* random values just to stop the world blowing up */
syncResults_.temperatureK = 4500;
syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
}
@@ -171,7 +175,7 @@ bool Awb::isPaused() const
void Awb::pause()
{
- // "Pause" by fixing everything to the most recent values.
+ /* "Pause" by fixing everything to the most recent values. */
manualR_ = syncResults_.gainR = prevSyncResults_.gainR;
manualB_ = syncResults_.gainB = prevSyncResults_.gainB;
syncResults_.gainG = prevSyncResults_.gainG;
@@ -186,8 +190,10 @@ void Awb::resume()
unsigned int Awb::getConvergenceFrames() const
{
- // If not in auto mode, there is no convergence
- // to happen, so no need to drop any frames - return zero.
+ /*
+ * If not in auto mode, there is no convergence
+ * to happen, so no need to drop any frames - return zero.
+ */
if (!isAutoEnabled())
return 0;
else
@@ -201,11 +207,13 @@ void Awb::setMode(std::string const &modeName)
void Awb::setManualGains(double manualR, double manualB)
{
- // If any of these are 0.0, we swich back to auto.
+ /* If any of these are 0.0, we swich back to auto. */
manualR_ = manualR;
manualB_ = manualB;
- // If not in auto mode, set these values into the syncResults which
- // means that Prepare() will adopt them immediately.
+ /*
+ * If not in auto mode, set these values into the syncResults which
+ * means that Prepare() will adopt them immediately.
+ */
if (!isAutoEnabled()) {
syncResults_.gainR = prevSyncResults_.gainR = manualR_;
syncResults_.gainG = prevSyncResults_.gainG = 1.0;
@@ -216,8 +224,10 @@ void Awb::setManualGains(double manualR, double manualB)
void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
Metadata *metadata)
{
- // On the first mode switch we'll have no meaningful colour
- // temperature, so try to dead reckon one if in manual mode.
+ /*
+ * On the first mode switch we'll have no meaningful colour
+ * temperature, so try to dead reckon one if in manual mode.
+ */
if (!isAutoEnabled() && firstSwitchMode_ && config_.bayes) {
Pwl ctRInverse = config_.ctR.inverse();
Pwl ctBInverse = config_.ctB.inverse();
@@ -226,7 +236,7 @@ void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
prevSyncResults_.temperatureK = (ctR + ctB) / 2;
syncResults_.temperatureK = prevSyncResults_.temperatureK;
}
- // Let other algorithms know the current white balance values.
+ /* Let other algorithms know the current white balance values. */
metadata->set("awb.status", prevSyncResults_);
firstSwitchMode_ = false;
}
@@ -241,8 +251,10 @@ void Awb::fetchAsyncResults()
LOG(RPiAwb, Debug) << "Fetch AWB results";
asyncFinished_ = false;
asyncStarted_ = false;
- // It's possible manual gains could be set even while the async
- // thread was running, so only copy the results if still in auto mode.
+ /*
+ * It's possible manual gains could be set even while the async
+ * thread was running, so only copy the results if still in auto mode.
+ */
if (isAutoEnabled())
syncResults_ = asyncResults_;
}
@@ -250,9 +262,9 @@ void Awb::fetchAsyncResults()
void Awb::restartAsync(StatisticsPtr &stats, double lux)
{
LOG(RPiAwb, Debug) << "Starting AWB calculation";
- // this makes a new reference which belongs to the asynchronous thread
+ /* this makes a new reference which belongs to the asynchronous thread */
statistics_ = stats;
- // store the mode as it could technically change
+ /* store the mode as it could technically change */
auto m = config_.modes.find(modeName_);
mode_ = m != config_.modes.end()
? &m->second
@@ -284,7 +296,7 @@ void Awb::prepare(Metadata *imageMetadata)
if (asyncStarted_ && asyncFinished_)
fetchAsyncResults();
}
- // Finally apply IIR filter to results and put into metadata.
+ /* Finally apply IIR filter to results and put into metadata. */
memcpy(prevSyncResults_.mode, syncResults_.mode,
sizeof(prevSyncResults_.mode));
prevSyncResults_.temperatureK = speed * syncResults_.temperatureK +
@@ -304,17 +316,17 @@ void Awb::prepare(Metadata *imageMetadata)
void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata)
{
- // Count frames since we last poked the async thread.
+ /* Count frames since we last poked the async thread. */
if (framePhase_ < (int)config_.framePeriod)
framePhase_++;
LOG(RPiAwb, Debug) << "frame_phase " << framePhase_;
- // We do not restart the async thread if we're not in auto mode.
+ /* We do not restart the async thread if we're not in auto mode. */
if (isAutoEnabled() &&
(framePhase_ >= (int)config_.framePeriod ||
frameCount_ < (int)config_.startupFrames)) {
- // Update any settings and any image metadata that we need.
+ /* Update any settings and any image metadata that we need. */
struct LuxStatus luxStatus = {};
- luxStatus.lux = 400; // in case no metadata
+ luxStatus.lux = 400; /* in case no metadata */
if (imageMetadata->get("lux.status", luxStatus) != 0)
LOG(RPiAwb, Debug) << "No lux metadata found";
LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux;
@@ -366,15 +378,21 @@ static void generateStats(std::vector<Awb::RGB> &zones,
void Awb::prepareStats()
{
zones_.clear();
- // LSC has already been applied to the stats in this pipeline, so stop
- // any LSC compensation. We also ignore config_.fast in this version.
+ /*
+ * LSC has already been applied to the stats in this pipeline, so stop
+ * any LSC compensation. We also ignore config_.fast in this version.
+ */
generateStats(zones_, statistics_->awb_stats, config_.minPixels,
config_.minG);
- // we're done with these; we may as well relinquish our hold on the
- // pointer.
+ /*
+ * we're done with these; we may as well relinquish our hold on the
+ * pointer.
+ */
statistics_.reset();
- // apply sensitivities, so values appear to come from our "canonical"
- // sensor.
+ /*
+ * apply sensitivities, so values appear to come from our "canonical"
+ * sensor.
+ */
for (auto &zone : zones_) {
zone.R *= config_.sensitivityR;
zone.B *= config_.sensitivityB;
@@ -383,14 +401,16 @@ void Awb::prepareStats()
double Awb::computeDelta2Sum(double gainR, double gainB)
{
- // Compute the sum of the squared colour error (non-greyness) as it
- // appears in the log likelihood equation.
+ /*
+ * Compute the sum of the squared colour error (non-greyness) as it
+ * appears in the log likelihood equation.
+ */
double delta2Sum = 0;
for (auto &z : zones_) {
double deltaR = gainR * z.R - 1 - config_.whitepointR;
double deltaB = gainB * z.B - 1 - config_.whitepointB;
double delta2 = deltaR * deltaR + deltaB * deltaB;
- //LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2;
+ /* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */
delta2 = std::min(delta2, config_.deltaLimit);
delta2Sum += delta2;
}
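For readers following the maths, here is a standalone sketch of the clamped squared colour-error sum computed in this hunk; the Zone struct and the whitepoint/deltaLimit defaults are illustrative, not taken from any tuning file:

#include <algorithm>
#include <vector>

struct Zone { double R, B; };	/* r and b already divided out by G, as in awbBayes() */

/* Sketch: sum of squared colour error, clamped per zone so that a single
 * strongly coloured zone cannot dominate the total. */
static double delta2Sum(const std::vector<Zone> &zones, double gainR, double gainB,
			double whitepointR = 0.0, double whitepointB = 0.0,
			double deltaLimit = 0.2)
{
	double sum = 0.0;
	for (const Zone &z : zones) {
		double deltaR = gainR * z.R - 1 - whitepointR;
		double deltaB = gainB * z.B - 1 - whitepointB;
		sum += std::min(deltaR * deltaR + deltaB * deltaB, deltaLimit);
	}
	return sum;
}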
@@ -399,15 +419,17 @@ double Awb::computeDelta2Sum(double gainR, double gainB)
Pwl Awb::interpolatePrior()
{
- // Interpolate the prior log likelihood function for our current lux
- // value.
+ /*
+ * Interpolate the prior log likelihood function for our current lux
+ * value.
+ */
if (lux_ <= config_.priors.front().lux)
return config_.priors.front().prior;
else if (lux_ >= config_.priors.back().lux)
return config_.priors.back().prior;
else {
int idx = 0;
- // find which two we lie between
+ /* find which two we lie between */
while (config_.priors[idx + 1].lux < lux_)
idx++;
double lux0 = config_.priors[idx].lux,
@@ -424,8 +446,10 @@ Pwl Awb::interpolatePrior()
static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b,
Pwl::Point const &c)
{
- // Given 3 points on a curve, find the extremum of the function in that
- // interval by fitting a quadratic.
+ /*
+ * Given 3 points on a curve, find the extremum of the function in that
+ * interval by fitting a quadratic.
+ */
const double eps = 1e-3;
Pwl::Point ca = c - a, ba = b - a;
double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
@@ -434,17 +458,17 @@ static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b,
double result = numerator / denominator + a.x;
return std::max(a.x, std::min(c.x, result));
}
- // has degenerated to straight line segment
+ /* has degenerated to straight line segment */
return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}
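The refinement above amounts to fitting a parabola through three samples and taking the x of its vertex, clamped to the interval. A self-contained sketch of the same formula (names and the eps-based degenerate-case check are illustrative):

#include <algorithm>
#include <cmath>

struct Point { double x, y; };

static double quadraticExtremum(Point a, Point b, Point c)
{
	const double eps = 1e-3;
	double d1 = b.x - a.x, d2 = c.x - a.x;	/* ba.x, ca.x above */
	double s1 = b.y - a.y, s2 = c.y - a.y;	/* ba.y, ca.y above */
	double denominator = 2 * (s1 * d2 - s2 * d1);
	if (std::abs(denominator) > eps) {
		/* Vertex of the parabola through a, b and c. */
		double numerator = s1 * d2 * d2 - s2 * d1 * d1;
		double result = numerator / denominator + a.x;
		return std::max(a.x, std::min(c.x, result));
	}
	/* Degenerated to a straight line segment: pick the lower end. */
	return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}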
double Awb::coarseSearch(Pwl const &prior)
{
- points_.clear(); // assume doesn't deallocate memory
+ points_.clear(); /* assume doesn't deallocate memory */
size_t bestPoint = 0;
double t = mode_->ctLo;
int spanR = 0, spanB = 0;
- // Step down the CT curve evaluating log likelihood.
+ /* Step down the CT curve evaluating log likelihood. */
while (true) {
double r = config_.ctR.eval(t, &spanR);
double b = config_.ctB.eval(t, &spanB);
@@ -462,13 +486,15 @@ double Awb::coarseSearch(Pwl const &prior)
bestPoint = points_.size() - 1;
if (t == mode_->ctHi)
break;
- // for even steps along the r/b curve scale them by the current t
+ /* for even steps along the r/b curve scale them by the current t */
t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
}
t = points_[bestPoint].x;
LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
- // We have the best point of the search, but refine it with a quadratic
- // interpolation around its neighbours.
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbours.
+ */
if (points_.size() > 2) {
unsigned long bp = std::min(bestPoint, points_.size() - 2);
bestPoint = std::max(1UL, bp);
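The proportional step t + t/10 * coarseStep means higher colour temperatures take larger absolute steps, which keeps the samples roughly evenly spaced along the r/b curve. A minimal sketch of the resulting sequence of CT sample points, with illustrative ctLo/ctHi/coarseStep values:

#include <algorithm>
#include <vector>

static std::vector<double> ctSearchPoints(double ctLo = 2500, double ctHi = 8000,
					  double coarseStep = 0.2)
{
	std::vector<double> points;
	for (double t = ctLo;;) {
		points.push_back(t);
		if (t == ctHi)
			break;
		/* Step proportionally to t, clamped to the top of the range. */
		t = std::min(t + t / 10 * coarseStep, ctHi);
	}
	return points;
}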
@@ -496,17 +522,21 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
Pwl::Point transverse(bDiff, -rDiff);
if (transverse.len2() < 1e-6)
return;
- // unit vector orthogonal to the b vs. r function (pointing outwards
- // with r and b increasing)
+ /*
+ * unit vector orthogonal to the b vs. r function (pointing outwards
+ * with r and b increasing)
+ */
transverse = transverse / transverse.len();
double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
double transverseRange = config_.transverseNeg + config_.transversePos;
const int maxNumDeltas = 12;
- // a transverse step approximately every 0.01 r/b units
+ /* a transverse step approximately every 0.01 r/b units */
int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas);
- // Step down CT curve. March a bit further if the transverse range is
- // large.
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
nsteps += numDeltas;
for (int i = -nsteps; i <= nsteps; i++) {
double tTest = t + i * step;
@@ -514,10 +544,10 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
prior.eval(prior.domain().clip(tTest));
double rCurve = config_.ctR.eval(tTest, &spanR);
double bCurve = config_.ctB.eval(tTest, &spanB);
- // x will be distance off the curve, y the log likelihood there
+ /* x will be distance off the curve, y the log likelihood there */
Pwl::Point points[maxNumDeltas];
int bestPoint = 0;
- // Take some measurements transversely *off* the CT curve.
+ /* Take some measurements transversely *off* the CT curve. */
for (int j = 0; j < numDeltas; j++) {
points[j].x = -config_.transverseNeg +
(transverseRange * j) / (numDeltas - 1);
@@ -533,8 +563,10 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
if (points[j].y < points[bestPoint].y)
bestPoint = j;
}
- // We have NUM_DELTAS points transversely across the CT curve,
- // now let's do a quadratic interpolation for the best result.
+ /*
+ * We have NUM_DELTAS points transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
transverse * interpolateQuadatric(points[bestPoint - 1],
@@ -560,12 +592,16 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
void Awb::awbBayes()
{
- // May as well divide out G to save computeDelta2Sum from doing it over
- // and over.
+ /*
+ * May as well divide out G to save computeDelta2Sum from doing it over
+ * and over.
+ */
for (auto &z : zones_)
z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
- // Get the current prior, and scale according to how many zones are
- // valid... not entirely sure about this.
+ /*
+ * Get the current prior, and scale according to how many zones are
+ * valid... not entirely sure about this.
+ */
Pwl prior = interpolatePrior();
prior *= zones_.size() / (double)(AWB_STATS_SIZE_X * AWB_STATS_SIZE_Y);
prior.map([](double x, double y) {
@@ -577,19 +613,23 @@ void Awb::awbBayes()
LOG(RPiAwb, Debug)
<< "After coarse search: r " << r << " b " << b << " (gains r "
<< 1 / r << " b " << 1 / b << ")";
- // Not entirely sure how to handle the fine search yet. Mostly the
- // estimated CT is already good enough, but the fine search allows us to
- // wander transverely off the CT curve. Under some illuminants, where
- // there may be more or less green light, this may prove beneficial,
- // though I probably need more real datasets before deciding exactly how
- // this should be controlled and tuned.
+ /*
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+	 * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
fineSearch(t, r, b, prior);
LOG(RPiAwb, Debug)
<< "After fine search: r " << r << " b " << b << " (gains r "
<< 1 / r << " b " << 1 / b << ")";
- // Write results out for the main thread to pick up. Remember to adjust
- // the gains from the ones that the "canonical sensor" would require to
- // the ones needed by *this* sensor.
+ /*
+ * Write results out for the main thread to pick up. Remember to adjust
+ * the gains from the ones that the "canonical sensor" would require to
+ * the ones needed by *this* sensor.
+ */
asyncResults_.temperatureK = t;
asyncResults_.gainR = 1.0 / r * config_.sensitivityR;
asyncResults_.gainG = 1.0;
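As a small illustration of this write-out step: r and b are the canonical sensor's R/G and B/G at the chosen colour temperature, so the applied gains are their reciprocals, scaled by the per-sensor sensitivity ratios. The struct and default sensitivity values below are illustrative:

struct AwbGains { double temperatureK, gainR, gainG, gainB; };

static AwbGains makeResults(double t, double r, double b,
			    double sensitivityR = 1.0, double sensitivityB = 1.0)
{
	/* Reciprocal gains, adjusted from the canonical sensor to this one. */
	return { t, 1.0 / r * sensitivityR, 1.0, 1.0 / b * sensitivityB };
}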
@@ -599,10 +639,12 @@ void Awb::awbBayes()
void Awb::awbGrey()
{
LOG(RPiAwb, Debug) << "Grey world AWB";
- // Make a separate list of the derivatives for each of red and blue, so
- // that we can sort them to exclude the extreme gains. We could
- // consider some variations, such as normalising all the zones first, or
- // doing an L2 average etc.
+ /*
+ * Make a separate list of the derivatives for each of red and blue, so
+ * that we can sort them to exclude the extreme gains. We could
+ * consider some variations, such as normalising all the zones first, or
+ * doing an L2 average etc.
+ */
std::vector<RGB> &derivsR(zones_);
std::vector<RGB> derivsB(derivsR);
std::sort(derivsR.begin(), derivsR.end(),
@@ -613,7 +655,7 @@ void Awb::awbGrey()
[](RGB const &a, RGB const &b) {
return a.G * b.B < b.G * a.B;
});
- // Average the middle half of the values.
+ /* Average the middle half of the values. */
int discard = derivsR.size() / 4;
RGB sumR(0, 0, 0), sumB(0, 0, 0);
for (auto ri = derivsR.begin() + discard,
@@ -622,7 +664,7 @@ void Awb::awbGrey()
sumR += *ri, sumB += *bi;
double gainR = sumR.G / (sumR.R + 1),
gainB = sumB.G / (sumB.B + 1);
- asyncResults_.temperatureK = 4500; // don't know what it is
+ asyncResults_.temperatureK = 4500; /* don't know what it is */
asyncResults_.gainR = gainR;
asyncResults_.gainG = 1.0;
asyncResults_.gainB = gainB;
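For reference, a standalone sketch of the grey-world calculation in this function: sort the zones by the red and blue gains they imply, discard the top and bottom quarters, and average what remains (the +1 in the denominators guards against empty zones, as above). Types and names are illustrative:

#include <algorithm>
#include <vector>

struct RGBZone { double R, G, B; };

static void greyWorldGains(std::vector<RGBZone> zonesR, double &gainR, double &gainB)
{
	std::vector<RGBZone> zonesB = zonesR;
	/* Sort by implied red gain (G/R) and blue gain (G/B) respectively. */
	std::sort(zonesR.begin(), zonesR.end(),
		  [](RGBZone const &a, RGBZone const &b) { return a.G * b.R < b.G * a.R; });
	std::sort(zonesB.begin(), zonesB.end(),
		  [](RGBZone const &a, RGBZone const &b) { return a.G * b.B < b.G * a.B; });
	size_t discard = zonesR.size() / 4;
	double sumRG = 0, sumRR = 0, sumBG = 0, sumBB = 0;
	for (size_t i = discard; i + discard < zonesR.size(); i++) {
		sumRG += zonesR[i].G, sumRR += zonesR[i].R;
		sumBG += zonesB[i].G, sumBB += zonesB[i].B;
	}
	gainR = sumRG / (sumRR + 1);
	gainB = sumBG / (sumBB + 1);
}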
@@ -645,7 +687,7 @@ void Awb::doAwb()
}
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Awb(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/awb.hpp b/src/ipa/raspberrypi/controller/rpi/awb.hpp
index 021aafa9..91854853 100644
--- a/src/ipa/raspberrypi/controller/rpi/awb.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/awb.hpp
@@ -16,63 +16,71 @@
namespace RPiController {
-// Control algorithm to perform AWB calculations.
+/* Control algorithm to perform AWB calculations. */
struct AwbMode {
void read(boost::property_tree::ptree const &params);
- double ctLo; // low CT value for search
- double ctHi; // high CT value for search
+ double ctLo; /* low CT value for search */
+ double ctHi; /* high CT value for search */
};
struct AwbPrior {
void read(boost::property_tree::ptree const &params);
- double lux; // lux level
- Pwl prior; // maps CT to prior log likelihood for this lux level
+ double lux; /* lux level */
+ Pwl prior; /* maps CT to prior log likelihood for this lux level */
};
struct AwbConfig {
AwbConfig() : defaultMode(nullptr) {}
void read(boost::property_tree::ptree const &params);
- // Only repeat the AWB calculation every "this many" frames
+ /* Only repeat the AWB calculation every "this many" frames */
uint16_t framePeriod;
- // number of initial frames for which speed taken as 1.0 (maximum)
+ /* number of initial frames for which speed taken as 1.0 (maximum) */
uint16_t startupFrames;
- unsigned int convergenceFrames; // approx number of frames to converge
- double speed; // IIR filter speed applied to algorithm results
- bool fast; // "fast" mode uses a 16x16 rather than 32x32 grid
- Pwl ctR; // function maps CT to r (= R/G)
- Pwl ctB; // function maps CT to b (= B/G)
- // table of illuminant priors at different lux levels
+ unsigned int convergenceFrames; /* approx number of frames to converge */
+ double speed; /* IIR filter speed applied to algorithm results */
+ bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */
+ Pwl ctR; /* function maps CT to r (= R/G) */
+ Pwl ctB; /* function maps CT to b (= B/G) */
+ /* table of illuminant priors at different lux levels */
std::vector<AwbPrior> priors;
- // AWB "modes" (determines the search range)
+ /* AWB "modes" (determines the search range) */
std::map<std::string, AwbMode> modes;
- AwbMode *defaultMode; // mode used if no mode selected
- // minimum proportion of pixels counted within AWB region for it to be
- // "useful"
+ AwbMode *defaultMode; /* mode used if no mode selected */
+ /*
+ * minimum proportion of pixels counted within AWB region for it to be
+ * "useful"
+ */
double minPixels;
- // minimum G value of those pixels, to be regarded a "useful"
+	/* minimum G value of those pixels, to be regarded as "useful" */
uint16_t minG;
- // number of AWB regions that must be "useful" in order to do the AWB
- // calculation
+ /*
+ * number of AWB regions that must be "useful" in order to do the AWB
+ * calculation
+ */
uint32_t minRegions;
- // clamp on colour error term (so as not to penalise non-grey excessively)
+ /* clamp on colour error term (so as not to penalise non-grey excessively) */
double deltaLimit;
- // step size control in coarse search
+ /* step size control in coarse search */
double coarseStep;
- // how far to wander off CT curve towards "more purple"
+ /* how far to wander off CT curve towards "more purple" */
double transversePos;
- // how far to wander off CT curve towards "more green"
+ /* how far to wander off CT curve towards "more green" */
double transverseNeg;
- // red sensitivity ratio (set to canonical sensor's R/G divided by this
- // sensor's R/G)
+ /*
+ * red sensitivity ratio (set to canonical sensor's R/G divided by this
+ * sensor's R/G)
+ */
double sensitivityR;
- // blue sensitivity ratio (set to canonical sensor's B/G divided by this
- // sensor's B/G)
+ /*
+ * blue sensitivity ratio (set to canonical sensor's B/G divided by this
+ * sensor's B/G)
+ */
double sensitivityB;
- // The whitepoint (which we normally "aim" for) can be moved.
+ /* The whitepoint (which we normally "aim" for) can be moved. */
double whitepointR;
double whitepointB;
- bool bayes; // use Bayesian algorithm
+ bool bayes; /* use Bayesian algorithm */
};
class Awb : public AwbAlgorithm
@@ -83,7 +91,7 @@ public:
char const *name() const override;
void initialise() override;
void read(boost::property_tree::ptree const &params) override;
- // AWB handles "pausing" for itself.
+ /* AWB handles "pausing" for itself. */
bool isPaused() const override;
void pause() override;
void resume() override;
@@ -108,35 +116,39 @@ public:
private:
bool isAutoEnabled() const;
- // configuration is read-only, and available to both threads
+ /* configuration is read-only, and available to both threads */
AwbConfig config_;
std::thread asyncThread_;
- void asyncFunc(); // asynchronous thread function
+ void asyncFunc(); /* asynchronous thread function */
std::mutex mutex_;
- // condvar for async thread to wait on
+ /* condvar for async thread to wait on */
std::condition_variable asyncSignal_;
- // condvar for synchronous thread to wait on
+ /* condvar for synchronous thread to wait on */
std::condition_variable syncSignal_;
- // for sync thread to check if async thread finished (requires mutex)
+ /* for sync thread to check if async thread finished (requires mutex) */
bool asyncFinished_;
- // for async thread to check if it's been told to run (requires mutex)
+ /* for async thread to check if it's been told to run (requires mutex) */
bool asyncStart_;
- // for async thread to check if it's been told to quit (requires mutex)
+ /* for async thread to check if it's been told to quit (requires mutex) */
bool asyncAbort_;
- // The following are only for the synchronous thread to use:
- // for sync thread to note its has asked async thread to run
+ /*
+ * The following are only for the synchronous thread to use:
+	 * for sync thread to note it has asked async thread to run
+ */
bool asyncStarted_;
- // counts up to framePeriod before restarting the async thread
+ /* counts up to framePeriod before restarting the async thread */
int framePhase_;
- int frameCount_; // counts up to startup_frames
+ int frameCount_; /* counts up to startup_frames */
AwbStatus syncResults_;
AwbStatus prevSyncResults_;
std::string modeName_;
- // The following are for the asynchronous thread to use, though the main
- // thread can set/reset them if the async thread is known to be idle:
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
void restartAsync(StatisticsPtr &stats, double lux);
- // copy out the results from the async thread so that it can be restarted
+ /* copy out the results from the async thread so that it can be restarted */
void fetchAsyncResults();
StatisticsPtr statistics_;
AwbMode *mode_;
@@ -152,11 +164,11 @@ private:
void fineSearch(double &t, double &r, double &b, Pwl const &prior);
std::vector<RGB> zones_;
std::vector<Pwl::Point> points_;
- // manual r setting
+ /* manual r setting */
double manualR_;
- // manual b setting
+ /* manual b setting */
double manualB_;
- bool firstSwitchMode_; // is this the first call to SwitchMode?
+ bool firstSwitchMode_; /* is this the first call to SwitchMode? */
};
static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
@@ -176,4 +188,4 @@ static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
return d * rgb;
}
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.cpp b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
index 340da0f0..26cf073a 100644
--- a/src/ipa/raspberrypi/controller/rpi/black_level.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.cpp
@@ -34,7 +34,7 @@ char const *BlackLevel::name() const
void BlackLevel::read(boost::property_tree::ptree const &params)
{
uint16_t blackLevel = params.get<uint16_t>(
- "black_level", 4096); // 64 in 10 bits scaled to 16 bits
+ "black_level", 4096); /* 64 in 10 bits scaled to 16 bits */
blackLevelR_ = params.get<uint16_t>("black_level_r", blackLevel);
blackLevelG_ = params.get<uint16_t>("black_level_g", blackLevel);
blackLevelB_ = params.get<uint16_t>("black_level_b", blackLevel);
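The default of 4096 is simply a 10-bit black level of 64 re-expressed on the pipeline's 16-bit scale, i.e. shifted up by six bits:

#include <cstdint>

constexpr uint16_t blackLevel10 = 64;
constexpr uint16_t blackLevel16 = blackLevel10 << (16 - 10);	/* = 4096 */
static_assert(blackLevel16 == 4096, "64 in 10 bits is 4096 in 16 bits");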
@@ -46,8 +46,10 @@ void BlackLevel::read(boost::property_tree::ptree const &params)
void BlackLevel::prepare(Metadata *imageMetadata)
{
- // Possibly we should think about doing this in a switchMode or
- // something?
+ /*
+ * Possibly we should think about doing this in a switchMode or
+ * something?
+ */
struct BlackLevelStatus status;
status.blackLevelR = blackLevelR_;
status.blackLevelG = blackLevelG_;
@@ -55,7 +57,7 @@ void BlackLevel::prepare(Metadata *imageMetadata)
imageMetadata->set("black_level.status", status);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return new BlackLevel(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/black_level.hpp b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
index 0d74f6a4..f01c5515 100644
--- a/src/ipa/raspberrypi/controller/rpi/black_level.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/black_level.hpp
@@ -9,7 +9,7 @@
#include "../algorithm.hpp"
#include "../black_level_status.h"
-// This is our implementation of the "black level algorithm".
+/* This is our implementation of the "black level algorithm". */
namespace RPiController {
@@ -27,4 +27,4 @@ private:
double blackLevelB_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.cpp b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
index 24d8e5bd..9ad63b6e 100644
--- a/src/ipa/raspberrypi/controller/rpi/ccm.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.cpp
@@ -19,11 +19,13 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiCcm)
-// This algorithm selects a CCM (Colour Correction Matrix) according to the
-// colour temperature estimated by AWB (interpolating between known matricies as
-// necessary). Additionally the amount of colour saturation can be controlled
-// both according to the current estimated lux level and according to a
-// saturation setting that is exposed to applications.
+/*
+ * This algorithm selects a CCM (Colour Correction Matrix) according to the
+ * colour temperature estimated by AWB (interpolating between known matrices as
+ * necessary). Additionally the amount of colour saturation can be controlled
+ * both according to the current estimated lux level and according to a
+ * saturation setting that is exposed to applications.
+ */
#define NAME "rpi.ccm"
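A hedged sketch of the colour-temperature based selection this comment describes: linearly interpolate between two calibrated matrices that bracket the estimated CT. The helper and types are illustrative, and the lux/application controlled saturation adjustment mentioned above is deliberately omitted:

#include <array>

using Matrix3x3 = std::array<double, 9>;

static Matrix3x3 interpolateCcm(double ct, double ctLo, const Matrix3x3 &mLo,
				double ctHi, const Matrix3x3 &mHi)
{
	/* Assumes ctLo < ct < ctHi; outside that range the nearest matrix
	 * would be used unchanged. */
	double alpha = (ct - ctLo) / (ctHi - ctLo);
	Matrix3x3 out;
	for (size_t i = 0; i < out.size(); i++)
		out[i] = (1 - alpha) * mLo[i] + alpha * mHi[i];
	return out;
}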
@@ -125,11 +127,11 @@ void Ccm::prepare(Metadata *imageMetadata)
{
bool awbOk = false, luxOk = false;
struct AwbStatus awb = {};
- awb.temperatureK = 4000; // in case no metadata
+ awb.temperatureK = 4000; /* in case no metadata */
struct LuxStatus lux = {};
- lux.lux = 400; // in case no metadata
+ lux.lux = 400; /* in case no metadata */
{
- // grab mutex just once to get everything
+ /* grab mutex just once to get everything */
std::lock_guard<Metadata> lock(*imageMetadata);
awbOk = getLocked(imageMetadata, "awb.status", awb);
luxOk = getLocked(imageMetadata, "lux.status", lux);
@@ -162,7 +164,7 @@ void Ccm::prepare(Metadata *imageMetadata)
imageMetadata->set("ccm.status", ccmStatus);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Ccm(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/ccm.hpp b/src/ipa/raspberrypi/controller/rpi/ccm.hpp
index 4c4807b8..7622044c 100644
--- a/src/ipa/raspberrypi/controller/rpi/ccm.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/ccm.hpp
@@ -13,7 +13,7 @@
namespace RPiController {
-// Algorithm to calculate colour matrix. Should be placed after AWB.
+/* Algorithm to calculate colour matrix. Should be placed after AWB. */
struct Matrix {
Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
@@ -72,4 +72,4 @@ private:
double saturation_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.cpp b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
index 16983757..f11c834a 100644
--- a/src/ipa/raspberrypi/controller/rpi/contrast.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.cpp
@@ -18,11 +18,13 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiContrast)
-// This is a very simple control algorithm which simply retrieves the results of
-// AGC and AWB via their "status" metadata, and applies digital gain to the
-// colour channels in accordance with those instructions. We take care never to
-// apply less than unity gains, as that would cause fully saturated pixels to go
-// off-white.
+/*
+ * This is a very simple control algorithm which simply retrieves the results of
+ * AGC and AWB via their "status" metadata, and applies digital gain to the
+ * colour channels in accordance with those instructions. We take care never to
+ * apply less than unity gains, as that would cause fully saturated pixels to go
+ * off-white.
+ */
#define NAME "rpi.contrast"
@@ -38,15 +40,15 @@ char const *Contrast::name() const
void Contrast::read(boost::property_tree::ptree const &params)
{
- // enable adaptive enhancement by default
+ /* enable adaptive enhancement by default */
config_.ceEnable = params.get<int>("ce_enable", 1);
- // the point near the bottom of the histogram to move
+ /* the point near the bottom of the histogram to move */
config_.loHistogram = params.get<double>("lo_histogram", 0.01);
- // where in the range to try and move it to
+ /* where in the range to try and move it to */
config_.loLevel = params.get<double>("lo_level", 0.015);
- // but don't move by more than this
+ /* but don't move by more than this */
config_.loMax = params.get<double>("lo_max", 500);
- // equivalent values for the top of the histogram...
+ /* equivalent values for the top of the histogram... */
config_.hiHistogram = params.get<double>("hi_histogram", 0.95);
config_.hiLevel = params.get<double>("hi_level", 0.95);
config_.hiMax = params.get<double>("hi_max", 2000);
@@ -81,8 +83,10 @@ static void fillInStatus(ContrastStatus &status, double brightness,
void Contrast::initialise()
{
- // Fill in some default values as Prepare will run before Process gets
- // called.
+ /*
+ * Fill in some default values as Prepare will run before Process gets
+ * called.
+ */
fillInStatus(status_, brightness_, contrast_, config_.gammaCurve);
}
@@ -97,8 +101,10 @@ Pwl computeStretchCurve(Histogram const &histogram,
{
Pwl enhance;
enhance.append(0, 0);
- // If the start of the histogram is rather empty, try to pull it down a
- // bit.
+ /*
+ * If the start of the histogram is rather empty, try to pull it down a
+ * bit.
+ */
double histLo = histogram.quantile(config.loHistogram) *
(65536 / NUM_HISTOGRAM_BINS);
double levelLo = config.loLevel * 65536;
@@ -109,13 +115,17 @@ Pwl computeStretchCurve(Histogram const &histogram,
LOG(RPiContrast, Debug)
<< "Final values " << histLo << " -> " << levelLo;
enhance.append(histLo, levelLo);
- // Keep the mid-point (median) in the same place, though, to limit the
- // apparent amount of global brightness shift.
+ /*
+ * Keep the mid-point (median) in the same place, though, to limit the
+ * apparent amount of global brightness shift.
+ */
double mid = histogram.quantile(0.5) * (65536 / NUM_HISTOGRAM_BINS);
enhance.append(mid, mid);
- // If the top to the histogram is empty, try to pull the pixel values
- // there up.
+ /*
+	 * If the top of the histogram is empty, try to pull the pixel values
+ * there up.
+ */
double histHi = histogram.quantile(config.hiHistogram) *
(65536 / NUM_HISTOGRAM_BINS);
double levelHi = config.hiLevel * 65536;
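Taken together, the stretch curve built in this function is a piecewise-linear map through a handful of anchor points. A sketch of those anchors, already rescaled to the 16-bit range; the final (65535, 65535) point is an assumption, as that append falls outside the hunks shown:

#include <utility>
#include <vector>

static std::vector<std::pair<double, double>>
stretchCurvePoints(double histLo, double levelLo, double mid,
		   double histHi, double levelHi)
{
	return {
		{ 0, 0 },		/* black stays black */
		{ histLo, levelLo },	/* pull the near-empty bottom end down */
		{ mid, mid },		/* keep the median where it is */
		{ histHi, levelHi },	/* push the near-empty top end up */
		{ 65535, 65535 },	/* white stays white (assumption) */
	};
}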
@@ -149,22 +159,30 @@ void Contrast::process(StatisticsPtr &stats,
[[maybe_unused]] Metadata *imageMetadata)
{
Histogram histogram(stats->hist[0].g_hist, NUM_HISTOGRAM_BINS);
- // We look at the histogram and adjust the gamma curve in the following
- // ways: 1. Adjust the gamma curve so as to pull the start of the
- // histogram down, and possibly push the end up.
+ /*
+ * We look at the histogram and adjust the gamma curve in the following
+ * ways: 1. Adjust the gamma curve so as to pull the start of the
+ * histogram down, and possibly push the end up.
+ */
Pwl gammaCurve = config_.gammaCurve;
if (config_.ceEnable) {
if (config_.loMax != 0 || config_.hiMax != 0)
gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
- // We could apply other adjustments (e.g. partial equalisation)
- // based on the histogram...?
+ /*
+ * We could apply other adjustments (e.g. partial equalisation)
+ * based on the histogram...?
+ */
}
- // 2. Finally apply any manually selected brightness/contrast
- // adjustment.
+ /*
+ * 2. Finally apply any manually selected brightness/contrast
+ * adjustment.
+ */
if (brightness_ != 0 || contrast_ != 1.0)
gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_);
- // And fill in the status for output. Use more points towards the bottom
- // of the curve.
+ /*
+ * And fill in the status for output. Use more points towards the bottom
+ * of the curve.
+ */
ContrastStatus status;
fillInStatus(status, brightness_, contrast_, gammaCurve);
{
@@ -173,7 +191,7 @@ void Contrast::process(StatisticsPtr &stats,
}
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Contrast(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/contrast.hpp b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
index 5a6d530f..4793dedc 100644
--- a/src/ipa/raspberrypi/controller/rpi/contrast.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/contrast.hpp
@@ -13,8 +13,10 @@
namespace RPiController {
-// Back End algorithm to appaly correct digital gain. Should be placed after
-// Back End AWB.
+/*
+ * Back End algorithm to apply correct digital gain. Should be placed after
+ * Back End AWB.
+ */
struct ContrastConfig {
bool ceEnable;
@@ -47,4 +49,4 @@ private:
std::mutex mutex_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.cpp b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
index 42154cf3..68ba5e3e 100644
--- a/src/ipa/raspberrypi/controller/rpi/dpc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.cpp
@@ -14,8 +14,10 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiDpc)
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
#define NAME "rpi.dpc"
@@ -39,13 +41,13 @@ void Dpc::read(boost::property_tree::ptree const &params)
void Dpc::prepare(Metadata *imageMetadata)
{
DpcStatus dpcStatus = {};
- // Should we vary this with lux level or analogue gain? TBD.
+ /* Should we vary this with lux level or analogue gain? TBD. */
dpcStatus.strength = config_.strength;
LOG(RPiDpc, Debug) << "strength " << dpcStatus.strength;
imageMetadata->set("dpc.status", dpcStatus);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Dpc(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/dpc.hpp b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
index 039310cc..048fa2b8 100644
--- a/src/ipa/raspberrypi/controller/rpi/dpc.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/dpc.hpp
@@ -11,7 +11,7 @@
namespace RPiController {
-// Back End algorithm to apply appropriate GEQ settings.
+/* Back End algorithm to apply appropriate DPC (defective pixel correction) settings. */
struct DpcConfig {
int strength;
@@ -29,4 +29,4 @@ private:
DpcConfig config_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.cpp b/src/ipa/raspberrypi/controller/rpi/geq.cpp
index 0da5efdf..14f226cf 100644
--- a/src/ipa/raspberrypi/controller/rpi/geq.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/geq.cpp
@@ -18,8 +18,10 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiGeq)
-// We use the lux status so that we can apply stronger settings in darkness (if
-// necessary).
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
#define NAME "rpi.geq"
@@ -50,7 +52,7 @@ void Geq::prepare(Metadata *imageMetadata)
if (imageMetadata->get("lux.status", luxStatus))
LOG(RPiGeq, Warning) << "no lux data found";
DeviceStatus deviceStatus;
- deviceStatus.analogueGain = 1.0; // in case not found
+ deviceStatus.analogueGain = 1.0; /* in case not found */
if (imageMetadata->get("device.status", deviceStatus))
LOG(RPiGeq, Warning)
<< "no device metadata - use analogue gain of 1x";
@@ -71,7 +73,7 @@ void Geq::prepare(Metadata *imageMetadata)
imageMetadata->set("geq.status", geqStatus);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Geq(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/geq.hpp b/src/ipa/raspberrypi/controller/rpi/geq.hpp
index bdbc55b2..5ea424fc 100644
--- a/src/ipa/raspberrypi/controller/rpi/geq.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/geq.hpp
@@ -11,12 +11,12 @@
namespace RPiController {
-// Back End algorithm to apply appropriate GEQ settings.
+/* Back End algorithm to apply appropriate GEQ settings. */
struct GeqConfig {
uint16_t offset;
double slope;
- Pwl strength; // lux to strength factor
+ Pwl strength; /* lux to strength factor */
};
class Geq : public Algorithm
@@ -31,4 +31,4 @@ private:
GeqConfig config_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.cpp b/src/ipa/raspberrypi/controller/rpi/lux.cpp
index 739a3d53..a76ec729 100644
--- a/src/ipa/raspberrypi/controller/rpi/lux.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/lux.cpp
@@ -25,8 +25,10 @@ LOG_DEFINE_CATEGORY(RPiLux)
Lux::Lux(Controller *controller)
: Algorithm(controller)
{
- // Put in some defaults as there will be no meaningful values until
- // Process has run.
+ /*
+ * Put in some defaults as there will be no meaningful values until
+ * Process has run.
+ */
status_.aperture = 1.0;
status_.lux = 400;
}
@@ -71,7 +73,7 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
sizeof(stats->hist[0].g_hist[0]);
for (int i = 0; i < numBins; i++)
sum += bin[i] * (uint64_t)i, num += bin[i];
- // add .5 to reflect the mid-points of bins
+ /* add .5 to reflect the mid-points of bins */
double currentY = sum / (double)num + .5;
double gainRatio = referenceGain_ / currentGain;
double shutterSpeedRatio =
@@ -89,14 +91,16 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
std::unique_lock<std::mutex> lock(mutex_);
status_ = status;
}
- // Overwrite the metadata here as well, so that downstream
- // algorithms get the latest value.
+ /*
+ * Overwrite the metadata here as well, so that downstream
+ * algorithms get the latest value.
+ */
imageMetadata->set("lux.status", status);
} else
LOG(RPiLux, Warning) << ": no device metadata";
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Lux(controller);
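For orientation, a standalone sketch of reference-based lux estimation in the spirit of this file: the current mean Y is compared against a reference image's Y, correcting for exposure time, analogue gain and aperture. The exact combination below is an assumption for illustration, not the formula from lux.cpp:

static double estimateLux(double currentY, double referenceY,
			  double currentShutterUs, double referenceShutterUs,
			  double currentGain, double referenceGain,
			  double currentAperture, double referenceAperture,
			  double referenceLux)
{
	double gainRatio = referenceGain / currentGain;
	double shutterRatio = referenceShutterUs / currentShutterUs;
	double apertureRatio = referenceAperture / currentAperture;
	double yRatio = currentY / referenceY;
	/* Scale the reference lux by how much brighter or darker the scene
	 * appears once the exposure differences are divided out. */
	return shutterRatio * gainRatio * apertureRatio * apertureRatio *
	       yRatio * referenceLux;
}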
diff --git a/src/ipa/raspberrypi/controller/rpi/lux.hpp b/src/ipa/raspberrypi/controller/rpi/lux.hpp
index bd49a409..5488299b 100644
--- a/src/ipa/raspberrypi/controller/rpi/lux.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/lux.hpp
@@ -13,7 +13,7 @@
#include "../lux_status.h"
#include "../algorithm.hpp"
-// This is our implementation of the "lux control algorithm".
+/* This is our implementation of the "lux control algorithm". */
namespace RPiController {
@@ -28,16 +28,18 @@ public:
void setCurrentAperture(double aperture);
private:
- // These values define the conditions of the reference image, against
- // which we compare the new image.
+ /*
+ * These values define the conditions of the reference image, against
+ * which we compare the new image.
+ */
libcamera::utils::Duration referenceShutterSpeed_;
double referenceGain_;
- double referenceAperture_; // units of 1/f
- double referenceY_; // out of 65536
+ double referenceAperture_; /* units of 1/f */
+ double referenceY_; /* out of 65536 */
double referenceLux_;
double currentAperture_;
LuxStatus status_;
std::mutex mutex_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.cpp b/src/ipa/raspberrypi/controller/rpi/noise.cpp
index 97b0fd05..5d87822e 100644
--- a/src/ipa/raspberrypi/controller/rpi/noise.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/noise.cpp
@@ -34,8 +34,10 @@ char const *Noise::name() const
void Noise::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // For example, we would expect a 2x2 binned mode to have a "noise
- // factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ /*
+ * For example, we would expect a 2x2 binned mode to have a "noise
+ * factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ */
modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
}
@@ -48,14 +50,16 @@ void Noise::read(boost::property_tree::ptree const &params)
void Noise::prepare(Metadata *imageMetadata)
{
struct DeviceStatus deviceStatus;
- deviceStatus.analogueGain = 1.0; // keep compiler calm
+ deviceStatus.analogueGain = 1.0; /* keep compiler calm */
if (imageMetadata->get("device.status", deviceStatus) == 0) {
- // There is a slight question as to exactly how the noise
- // profile, specifically the constant part of it, scales. For
- // now we assume it all scales the same, and we'll revisit this
- // if it proves substantially wrong. NOTE: we may also want to
- // make some adjustments based on the camera mode (such as
- // binning), if we knew how to discover it...
+ /*
+ * There is a slight question as to exactly how the noise
+ * profile, specifically the constant part of it, scales. For
+ * now we assume it all scales the same, and we'll revisit this
+ * if it proves substantially wrong. NOTE: we may also want to
+ * make some adjustments based on the camera mode (such as
+ * binning), if we knew how to discover it...
+ */
double factor = sqrt(deviceStatus.analogueGain) / modeFactor_;
struct NoiseStatus status;
status.noiseConstant = referenceConstant_ * factor;
@@ -68,7 +72,7 @@ void Noise::prepare(Metadata *imageMetadata)
LOG(RPiNoise, Warning) << " no metadata";
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return new Noise(controller);
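A small sketch of the scaling described in Noise::prepare above: the reference profile, measured at 1x analogue gain, is scaled by sqrt(gain) and divided by the mode's noise factor (e.g. 2 for a 2x2 binned mode). The slope is assumed to scale the same way as the constant, per the comment above:

#include <cmath>

static void scaleNoiseProfile(double referenceConstant, double referenceSlope,
			      double analogueGain, double modeFactor,
			      double &noiseConstant, double &noiseSlope)
{
	double factor = std::sqrt(analogueGain) / modeFactor;
	noiseConstant = referenceConstant * factor;
	noiseSlope = referenceSlope * factor;	/* assumption: scales like the constant */
}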
diff --git a/src/ipa/raspberrypi/controller/rpi/noise.hpp b/src/ipa/raspberrypi/controller/rpi/noise.hpp
index ed6ffe91..56a4707b 100644
--- a/src/ipa/raspberrypi/controller/rpi/noise.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/noise.hpp
@@ -9,7 +9,7 @@
#include "../algorithm.hpp"
#include "../noise_status.h"
-// This is our implementation of the "noise algorithm".
+/* This is our implementation of the "noise algorithm". */
namespace RPiController {
@@ -23,10 +23,10 @@ public:
void prepare(Metadata *imageMetadata) override;
private:
- // the noise profile for analogue gain of 1.0
+ /* the noise profile for analogue gain of 1.0 */
double referenceConstant_;
double referenceSlope_;
double modeFactor_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.cpp b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
index 480da38d..2f6b8764 100644
--- a/src/ipa/raspberrypi/controller/rpi/sdn.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.cpp
@@ -17,8 +17,10 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiSdn)
-// Calculate settings for the spatial denoise block using the noise profile in
-// the image metadata.
+/*
+ * Calculate settings for the spatial denoise block using the noise profile in
+ * the image metadata.
+ */
#define NAME "rpi.sdn"
@@ -45,7 +47,7 @@ void Sdn::initialise()
void Sdn::prepare(Metadata *imageMetadata)
{
struct NoiseStatus noiseStatus = {};
- noiseStatus.noiseSlope = 3.0; // in case no metadata
+ noiseStatus.noiseSlope = 3.0; /* in case no metadata */
if (imageMetadata->get("noise.status", noiseStatus) != 0)
LOG(RPiSdn, Warning) << "no noise profile found";
LOG(RPiSdn, Debug)
@@ -65,11 +67,11 @@ void Sdn::prepare(Metadata *imageMetadata)
void Sdn::setMode(DenoiseMode mode)
{
- // We only distinguish between off and all other modes.
+ /* We only distinguish between off and all other modes. */
mode_ = mode;
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return (Algorithm *)new Sdn(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/sdn.hpp b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
index d9b18f29..8b6e3db1 100644
--- a/src/ipa/raspberrypi/controller/rpi/sdn.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/sdn.hpp
@@ -11,7 +11,7 @@
namespace RPiController {
-// Algorithm to calculate correct spatial denoise (SDN) settings.
+/* Algorithm to calculate correct spatial denoise (SDN) settings. */
class Sdn : public DenoiseAlgorithm
{
@@ -29,4 +29,4 @@ private:
DenoiseMode mode_;
};
-} // namespace RPiController
+} /* namespace RPiController */
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
index 3fe62bc8..9b7f903a 100644
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.cpp
@@ -33,7 +33,7 @@ char const *Sharpen::name() const
void Sharpen::switchMode(CameraMode const &cameraMode,
[[maybe_unused]] Metadata *metadata)
{
- // can't be less than one, right?
+ /* can't be less than one, right? */
modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
}
@@ -50,24 +50,30 @@ void Sharpen::read(boost::property_tree::ptree const &params)
void Sharpen::setStrength(double strength)
{
- // Note that this function is how an application sets the overall
- // sharpening "strength". We call this the "user strength" field
- // as there already is a strength_ field - being an internal gain
- // parameter that gets passed to the ISP control code. Negative
- // values are not allowed - coerce them to zero (no sharpening).
+ /*
+ * Note that this function is how an application sets the overall
+ * sharpening "strength". We call this the "user strength" field
+ * as there already is a strength_ field - being an internal gain
+ * parameter that gets passed to the ISP control code. Negative
+ * values are not allowed - coerce them to zero (no sharpening).
+ */
userStrength_ = std::max(0.0, strength);
}
void Sharpen::prepare(Metadata *imageMetadata)
{
- // The userStrength_ affects the algorithm's internal gain directly, but
- // we adjust the limit and threshold less aggressively. Using a sqrt
- // function is an arbitrary but gentle way of accomplishing this.
+ /*
+ * The userStrength_ affects the algorithm's internal gain directly, but
+ * we adjust the limit and threshold less aggressively. Using a sqrt
+ * function is an arbitrary but gentle way of accomplishing this.
+ */
double userStrengthSqrt = sqrt(userStrength_);
struct SharpenStatus status;
- // Binned modes seem to need the sharpening toned down with this
- // pipeline, thus we use the modeFactor_ here. Also avoid
- // divide-by-zero with the userStrengthSqrt.
+ /*
+ * Binned modes seem to need the sharpening toned down with this
+ * pipeline, thus we use the modeFactor_ here. Also avoid
+ * divide-by-zero with the userStrengthSqrt.
+ */
status.threshold = threshold_ * modeFactor_ /
std::max(0.01, userStrengthSqrt);
status.strength = strength_ / modeFactor_ * userStrength_;
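A compact sketch of the user-strength adjustment shown in this hunk: the internal gain scales linearly with userStrength, while the threshold moves more gently via its square root (and is raised for binned modes through modeFactor). Helper name and signature are illustrative:

#include <algorithm>
#include <cmath>

static void applyUserStrength(double &threshold, double &strength,
			      double userStrength, double modeFactor)
{
	double userStrengthSqrt = std::sqrt(std::max(0.0, userStrength));
	/* Avoid divide-by-zero when the user strength is zero. */
	threshold = threshold * modeFactor / std::max(0.01, userStrengthSqrt);
	strength = strength / modeFactor * userStrength;
}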
@@ -77,7 +83,7 @@ void Sharpen::prepare(Metadata *imageMetadata)
imageMetadata->set("sharpen.status", status);
}
-// Register algorithm with the system.
+/* Register algorithm with the system. */
static Algorithm *create(Controller *controller)
{
return new Sharpen(controller);
diff --git a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
index ced917f3..18c45fd4 100644
--- a/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
+++ b/src/ipa/raspberrypi/controller/rpi/sharpen.hpp
@@ -9,7 +9,7 @@
#include "../sharpen_algorithm.hpp"
#include "../sharpen_status.h"
-// This is our implementation of the "sharpen algorithm".
+/* This is our implementation of the "sharpen algorithm". */
namespace RPiController {
@@ -31,4 +31,4 @@ private:
double userStrength_;
};
-} // namespace RPiController
+} /* namespace RPiController */