-rw-r--r--  include/libcamera/base/utils.h | 24
-rw-r--r--  include/libcamera/internal/converter.h | 5
-rw-r--r--  include/libcamera/internal/converter/converter_v4l2_m2m.h | 13
-rw-r--r--  include/libcamera/internal/software_isp/software_isp.h | 5
-rw-r--r--  src/apps/common/dng_writer.cpp | 24
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.cpp | 42
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.h | 3
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.cpp | 85
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.h | 5
-rw-r--r--  src/ipa/rkisp1/data/imx219.yaml | 4
-rw-r--r--  src/ipa/rkisp1/data/imx258.yaml | 1
-rw-r--r--  src/ipa/rkisp1/data/ov4689.yaml | 4
-rw-r--r--  src/ipa/rkisp1/data/ov5640.yaml | 4
-rw-r--r--  src/ipa/rkisp1/data/uncalibrated.yaml | 1
-rw-r--r--  src/ipa/rkisp1/ipa_context.h | 6
-rw-r--r--  src/ipa/rkisp1/rkisp1.cpp | 26
-rw-r--r--  src/libcamera/converter.cpp | 6
-rw-r--r--  src/libcamera/converter/converter_v4l2_m2m.cpp | 79
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp | 15
-rw-r--r--  src/libcamera/software_isp/software_isp.cpp | 26
-rw-r--r--  test/utils.cpp | 17
-rw-r--r--  utils/tuning/README.rst | 23
-rw-r--r--  utils/tuning/config-example.yaml | 12
-rw-r--r--  utils/tuning/libtuning/ctt_awb.py | 378
-rw-r--r--  utils/tuning/libtuning/ctt_ccm.py | 408
-rw-r--r--  utils/tuning/libtuning/ctt_colors.py | 30
-rw-r--r--  utils/tuning/libtuning/ctt_ransac.py | 71
-rw-r--r--  utils/tuning/libtuning/generators/yaml_output.py | 8
-rw-r--r--  utils/tuning/libtuning/image.py | 8
-rw-r--r--  utils/tuning/libtuning/libtuning.py | 23
-rw-r--r--  utils/tuning/libtuning/macbeth.py | 65
-rw-r--r--  utils/tuning/libtuning/macbeth_ref.pgm | 2
-rw-r--r--  utils/tuning/libtuning/modules/agc/rkisp1.py | 2
-rw-r--r--  utils/tuning/libtuning/modules/ccm/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/modules/ccm/ccm.py | 41
-rw-r--r--  utils/tuning/libtuning/modules/ccm/rkisp1.py | 28
-rw-r--r--  utils/tuning/libtuning/modules/lsc/lsc.py | 5
-rw-r--r--  utils/tuning/libtuning/modules/lsc/raspberrypi.py | 12
-rw-r--r--  utils/tuning/libtuning/modules/lsc/rkisp1.py | 20
-rw-r--r--  utils/tuning/libtuning/modules/static.py | 24
-rw-r--r--  utils/tuning/libtuning/parsers/yaml_parser.py | 9
-rw-r--r--  utils/tuning/libtuning/utils.py | 97
-rw-r--r--  utils/tuning/requirements.txt | 9
-rwxr-xr-x  utils/tuning/rkisp1.py | 24
44 files changed, 1493 insertions(+), 207 deletions(-)
diff --git a/include/libcamera/base/utils.h b/include/libcamera/base/utils.h
index 4ae02dc9..734ff81e 100644
--- a/include/libcamera/base/utils.h
+++ b/include/libcamera/base/utils.h
@@ -91,6 +91,30 @@ _hex hex(T value, unsigned int width = 0);
#ifndef __DOXYGEN__
template<>
+inline _hex hex<int8_t>(int8_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 2 };
+}
+
+template<>
+inline _hex hex<uint8_t>(uint8_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 2 };
+}
+
+template<>
+inline _hex hex<int16_t>(int16_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 4 };
+}
+
+template<>
+inline _hex hex<uint16_t>(uint16_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 4 };
+}
+
+template<>
inline _hex hex<int32_t>(int32_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 8 };
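With these specializations in place, the default field width tracks the size of the integer type. A minimal usage sketch (mirroring the expectations added to test/utils.cpp further down):

```cpp
#include <iostream>

#include <libcamera/base/utils.h>

using namespace libcamera;

int main()
{
	/* Default widths follow the type: 2, 4 and 8 hex digits. */
	std::cout << utils::hex(static_cast<uint8_t>(0x42)) << "\n";  /* 0x42 */
	std::cout << utils::hex(static_cast<uint16_t>(0x42)) << "\n"; /* 0x0042 */
	std::cout << utils::hex(static_cast<uint32_t>(0x42)) << "\n"; /* 0x00000042 */

	/* An explicit width still overrides the default. */
	std::cout << utils::hex(static_cast<uint16_t>(0x42), 6) << "\n"; /* 0x000042 */
	return 0;
}
```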
diff --git a/include/libcamera/internal/converter.h b/include/libcamera/internal/converter.h
index 5d74db6b..b51563d7 100644
--- a/include/libcamera/internal/converter.h
+++ b/include/libcamera/internal/converter.h
@@ -26,6 +26,7 @@ namespace libcamera {
class FrameBuffer;
class MediaDevice;
class PixelFormat;
+class Stream;
struct StreamConfiguration;
class Converter
@@ -46,14 +47,14 @@ public:
virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
- virtual int exportBuffers(unsigned int output, unsigned int count,
+ virtual int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
virtual int start() = 0;
virtual void stop() = 0;
virtual int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
+ const std::map<const Stream *, FrameBuffer *> &outputs) = 0;
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
diff --git a/include/libcamera/internal/converter/converter_v4l2_m2m.h b/include/libcamera/internal/converter/converter_v4l2_m2m.h
index 1126050c..b9e59899 100644
--- a/include/libcamera/internal/converter/converter_v4l2_m2m.h
+++ b/include/libcamera/internal/converter/converter_v4l2_m2m.h
@@ -28,6 +28,7 @@ class FrameBuffer;
class MediaDevice;
class Size;
class SizeRange;
+class Stream;
struct StreamConfiguration;
class V4L2M2MDevice;
@@ -47,20 +48,20 @@ public:
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int output, unsigned int count,
+ int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
int start();
void stop();
int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
+ const std::map<const Stream *, FrameBuffer *> &outputs);
private:
- class Stream : protected Loggable
+ class V4L2M2MStream : protected Loggable
{
public:
- Stream(V4L2M2MConverter *converter, unsigned int index);
+ V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream);
bool isValid() const { return m2m_ != nullptr; }
@@ -82,7 +83,7 @@ private:
void outputBufferReady(FrameBuffer *buffer);
V4L2M2MConverter *converter_;
- unsigned int index_;
+ const Stream *stream_;
std::unique_ptr<V4L2M2MDevice> m2m_;
unsigned int inputBufferCount_;
@@ -91,7 +92,7 @@ private:
std::unique_ptr<V4L2M2MDevice> m2m_;
- std::vector<Stream> streams_;
+ std::map<const Stream *, std::unique_ptr<V4L2M2MStream>> streams_;
std::map<FrameBuffer *, unsigned int> queue_;
};
diff --git a/include/libcamera/internal/software_isp/software_isp.h b/include/libcamera/internal/software_isp/software_isp.h
index c5338c05..f8e00003 100644
--- a/include/libcamera/internal/software_isp/software_isp.h
+++ b/include/libcamera/internal/software_isp/software_isp.h
@@ -37,6 +37,7 @@ namespace libcamera {
class DebayerCpu;
class FrameBuffer;
class PixelFormat;
+class Stream;
struct StreamConfiguration;
LOG_DECLARE_CATEGORY(SoftwareIsp)
@@ -62,7 +63,7 @@ public:
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
const ControlInfoMap &sensorControls);
- int exportBuffers(unsigned int output, unsigned int count,
+ int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
void processStats(const ControlList &sensorControls);
@@ -71,7 +72,7 @@ public:
void stop();
int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
+ const std::map<const Stream *, FrameBuffer *> &outputs);
void process(FrameBuffer *input, FrameBuffer *output);
diff --git a/src/apps/common/dng_writer.cpp b/src/apps/common/dng_writer.cpp
index 50db5eb3..355433b0 100644
--- a/src/apps/common/dng_writer.cpp
+++ b/src/apps/common/dng_writer.cpp
@@ -139,29 +139,29 @@ void packScanlineRaw8(void *output, const void *input, unsigned int width)
void packScanlineRaw10(void *output, const void *input, unsigned int width)
{
- const uint16_t *in = static_cast<const uint16_t *>(input);
+ const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
for (unsigned int i = 0; i < width; i += 4) {
- *out++ = (in[0] & 0x3fc) >> 2;
- *out++ = (in[0] & 0x003) << 6 | (in[1] & 0x3f0) >> 4;
- *out++ = (in[1] & 0x00f) << 4 | (in[2] & 0x3c0) >> 6;
- *out++ = (in[2] & 0x03f) << 2 | (in[3] & 0x300) >> 8;
- *out++ = (in[3] & 0x0ff);
- in += 4;
+ *out++ = in[1] << 6 | in[0] >> 2;
+ *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
+ *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
+ *out++ = in[4] << 2 | (in[7] & 0x03) << 0;
+ *out++ = in[6];
+ in += 8;
}
}
void packScanlineRaw12(void *output, const void *input, unsigned int width)
{
- const uint16_t *in = static_cast<const uint16_t *>(input);
+ const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
for (unsigned int i = 0; i < width; i += 2) {
- *out++ = (in[0] & 0xff0) >> 4;
- *out++ = (in[0] & 0x00f) << 4 | (in[1] & 0xf00) >> 8;
- *out++ = (in[1] & 0x0ff);
- in += 2;
+ *out++ = in[1] << 4 | in[0] >> 4;
+ *out++ = in[0] << 4 | (in[3] & 0x0f);
+ *out++ = in[2];
+ in += 4;
}
}
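The rewrite indexes the input as bytes: each 10- or 12-bit sample arrives as a little-endian 16-bit word, and reading the two bytes explicitly keeps the packing independent of host endianness. A sketch (not part of the patch) restating the raw10 loop above with explicit 16-bit loads, to make the sample layout visible:

```cpp
#include <stdint.h>

/*
 * Hypothetical restatement of packScanlineRaw10() above: four 10-bit
 * samples, each stored little-endian in a 16-bit word, pack into five
 * output bytes.
 */
static void packRaw10(uint8_t *out, const uint8_t *in, unsigned int width)
{
	for (unsigned int i = 0; i < width; i += 4) {
		uint16_t s[4];
		for (unsigned int j = 0; j < 4; j++)
			s[j] = (in[2 * j] | (in[2 * j + 1] << 8)) & 0x3ff;

		*out++ = s[0] >> 2;
		*out++ = (s[0] & 0x003) << 6 | s[1] >> 4;
		*out++ = (s[1] & 0x00f) << 4 | s[2] >> 6;
		*out++ = (s[2] & 0x03f) << 2 | s[3] >> 8;
		*out++ = s[3] & 0x0ff;
		in += 8;
	}
}
```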
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index 782ff990..a1339c83 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -48,6 +48,33 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor scaled to a 16-bit pixel
+ * width. If it is unknown, an empty optional is returned.
+ *
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - Pedestal added by the sensor to pixel values. They are typically fixed,
+ * sometimes programmable and should be reported in datasheets (but
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depends on the
+ * sensor gains.
+ *
+ * The pedestal is usually the value with the biggest contribution to the
+ * overall black level. In most cases it is known in advance; in rare cases it
+ * could be queried from the sensor (though there is not a single driver with
+ * such a control in the Linux kernel). This function provides that fixed,
+ * known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
* \brief Compute gain code from the analogue gain absolute value
* \param[in] gain The real gain to pass
*
@@ -205,6 +232,11 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*/
/**
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ */
+
+/**
* \var CameraSensorHelper::gainType_
* \brief The analogue gain model type
*/
@@ -396,6 +428,8 @@ class CameraSensorHelperImx219 : public CameraSensorHelper
public:
CameraSensorHelperImx219()
{
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 0, 256, -1, 256 };
}
@@ -407,6 +441,8 @@ class CameraSensorHelperImx258 : public CameraSensorHelper
public:
CameraSensorHelperImx258()
{
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 0, 512, -1, 512 };
}
@@ -456,6 +492,8 @@ class CameraSensorHelperImx335 : public CameraSensorHelper
public:
CameraSensorHelperImx335()
{
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
gainType_ = AnalogueGainExponential;
gainConstants_.exp = { 1.0, expGainDb(0.3) };
}
@@ -515,6 +553,8 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper
public:
CameraSensorHelperOv4689()
{
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 1, 0, 0, 128 };
}
@@ -526,6 +566,8 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper
public:
CameraSensorHelperOv5640()
{
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 1, 0, 0, 16 };
}
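All the constants added above follow the same rule: the datasheet value at the sensor's native bit depth is shifted up to 16 bits. A hypothetical helper (not part of the patch) that checks the arithmetic:

```cpp
#include <stdint.h>

/* Scale a native-depth black level to the 16-bit range used by
 * CameraSensorHelper. Hypothetical, for illustration only. */
constexpr int16_t scaleBlackLevel(int16_t level, unsigned int bitDepth)
{
	return level << (16 - bitDepth);
}

static_assert(scaleBlackLevel(64, 10) == 4096, "imx219, imx258");
static_assert(scaleBlackLevel(0x32, 10) == 3200, "imx335");
static_assert(scaleBlackLevel(0x40, 12) == 1024, "ov4689");
static_assert(scaleBlackLevel(0x10, 10) == 1024, "ov5640");
```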
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 0d99073b..ac276e27 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -10,6 +10,7 @@
#include <stdint.h>
#include <memory>
+#include <optional>
#include <string>
#include <vector>
@@ -25,6 +26,7 @@ public:
CameraSensorHelper() = default;
virtual ~CameraSensorHelper() = default;
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
virtual uint32_t gainCode(double gain) const;
virtual double gain(uint32_t gainCode) const;
@@ -51,6 +53,7 @@ protected:
AnalogueGainExpConstants exp;
};
+ std::optional<int16_t> blackLevel_;
AnalogueGainType gainType_;
AnalogueGainConstants gainConstants_;
diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp
index d2e74354..871dd204 100644
--- a/src/ipa/rkisp1/algorithms/blc.cpp
+++ b/src/ipa/rkisp1/algorithms/blc.cpp
@@ -9,6 +9,8 @@
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
#include "libcamera/internal/yaml_parser.h"
/**
@@ -38,18 +40,61 @@ LOG_DEFINE_CATEGORY(RkISP1Blc)
BlackLevelCorrection::BlackLevelCorrection()
: tuningParameters_(false)
{
+ /*
+ * This is a bit of a hack. In raw mode no black level correction
+ * happens. This flag is used to ensure the metadata gets populated with
+ * the black level, which is needed to capture proper raw images for
+ * tuning.
+ */
+ supportsRaw_ = true;
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
-int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
- const YamlObject &tuningData)
+int BlackLevelCorrection::init(IPAContext &context, const YamlObject &tuningData)
{
- blackLevelRed_ = tuningData["R"].get<int16_t>(256);
- blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(256);
- blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(256);
- blackLevelBlue_ = tuningData["B"].get<int16_t>(256);
+ std::optional<int16_t> levelRed = tuningData["R"].get<int16_t>();
+ std::optional<int16_t> levelGreenR = tuningData["Gr"].get<int16_t>();
+ std::optional<int16_t> levelGreenB = tuningData["Gb"].get<int16_t>();
+ std::optional<int16_t> levelBlue = tuningData["B"].get<int16_t>();
+ bool tuningHasLevels = levelRed && levelGreenR && levelGreenB && levelBlue;
+
+ auto blackLevel = context.camHelper->blackLevel();
+ if (!blackLevel) {
+ /*
+ * Not all camera sensor helpers have been updated with black
+ * levels. Print a warning and fall back to the levels from the
+ * tuning data to preserve backward compatibility. This should
+ * be removed once all helpers provide the data.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "No black levels provided by camera sensor helper"
+ << ", please fix";
+
+ blackLevelRed_ = levelRed.value_or(4096);
+ blackLevelGreenR_ = levelGreenR.value_or(4096);
+ blackLevelGreenB_ = levelGreenB.value_or(4096);
+ blackLevelBlue_ = levelBlue.value_or(4096);
+ } else if (tuningHasLevels) {
+ /*
+ * If black levels are provided in the tuning file, use them to
+ * avoid breaking existing camera tuning. This is deprecated and
+ * will be removed.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "Deprecated: black levels overwritten by tuning file";
+
+ blackLevelRed_ = *levelRed;
+ blackLevelGreenR_ = *levelGreenR;
+ blackLevelGreenB_ = *levelGreenB;
+ blackLevelBlue_ = *levelBlue;
+ } else {
+ blackLevelRed_ = *blackLevel;
+ blackLevelGreenR_ = *blackLevel;
+ blackLevelGreenB_ = *blackLevel;
+ blackLevelBlue_ = *blackLevel;
+ }
tuningParameters_ = true;
@@ -70,6 +115,9 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
[[maybe_unused]] IPAFrameContext &frameContext,
rkisp1_params_cfg *params)
{
+ if (context.configuration.raw)
+ return;
+
if (frame > 0)
return;
@@ -77,16 +125,33 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
return;
params->others.bls_config.enable_auto = 0;
- params->others.bls_config.fixed_val.r = blackLevelRed_;
- params->others.bls_config.fixed_val.gr = blackLevelGreenR_;
- params->others.bls_config.fixed_val.gb = blackLevelGreenB_;
- params->others.bls_config.fixed_val.b = blackLevelBlue_;
+ /* The rkisp1 uses 12-bit black levels. Scale down accordingly. */
+ params->others.bls_config.fixed_val.r = blackLevelRed_ >> 4;
+ params->others.bls_config.fixed_val.gr = blackLevelGreenR_ >> 4;
+ params->others.bls_config.fixed_val.gb = blackLevelGreenB_ >> 4;
+ params->others.bls_config.fixed_val.b = blackLevelBlue_ >> 4;
params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;
params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS;
}
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelRed_),
+ static_cast<int32_t>(blackLevelGreenR_),
+ static_cast<int32_t>(blackLevelGreenB_),
+ static_cast<int32_t>(blackLevelBlue_) });
+}
+
REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
} /* namespace ipa::rkisp1::algorithms */
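The >> 4 in prepare() maps the helper's 16-bit levels onto the ISP's 12-bit register range. For the imx219 this reproduces exactly the value the tuning file used to hard-code, which is why the yaml entry below can be dropped without a behaviour change; for the OmniVision sensors the datasheet-derived values replace the previous ones. A one-line sanity check (hypothetical, not part of the patch):

```cpp
/* 16-bit helper value for the imx219, scaled to the 12-bit ISP range. */
static_assert((4096 >> 4) == 256, "matches the removed imx219 tuning value");
```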
diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h
index 460ebcc1..4ecac233 100644
--- a/src/ipa/rkisp1/algorithms/blc.h
+++ b/src/ipa/rkisp1/algorithms/blc.h
@@ -23,7 +23,10 @@ public:
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
rkisp1_params_cfg *params) override;
-
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
private:
bool tuningParameters_;
int16_t blackLevelRed_;
diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml
index cbcc43b8..0d99cb52 100644
--- a/src/ipa/rkisp1/data/imx219.yaml
+++ b/src/ipa/rkisp1/data/imx219.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml
index 43dddf20..202af36a 100644
--- a/src/ipa/rkisp1/data/imx258.yaml
+++ b/src/ipa/rkisp1/data/imx258.yaml
@@ -5,6 +5,7 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml
index 2068684c..60901296 100644
--- a/src/ipa/rkisp1/data/ov4689.yaml
+++ b/src/ipa/rkisp1/data/ov4689.yaml
@@ -6,8 +6,4 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 66
- Gr: 66
- Gb: 66
- B: 66
...
diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml
index 897b83cb..4b21d412 100644
--- a/src/ipa/rkisp1/data/ov5640.yaml
+++ b/src/ipa/rkisp1/data/ov5640.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- ColorProcessing:
- GammaSensorLinearization:
x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]
diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml
index a7bbd8d8..60901296 100644
--- a/src/ipa/rkisp1/data/uncalibrated.yaml
+++ b/src/ipa/rkisp1/data/uncalibrated.yaml
@@ -5,4 +5,5 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
...
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
index 8602b408..1d0e9030 100644
--- a/src/ipa/rkisp1/ipa_context.h
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -8,6 +8,8 @@
#pragma once
+#include <memory>
+
#include <linux/rkisp1-config.h>
#include <libcamera/base/utils.h>
@@ -16,6 +18,7 @@
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libipa/camera_sensor_helper.h>
#include <libipa/fc_queue.h>
#include <libipa/matrix.h>
@@ -178,6 +181,9 @@ struct IPAContext {
FCQueue<IPAFrameContext> frameContexts;
ControlInfoMap::Map ctrlMap;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper;
};
} /* namespace ipa::rkisp1 */
diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp
index d31cdbab..23e0826c 100644
--- a/src/ipa/rkisp1/rkisp1.cpp
+++ b/src/ipa/rkisp1/rkisp1.cpp
@@ -29,7 +29,6 @@
#include "libcamera/internal/yaml_parser.h"
#include "algorithms/algorithm.h"
-#include "libipa/camera_sensor_helper.h"
#include "ipa_context.h"
@@ -81,9 +80,6 @@ private:
ControlInfoMap sensorControls_;
- /* Interface to the Camera Helper */
- std::unique_ptr<CameraSensorHelper> camHelper_;
-
/* Local parameter storage */
struct IPAContext context_;
};
@@ -115,7 +111,7 @@ const ControlInfoMap::Map rkisp1Controls{
} /* namespace */
IPARkISP1::IPARkISP1()
- : context_({ {}, {}, {}, { kMaxFrameContexts }, {} })
+ : context_({ {}, {}, {}, { kMaxFrameContexts }, {}, {} })
{
}
@@ -147,8 +143,8 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision;
- camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
- if (!camHelper_) {
+ context_.camHelper = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!context_.camHelper) {
LOG(IPARkISP1, Error)
<< "Failed to create camera sensor helper for "
<< settings.sensorModel;
@@ -250,8 +246,10 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
minExposure * context_.configuration.sensor.lineDuration;
context_.configuration.sensor.maxShutterSpeed =
maxExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.sensor.minAnalogueGain = camHelper_->gain(minGain);
- context_.configuration.sensor.maxAnalogueGain = camHelper_->gain(maxGain);
+ context_.configuration.sensor.minAnalogueGain =
+ context_.camHelper->gain(minGain);
+ context_.configuration.sensor.maxAnalogueGain =
+ context_.camHelper->gain(maxGain);
context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(),
[](auto &cfg) -> bool {
@@ -352,7 +350,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId
frameContext.sensor.exposure =
sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
frameContext.sensor.gain =
- camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+ context_.camHelper->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
ControlList metadata(controls::controls);
@@ -389,9 +387,9 @@ void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
/* Compute the analogue gain limits. */
const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
- float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
- float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
- float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ float minGain = context_.camHelper->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = context_.camHelper->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = context_.camHelper->gain(v4l2Gain.def().get<int32_t>());
ctrlMap.emplace(std::piecewise_construct,
std::forward_as_tuple(&controls::AnalogueGain),
std::forward_as_tuple(minGain, maxGain, defGain));
@@ -436,7 +434,7 @@ void IPARkISP1::setControls(unsigned int frame)
IPAFrameContext &frameContext = context_.frameContexts.get(frame);
uint32_t exposure = frameContext.agc.exposure;
- uint32_t gain = camHelper_->gainCode(frameContext.agc.gain);
+ uint32_t gain = context_.camHelper->gainCode(frameContext.agc.gain);
ControlList ctrls(sensorControls_);
ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
index d3d38c1b..2ab46133 100644
--- a/src/libcamera/converter.cpp
+++ b/src/libcamera/converter.cpp
@@ -111,12 +111,12 @@ Converter::~Converter()
/**
* \fn Converter::exportBuffers()
* \brief Export buffers from the converter device
- * \param[in] output Output stream index exporting the buffers
+ * \param[in] stream Output stream pointer exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store allocated buffers
*
* This function operates similarly to V4L2VideoDevice::exportBuffers() on the
- * output stream indicated by the \a output index.
+ * output stream indicated by the \a stream.
*
* \return The number of allocated buffers on success or a negative error code
* otherwise
@@ -137,7 +137,7 @@ Converter::~Converter()
* \fn Converter::queueBuffers()
* \brief Queue buffers to converter device
* \param[in] input The frame buffer to apply the conversion
- * \param[out] outputs The container holding the output stream indexes and
+ * \param[out] outputs The container holding the output stream pointers and
* their respective frame buffer outputs.
*
* This function queues the \a input frame buffer on the output streams of the
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
index d8929fc5..2e77872e 100644
--- a/src/libcamera/converter/converter_v4l2_m2m.cpp
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -32,24 +32,24 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Converter)
/* -----------------------------------------------------------------------------
- * V4L2M2MConverter::Stream
+ * V4L2M2MConverter::V4L2M2MStream
*/
-V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
- : converter_(converter), index_(index)
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
{
m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
- m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
- m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
int ret = m2m_->open();
if (ret < 0)
m2m_.reset();
}
-int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
@@ -101,13 +101,13 @@ int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
-int V4L2M2MConverter::Stream::start()
+int V4L2M2MConverter::V4L2M2MStream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
@@ -134,7 +134,7 @@ int V4L2M2MConverter::Stream::start()
return 0;
}
-void V4L2M2MConverter::Stream::stop()
+void V4L2M2MConverter::V4L2M2MStream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
@@ -142,7 +142,7 @@ void V4L2M2MConverter::Stream::stop()
m2m_->output()->releaseBuffers();
}
-int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
@@ -155,12 +155,12 @@ int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *outp
return 0;
}
-std::string V4L2M2MConverter::Stream::logPrefix() const
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
{
- return "stream" + std::to_string(index_);
+ return stream_->configuration().toString();
}
-void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
@@ -172,7 +172,7 @@ void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
}
}
-void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
@@ -333,21 +333,24 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
int ret = 0;
streams_.clear();
- streams_.reserve(outputCfgs.size());
for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
- Stream &stream = streams_.emplace_back(this, i);
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
- if (!stream.isValid()) {
+ if (!stream->isValid()) {
LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
}
- ret = stream.configure(inputCfg, outputCfgs[i]);
+ ret = stream->configure(inputCfg, cfg);
if (ret < 0)
break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
}
if (ret < 0) {
@@ -361,13 +364,14 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
/**
* \copydoc libcamera::Converter::exportBuffers
*/
-int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- if (output >= streams_.size())
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
return -EINVAL;
- return streams_[output].exportBuffers(count, buffers);
+ return iter->second->exportBuffers(count, buffers);
}
/**
@@ -377,8 +381,8 @@ int V4L2M2MConverter::start()
{
int ret;
- for (Stream &stream : streams_) {
- ret = stream.start();
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
if (ret < 0) {
stop();
return ret;
@@ -393,41 +397,40 @@ int V4L2M2MConverter::start()
*/
void V4L2M2MConverter::stop()
{
- for (Stream &stream : utils::reverse(streams_))
- stream.stop();
+ for (auto &iter : streams_)
+ iter.second->stop();
}
/**
* \copydoc libcamera::Converter::queueBuffers
*/
int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+ const std::map<const Stream *, FrameBuffer *> &outputs)
{
- unsigned int mask = 0;
+ std::set<FrameBuffer *> outputBufs;
int ret;
/*
* Validate the outputs as a sanity check: at least one output is
* required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
+ * streams can reference the same output framebuffer.
*/
if (outputs.empty())
return -EINVAL;
- for (auto [index, buffer] : outputs) {
+ for (auto [stream, buffer] : outputs) {
if (!buffer)
return -EINVAL;
- if (index >= streams_.size())
- return -EINVAL;
- if (mask & (1 << index))
- return -EINVAL;
- mask |= 1 << index;
+ outputBufs.insert(buffer);
}
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
/* Queue the input and output buffers to all the streams. */
- for (auto [index, buffer] : outputs) {
- ret = streams_[index].queueBuffers(input, buffer);
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
if (ret < 0)
return ret;
}
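With outputs keyed by Stream pointer, the calling side no longer juggles converter-local indexes; a pipeline handler can key the map directly off the request's buffers, as the simple pipeline change below does. A minimal sketch, assuming `converter` and `input` are set up elsewhere in the pipeline handler:

```cpp
#include <map>

#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>

#include "libcamera/internal/converter.h"

using namespace libcamera;

/* Sketch only: forward a request's buffers to the converter. */
int queueOutputs(Converter *converter, FrameBuffer *input, Request *request)
{
	std::map<const Stream *, FrameBuffer *> outputs;
	for (auto &[stream, buffer] : request->buffers())
		outputs.emplace(stream, buffer);

	return converter->queueBuffers(input, outputs);
}
```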
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index eb36578e..60aafc4e 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -198,6 +198,7 @@ namespace {
static const SimplePipelineInfo supportedDevices[] = {
{ "dcmipp", {}, false },
{ "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
{ "j721e-csi2rx", {}, false },
{ "mtk-seninf", { { "mtk-mdp", 3 } }, false },
{ "mxc-isi", {}, false },
@@ -277,7 +278,7 @@ public:
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
- std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ std::queue<std::map<const Stream *, FrameBuffer *>> conversionQueue_;
bool useConversion_;
std::unique_ptr<Converter> converter_;
@@ -836,7 +837,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
Request *request = buffer->request();
if (useConversion_ && !conversionQueue_.empty()) {
- const std::map<unsigned int, FrameBuffer *> &outputs =
+ const std::map<const Stream *, FrameBuffer *> &outputs =
conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
@@ -1303,10 +1304,8 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
*/
if (data->useConversion_)
return data->converter_
- ? data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers)
- : data->swIsp_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1398,7 +1397,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
SimpleCameraData *data = cameraData(camera);
int ret;
- std::map<unsigned int, FrameBuffer *> buffers;
+ std::map<const Stream *, FrameBuffer *> buffers;
for (auto &[stream, buffer] : request->buffers()) {
/*
@@ -1407,7 +1406,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* completion handler.
*/
if (data->useConversion_) {
- buffers.emplace(data->streamIndex(stream), buffer);
+ buffers.emplace(stream, buffer);
} else {
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
index 20fb6f48..c8748d88 100644
--- a/src/libcamera/software_isp/software_isp.cpp
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -241,19 +241,19 @@ int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
/**
* \brief Export the buffers from the Software ISP
- * \param[in] output Output stream index exporting the buffers
+ * \param[in] stream Output stream exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store the allocated buffers
* \return The number of allocated buffers on success or a negative error code
* otherwise
*/
-int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
ASSERT(debayer_ != nullptr);
/* single output for now */
- if (output >= 1)
+ if (stream == nullptr)
return -EINVAL;
for (unsigned int i = 0; i < count; i++) {
@@ -280,35 +280,29 @@ int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
/**
* \brief Queue buffers to Software ISP
* \param[in] input The input framebuffer
- * \param[in] outputs The container holding the output stream indexes and
+ * \param[in] outputs The container holding the output stream pointers and
* their respective frame buffer outputs
* \return 0 on success, a negative errno on failure
*/
int SoftwareIsp::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+ const std::map<const Stream *, FrameBuffer *> &outputs)
{
- unsigned int mask = 0;
-
/*
* Validate the outputs as a sanity check: at least one output is
- * required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
+ * required, all outputs must reference a valid stream.
*/
if (outputs.empty())
return -EINVAL;
- for (auto [index, buffer] : outputs) {
+ for (auto [stream, buffer] : outputs) {
if (!buffer)
return -EINVAL;
- if (index >= 1) /* only single stream atm */
- return -EINVAL;
- if (mask & (1 << index))
+ if (outputs.size() != 1) /* only single stream atm */
return -EINVAL;
-
- mask |= 1 << index;
}
- process(input, outputs.at(0));
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++)
+ process(input, iter->second);
return 0;
}
diff --git a/test/utils.cpp b/test/utils.cpp
index 41af954b..d25475cb 100644
--- a/test/utils.cpp
+++ b/test/utils.cpp
@@ -176,6 +176,14 @@ protected:
std::ostringstream os;
std::string ref;
+ os << utils::hex(static_cast<int8_t>(0x42)) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<uint8_t>(0x42)) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<int16_t>(0x42)) << " ";
+ ref += "0x0042 ";
+ os << utils::hex(static_cast<uint16_t>(0x42)) << " ";
+ ref += "0x0042 ";
os << utils::hex(static_cast<int32_t>(0x42)) << " ";
ref += "0x00000042 ";
os << utils::hex(static_cast<uint32_t>(0x42)) << " ";
@@ -184,6 +192,15 @@ protected:
ref += "0x0000000000000042 ";
os << utils::hex(static_cast<uint64_t>(0x42)) << " ";
ref += "0x0000000000000042 ";
+
+ os << utils::hex(static_cast<int8_t>(0x42), 6) << " ";
+ ref += "0x000042 ";
+ os << utils::hex(static_cast<uint8_t>(0x42), 1) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<int16_t>(0x42), 6) << " ";
+ ref += "0x000042 ";
+ os << utils::hex(static_cast<uint16_t>(0x42), 1) << " ";
+ ref += "0x42 ";
os << utils::hex(static_cast<int32_t>(0x42), 4) << " ";
ref += "0x0042 ";
os << utils::hex(static_cast<uint32_t>(0x42), 1) << " ";
diff --git a/utils/tuning/README.rst b/utils/tuning/README.rst
index ef3e6ad7..89a1d61e 100644
--- a/utils/tuning/README.rst
+++ b/utils/tuning/README.rst
@@ -1,11 +1,20 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
-.. TODO: Write an overview of libtuning
+libcamera tuning tools
+======================
-Dependencies
-------------
+.. Note:: The tuning tools are still very much a work in progress. If in doubt,
+ please ask on the mailing list.
+
+.. todo::
+ Write documentation
+
+Installation of dependencies
+----------------------------
+
+::
+ # Using a venv
+ python3 -m venv venv
+ . ./venv/bin/activate
+ pip3 install -r requirements.txt
-- numpy
-- opencv-python
-- py3exiv2
-- rawpy
diff --git a/utils/tuning/config-example.yaml b/utils/tuning/config-example.yaml
new file mode 100644
index 00000000..1b7f52cd
--- /dev/null
+++ b/utils/tuning/config-example.yaml
@@ -0,0 +1,12 @@
+general:
+ disable: []
+ plot: []
+ alsc:
+ do_alsc_colour: 1
+ luminance_strength: 0.5
+ awb:
+ greyworld: 0
+ macbeth:
+ small: 1
+ show: 0
+# blacklevel: 32
\ No newline at end of file
diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py
new file mode 100644
index 00000000..abf22321
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_awb.py
@@ -0,0 +1,378 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for AWB
+
+import matplotlib.pyplot as plt
+from bisect import bisect_left
+from scipy.optimize import fmin
+import numpy as np
+
+from .image import Image
+
+
+"""
+obtain piecewise linear approximation for colour curve
+"""
+def awb(Cam, cal_cr_list, cal_cb_list, plot):
+ imgs = Cam.imgs
+ """
+ condense alsc calibration tables into one dictionary
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab/np.min(cr_tab)
+ cb_tab = cb_tab/np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+ """
+ obtain data from greyscale macbeth patches
+ """
+ rb_raw = []
+ rbs_hat = []
+ for Img in imgs:
+ Cam.log += '\nProcessing '+Img.name
+ """
+ get greyscale patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and the
+ function will just return the greyscale patches
+ """
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ """
+ calculate ratio of r, b to g
+ """
+ r_g = np.mean(r_patchs/g_patchs)
+ b_g = np.mean(b_patchs/g_patchs)
+ Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
+ """
+ The curve tends to be better behaved in so-called hatspace.
+ R, B, G represent the individual channels. The colour curve is plotted in
+ r, b space, where:
+ r = R/G
+ b = B/G
+ This will be referred to as dehatspace... (sorry)
+ Hatspace is defined as:
+ r_hat = R/(R+B+G)
+ b_hat = B/(R+B+G)
+ To convert from dehatspace to hatspace (hat operation):
+ r_hat = r/(1+r+b)
+ b_hat = b/(1+r+b)
+ To convert from hatspace to dehatspace (dehat operation):
+ r = r_hat/(1-r_hat-b_hat)
+ b = b_hat/(1-r_hat-b_hat)
+ Proof is left as an exercise to the reader...
+ Throughout the code, r and b are sometimes referred to as r_g and b_g
+ as a reminder that they are ratios
+ """
+ r_g_hat = r_g/(1+r_g+b_g)
+ b_g_hat = b_g/(1+r_g+b_g)
+ Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
+ rbs_hat.append((r_g_hat, b_g_hat, Img.col))
+ rb_raw.append((r_g, b_g))
+ Cam.log += '\n'
+
+ Cam.log += '\nFinished processing images'
+ """
+ sort all lists simultaneously by r_hat
+ """
+ rbs_zip = list(zip(rbs_hat, rb_raw))
+ rbs_zip.sort(key=lambda x: x[0][0])
+ rbs_hat, rb_raw = list(zip(*rbs_zip))
+ """
+ unzip tuples ready for processing
+ """
+ rbs_hat = list(zip(*rbs_hat))
+ rb_raw = list(zip(*rb_raw))
+ """
+ fit quadratic curve to r_g_hat and b_g_hat
+ """
+ a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
+ Cam.log += '\nFit quadratic curve in hatspace'
+ """
+ the algorithm now approximates the shortest distance from each point to the
+ curve in dehatspace. Since the fit is done in hatspace, it is easier to
+ find the actual shortest distance in hatspace and use the projection back
+ into dehatspace as an overestimate.
+ The distance will be used for two things:
+ 1) In the case that colour temperature does not strictly decrease with
+ increasing r/g, the closest point to the line will be chosen out of an
+ increasing pair of colours.
+
+ 2) To calculate transverse negative and positive, the maximum positive
+ and negative distance from the line are chosen. This benefits from the
+ overestimate as the transverse pos/neg are upper bound values.
+ """
+ """
+ define fit function
+ """
+ def f(x):
+ return a*x**2 + b*x + c
+ """
+ iterate over points (R, B are x and y coordinates of points) and calculate
+ distance to line in dehatspace
+ """
+ dists = []
+ for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
+ """
+ define function to minimise as square distance between datapoint and
+ point on curve. Squaring is monotonic so minimising radius squared is
+ equivalent to minimising radius
+ """
+ def f_min(x):
+ y = f(x)
+ return((x-R)**2+(y-B)**2)
+ """
+ perform optimisation with scipy.optimize.fmin
+ """
+ x_hat = fmin(f_min, R, disp=0)[0]
+ y_hat = f(x_hat)
+ """
+ dehat
+ """
+ x = x_hat/(1-x_hat-y_hat)
+ y = y_hat/(1-x_hat-y_hat)
+ rr = R/(1-R-B)
+ bb = B/(1-R-B)
+ """
+ calculate euclidean distance in dehatspace
+ """
+ dist = ((x-rr)**2+(y-bb)**2)**0.5
+ """
+ return negative if point is below the fit curve
+ """
+ if (x+y) > (rr+bb):
+ dist *= -1
+ dists.append(dist)
+ Cam.log += '\nFound closest point on fit line to each point in dehatspace'
+ """
+ calculate wiggle factors in awb. 10% added since this is an upper bound
+ """
+ transverse_neg = - np.min(dists) * 1.1
+ transverse_pos = np.max(dists) * 1.1
+ Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
+ Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
+ """
+ set minimum transverse wiggles to 0.01.
+ Wiggle factors dictate how far off of the curve the algorithm searches. 0.01
+ is a suitable minimum that gives better results for lighting conditions not
+ within calibration dataset. Anything less will generalise poorly.
+ """
+ if transverse_pos < 0.01:
+ transverse_pos = 0.01
+ Cam.log += '\nForced transverse pos to 0.01'
+ if transverse_neg < 0.01:
+ transverse_neg = 0.01
+ Cam.log += '\nForced transverse neg to 0.01'
+
+ """
+ generate new b_hat values at each r_hat according to fit
+ """
+ r_hat_fit = np.array(rbs_hat[0])
+ b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
+ """
+ transform from hatspace to dehatspace
+ """
+ r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
+ b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
+ c_fit = np.round(rbs_hat[2], 0)
+ """
+ round to 4dp
+ """
+ r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
+ r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
+ b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
+ b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
+ r_fit = np.round(r_fit, 4)
+ b_fit = np.round(b_fit, 4)
+ """
+ The following code ensures that colour temperature decreases with
+ increasing r/g
+ """
+ """
+ iterate backwards over list for easier indexing
+ """
+ i = len(c_fit) - 1
+ while i > 0:
+ if c_fit[i] > c_fit[i-1]:
+ Cam.log += '\nColour temperature increase found\n'
+ Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
+ Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
+ """
+ if colour temperature increases then discard point furthest from
+ the transformed fit (dehatspace)
+ """
+ error_1 = abs(dists[i-1])
+ error_2 = abs(dists[i])
+ Cam.log += '\nDistances from fit:\n'
+ Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
+ Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
+ """
+ find bad index
+ note that in python false = 0 and true = 1
+ """
+ bad = i - (error_1 < error_2)
+ Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
+ Cam.log += 'it is furthest from fit'
+ """
+ delete bad point
+ """
+ r_fit = np.delete(r_fit, bad)
+ b_fit = np.delete(b_fit, bad)
+ c_fit = np.delete(c_fit, bad).astype(np.uint16)
+ """
+ note that if a point has been discarded then the length has decreased
+ by one, meaning that decreasing the index by one will reassess the kept
+ point against the next point. It is therefore possible, in theory, for
+ two adjacent points to be discarded, although probably rare
+ """
+ i -= 1
+
+ """
+ return formatted ct curve, ordered by increasing colour temperature
+ """
+ ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
+ Cam.log += '\nFinal CT curve:'
+ for i in range(len(ct_curve)//3):
+ j = 3*i
+ Cam.log += '\n ct: {} '.format(ct_curve[j])
+ Cam.log += ' r: {} '.format(ct_curve[j+1])
+ Cam.log += ' b: {} '.format(ct_curve[j+2])
+
+ """
+ plotting code for debug
+ """
+ if plot:
+ x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
+ y = a*x**2 + b*x + c
+ plt.subplot(2, 1, 1)
+ plt.title('hatspace')
+ plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
+ plt.plot(x, y, color='green', ls='-')
+ plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
+ for i, ct in enumerate(rbs_hat[2]):
+ plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
+ plt.xlabel('$\\hat{r}$')
+ plt.ylabel('$\\hat{b}$')
+ """
+ optionally set axes equal to shortest distance so the line really does
+ look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.grid()
+ plt.subplot(2, 1, 2)
+ plt.title('dehatspace - indoors?')
+ plt.plot(r_fit, b_fit, color='blue')
+ plt.scatter(rb_raw[0], rb_raw[1], color='green')
+ plt.scatter(r_fit, b_fit, color='red')
+ for i, ct in enumerate(c_fit):
+ plt.annotate(str(ct), (r_fit[i], b_fit[i]))
+ plt.xlabel('$r$')
+ plt.ylabel('$b$')
+ """
+ optionally set axes equal to shortest distance so the line really does
+ look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.subplots_adjust(hspace=0.5)
+ plt.grid()
+ plt.show()
+ """
+ end of plotting code
+ """
+ return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
+
+
+"""
+obtain greyscale patches and perform alsc colour correction
+"""
+def get_alsc_patches(Img, colour_cals, grey=True):
+ """
+ get patch centre coordinates, image colour and the actual
+ patches for each channel, remembering to subtract blacklevel
+ If grey then only greyscale patches considered
+ """
+ if grey:
+ cen_coords = Img.cen_coords[3::4]
+ col = Img.col
+ patches = [np.array(Img.patches[i]) for i in Img.order]
+ r_patchs = patches[0][3::4] - Img.blacklevel_16
+ b_patchs = patches[3][3::4] - Img.blacklevel_16
+ """
+ note two green channels are averages
+ """
+ g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
+ else:
+ cen_coords = Img.cen_coords
+ col = Img.color
+ patches = [np.array(Img.patches[i]) for i in Img.order]
+ r_patchs = patches[0] - Img.blacklevel_16
+ b_patchs = patches[3] - Img.blacklevel_16
+ g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
+
+ if colour_cals is None:
+ return r_patchs, b_patchs, g_patchs
+ """
+ find where image colour fits in alsc colour calibration tables
+ """
+ cts = list(colour_cals.keys())
+ pos = bisect_left(cts, col)
+ """
+ if img colour is below minimum or above maximum alsc calibration colour, simply
+ pick extreme closest to img colour
+ """
+ if pos % len(cts) == 0:
+ """
+ this works because -0 = 0 = first and -1 = last index
+ """
+ col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
+ """
+ else, perform linear interpolation between existing alsc colour
+ calibration tables
+ """
+ else:
+ bef = cts[pos-1]
+ aft = cts[pos]
+ da = col-bef
+ db = aft-col
+ bef_tabs = np.array(colour_cals[bef])
+ aft_tabs = np.array(colour_cals[aft])
+ col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
+ col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ """
+ calculate dx, dy used to calculate alsc table
+ """
+ w, h = Img.w/2, Img.h/2
+ dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ """
+ make list of pairs of gains for each patch by selecting the correct value
+ in alsc colour calibration table
+ """
+ patch_gains = []
+ for cen in cen_coords:
+ x, y = cen[0]//dx, cen[1]//dy
+ # We could probably do with some better spatial interpolation here?
+ col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
+ patch_gains.append(col_gains)
+
+ """
+ multiply the r and b channels in each patch by the respective gain, finally
+ performing the alsc colour correction
+ """
+ for i, gains in enumerate(patch_gains):
+ r_patchs[i] = r_patchs[i] * gains[0]
+ b_patchs[i] = b_patchs[i] * gains[1]
+
+ """
+ return greyscale patches, g channel and correct r, b channels
+ """
+ return r_patchs, b_patchs, g_patchs
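For reference, the hat/dehat transforms described in the awb() docstring above can be stated compactly; with r = R/G and b = B/G:

```latex
% Hat transform:
\hat{r} = \frac{r}{1 + r + b} = \frac{R}{R + G + B}, \qquad
\hat{b} = \frac{b}{1 + r + b} = \frac{B}{R + G + B}
% Dehat (inverse) transform:
r = \frac{\hat{r}}{1 - \hat{r} - \hat{b}}, \qquad
b = \frac{\hat{b}}{1 - \hat{r} - \hat{b}}
```

Substituting r = R/G and b = B/G into the first form and multiplying through by G confirms the two definitions agree.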
diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py
new file mode 100644
index 00000000..2e87a667
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ccm.py
@@ -0,0 +1,408 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for CCM (colour correction matrix)
+
+import logging
+
+import numpy as np
+from scipy.optimize import minimize
+
+from . import ctt_colors as colors
+from .image import Image
+from .ctt_awb import get_alsc_patches
+from .utils import visualise_macbeth_chart
+
+logger = logging.getLogger(__name__)
+
+"""
+takes 8-bit macbeth chart values, degammas and returns 16 bit
+"""
+
+'''
+This program has many options from which to derive the color matrix.
+The first is average. This minimises the average delta E across all patches of
+the macbeth chart. Testing across all cameras yielded this as the most color
+accurate and vivid. Other options are available, however.
+Maximum minimises the maximum delta E of the patches. It iterates until a
+minimum maximum is found (so that there is not one patch that deviates
+wildly).
+This yields generally good results, but overall the colors are less accurate.
+Have a fiddle with maximum and see what you think.
+The final option allows you to select the patches for which to average across.
+This means that you can bias certain patches, for instance if you want the
+reds to be more accurate.
+'''
+
+matrix_selection_types = ["average", "maximum", "patches"]
+typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
+test_patches = [1, 2, 5, 8, 9, 12, 14]
+
+'''
+Enter patches to test for. Can also be entered twice if you
+would like twice as much bias on one patch.
+'''
+
+
+def degamma(x):
+ x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one
+ x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
+ x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color
+ return x
+
+
+def gamma(x):
+ # Take 3 long array of color values and gamma them
+ return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
+
+
+"""
+Finds colour correction matrices for a list of images
+"""
+
+
+def ccm(imgs, cal_cr_list, cal_cb_list):
+ global matrix_selection_types, typenum
+ """
+ standard macbeth chart colour values
+ """
+ m_rgb = np.array([ # these are in RGB
+ [116, 81, 67], # dark skin
+ [199, 147, 129], # light skin
+ [91, 122, 156], # blue sky
+ [90, 108, 64], # foliage
+ [130, 128, 176], # blue flower
+ [92, 190, 172], # bluish green
+ [224, 124, 47], # orange
+ [68, 91, 170], # purplish blue
+ [198, 82, 97], # moderate red
+ [94, 58, 106], # purple
+ [159, 189, 63], # yellow green
+ [230, 162, 39], # orange yellow
+ [35, 63, 147], # blue
+ [67, 149, 74], # green
+ [180, 49, 57], # red
+ [238, 198, 20], # yellow
+ [193, 84, 151], # magenta
+ [0, 136, 170], # cyan (goes out of gamut)
+ [245, 245, 243], # white 9.5
+ [200, 202, 202], # neutral 8
+ [161, 163, 163], # neutral 6.5
+ [121, 121, 122], # neutral 5
+ [82, 84, 86], # neutral 3.5
+ [49, 49, 51] # black 2
+ ])
+ """
+ convert reference colours from srgb to rgb
+ """
+ m_srgb = degamma(m_rgb) # now in 16 bit color.
+
+ # Produce array of LAB values for ideal color chart
+ m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
+
+ """
+ reorder reference values to match how patches are ordered
+ """
+ m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
+ m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
+ m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
+ """
+ reformat alsc correction tables or set colour_cals to None if alsc is
+ deactivated
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab / np.min(cr_tab)
+ cb_tab = cb_tab / np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+
+ """
+ for each image, perform awb and alsc corrections.
+ Then calculate the colour correction matrix for that image, recording the
+ ccm and the colour temperature.
+ """
+ ccm_tab = {}
+ for Img in imgs:
+ logger.info('Processing image: ' + Img.name)
+ """
+ get macbeth patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and
+ the function will simply return the macbeth patches
+ """
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
+ # 256 values for each patch of sRGB values
+
+ """
+ do awb
+ Note: awb is done by measuring the macbeth chart in the image, rather
+ than from the awb calibration. This is done so the awb will be perfect
+ and the ccm matrices will be more accurate.
+ """
+ r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
+ r_g = np.mean(r_greys / g_greys)
+ b_g = np.mean(b_greys / g_greys)
+ r = r / r_g
+ b = b / b_g
+ """
+ normalise brightness wrt reference macbeth colours and then average
+ each channel for each patch
+ """
+ gain = np.mean(m_srgb) / np.mean((r, g, b))
+ logger.info(f'Gain with respect to standard colours: {gain:.3f}')
+ r = np.mean(gain * r, axis=1)
+ b = np.mean(gain * b, axis=1)
+ g = np.mean(gain * g, axis=1)
+ """
+ calculate ccm matrix
+ """
+ # ==== All of the below should be in sRGB ====
+ sumde = 0
+ ccm = do_ccm(r, g, b, m_srgb)
+ # This is the initial guess that our optimisation code works with.
+ original_ccm = ccm
+ r1 = ccm[0]
+ r2 = ccm[1]
+ g1 = ccm[3]
+ g2 = ccm[4]
+ b1 = ccm[6]
+ b2 = ccm[7]
+ '''
+ COLOR MATRIX LOOKS AS BELOW
+ R1 R2 R3 Rval Outr
+ G1 G2 G3 * Gval = G
+ B1 B2 B3 Bval B
+ Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3
+ '''
+
+ x0 = [r1, r2, g1, g2, b1, b2]
+ '''
+ We use our old CCM as the initial guess for the program to find the
+ optimised matrix
+ '''
+ result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
+ '''
+ This produces a color matrix which has the lowest delta E possible,
+ based off the input data. Note it is impossible for this to reach
+ zero since the input data is imperfect
+ '''
+
+ [r1, r2, g1, g2, b1, b2] = result.x
+ # The new, optimised color correction matrix values
+ # This is the optimised Color Matrix (preserving greys by summing rows up to 1)
+ optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
+
+ logger.info(f'Optimized Matrix: {np.round(optimised_ccm, 4)}')
+ logger.info(f'Old Matrix: {np.round(ccm, 4)}')
+
+ formatted_ccm = np.array(original_ccm).reshape((3, 3))
+
+ '''
+ The code below applies the latest color matrix and returns LAB
+ values for each patch, which are then used to calculate the final
+ delta E
+ '''
+ optimised_ccm_rgb = [] # RGB / LAB values after applying the original (unoptimised) CCM
+ optimised_ccm_lab = []
+
+ formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
+ after_gamma_rgb = []
+ after_gamma_lab = []
+
+ for RGB in zip(r, g, b):
+ ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
+ optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
+ optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
+
+ optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
+ after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
+ after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
+ '''
+ The after-gamma RGB / LAB values are not used in the calculations,
+ only for visualisation. We now output some data that shows how the
+ optimisation has improved the color matrices
+ '''
+ logger.info("Here are the Improvements")
+
+ # CALCULATE WORST CASE delta e
+ old_worst_delta_e = 0
+ before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
+ new_worst_delta_e = 0
+ after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
+ for i in range(24):
+ old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E
+ new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E
+ if old_delta_e > old_worst_delta_e:
+ old_worst_delta_e = old_delta_e
+ if new_delta_e > new_worst_delta_e:
+ new_worst_delta_e = new_delta_e
+
+ logger.info(f'delta E optimized: average: {after_average:.2f} max: {new_worst_delta_e:.2f}')
+ logger.info(f'delta E old: average: {before_average:.2f} max: {old_worst_delta_e:.2f}')
+
+ visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.color) + str(matrix_selection_types[typenum]))
+ '''
+ The program also saves visualisations of the improvements: the top
+ rectangle is the ideal color, the left square the color before
+ optimisation and the right square the color after.
+ '''
+
+ """
+ if a ccm has already been calculated for that temperature then don't
+ overwrite but save both. They will then be averaged later on
+ """ # Now going to use optimised color matrix, optimised_ccm
+ if Img.color in ccm_tab.keys():
+ ccm_tab[Img.color].append(optimised_ccm)
+ else:
+ ccm_tab[Img.color] = [optimised_ccm]
+
+ logger.info('Finished processing images')
+ """
+ average any ccms that share a colour temperature
+ """
+ for k, v in ccm_tab.items():
+ tab = np.mean(v, axis=0)
+ tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
+ tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
+ ccm_tab[k] = list(np.round(tab, 5))
+ logger.info(f'Matrix calculated for colour temperature of {k} K')
+
+ """
+ return all ccms with respective colour temperature in the correct format,
+ sorted by their colour temperature
+ """
+ sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
+ ccms = []
+ for i in sorted_ccms:
+ ccms.append({
+ 'ct': i[0],
+ 'ccm': i[1]
+ })
+ return ccms
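+
+
+# For illustration, the list returned by ccm() has the following form (the
+# values here are hypothetical):
+#
+#     [{'ct': 3000, 'ccm': [1.52, -0.41, -0.11, ...]},
+#      {'ct': 6500, 'ccm': [...]}]
+#
+# i.e. one row-major 9-element matrix per colour temperature, sorted by the
+# 'ct' key.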
+
+
+def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code
+ [r1, r2, g1, g2, b1, b2] = x0
+ ccm = np.array([r1, r2, (1 - r1 - r2),
+ g1, g2, (1 - g1 - g2),
+ b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly
+ return transform_and_evaluate(ccm, r, g, b, m_lab)
+
+
+def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix
+ # create list of matrix changed colors
+ realrgb = []
+ for RGB in zip(r, g, b):
+ rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied
+ realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
+ # now compare that with m_lab and return numeric result, averaged for each patch
+ return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E
+
+
+def sumde(listA, listB):
+ global typenum, test_patches
+ sumde = 0
+ maxde = 0
+ patchde = [] # Array of the delta E values for each patch, useful for optimising certain patches
+ for listA_item, listB_item in zip(listA, listB):
+ de = deltae(listA_item, listB_item) # Compute the delta E once per patch
+ if maxde < de:
+ maxde = de
+ patchde.append(de)
+ sumde += de
+ '''
+ The different options specified at the start allow for the sum, the
+ maximum or a selection of specific patches to be returned
+ '''
+ if typenum == 0:
+ return sumde
+ if typenum == 1:
+ return maxde
+ if typenum == 2:
+ output = sum([patchde[test_patch] for test_patch in test_patches])
+ # Selects only certain patches and returns the output for them
+ return output
+
+
+"""
+calculates the ccm for an individual image.
+ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
+matrix, each row must add up to 1 in order to conserve greyness, simplifying
+calculation.
+The initial CCM is calculated in RGB, and then optimised in LAB color space
+This simplifies the initial calculation but then gets us the accuracy of
+using LAB color space.
+"""
+
+
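+# A sketch of the maths implemented below: substituting the row constraint
+# (e.g. r_c = 1 - r_a - r_b) into each row's least-squares fit leaves two
+# unknowns per row. With rb = r - b and gb = g - b, minimising
+#
+#     sum((r_a * rb + r_b * gb - (target - b)) ** 2)
+#
+# over the 24 patches gives a 2x2 system of normal equations, which do_ccm()
+# solves by Cramer's rule via det, r_a and r_b below.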
+def do_ccm(r, g, b, m_srgb):
+ rb = r-b
+ gb = g-b
+ rb_2s = (rb * rb)
+ rb_gbs = (rb * gb)
+ gb_2s = (gb * gb)
+
+ r_rbs = rb * (m_srgb[..., 0] - b)
+ r_gbs = gb * (m_srgb[..., 0] - b)
+ g_rbs = rb * (m_srgb[..., 1] - b)
+ g_gbs = gb * (m_srgb[..., 1] - b)
+ b_rbs = rb * (m_srgb[..., 2] - b)
+ b_gbs = gb * (m_srgb[..., 2] - b)
+
+ """
+ Obtain least squares fit
+ """
+ rb_2 = np.sum(rb_2s)
+ gb_2 = np.sum(gb_2s)
+ rb_gb = np.sum(rb_gbs)
+ r_rb = np.sum(r_rbs)
+ r_gb = np.sum(r_gbs)
+ g_rb = np.sum(g_rbs)
+ g_gb = np.sum(g_gbs)
+ b_rb = np.sum(b_rbs)
+ b_gb = np.sum(b_gbs)
+
+ det = rb_2 * gb_2 - rb_gb * rb_gb
+
+ """
+ Raise error if matrix is singular...
+ This shouldn't really happen with real data but if it does just take new
+ pictures and try again, not much else to be done unfortunately...
+ """
+ if det < 0.001:
+ raise ArithmeticError
+
+ r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
+ r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
+ """
+ Last row can be calculated by knowing the sum must be 1
+ """
+ r_c = 1 - r_a - r_b
+
+ g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
+ g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
+ g_c = 1 - g_a - g_b
+
+ b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
+ b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
+ b_c = 1 - b_a - b_b
+
+ """
+ format ccm
+ """
+ ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
+
+ return ccm
+
+
+def deltae(colorA, colorB):
+ return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+ # return ((colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+ # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E
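+
+
+# deltae() above is the CIE76 colour difference, i.e. the Euclidean distance
+# in LAB space. For example, deltae([50, 10, 10], [50, 10, 12]) evaluates to
+# (0 ** 2 + 0 ** 2 + 2 ** 2) ** 0.5 = 2.0.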
diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py
new file mode 100644
index 00000000..cb4d236b
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_colors.py
@@ -0,0 +1,30 @@
+# Program to convert from RGB to LAB color space
+def RGB_to_LAB(RGB): # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
+ XYZ = [0, 0, 0]
+ # Convert R, G, B to X, Y, Z (sRGB D65 matrix)
+ X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
+ Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
+ Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
+
+ XYZ[0] = X / 255 * 100
+ XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255
+ XYZ[2] = Z / 255 * 100
+ XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65
+ XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000
+ XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883
+ for num, value in enumerate(XYZ):
+ if value > 0.008856:
+ value = value ** (1 / 3)
+ else:
+ value = (7.787 * value) + (16 / 116)
+ XYZ[num] = value
+
+ # L, A, B, values calculated below
+ L = (116 * XYZ[1]) - 16
+ a = 500 * (XYZ[0] - XYZ[1])
+ b = 200 * (XYZ[1] - XYZ[2])
+
+ return [L, a, b]
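+
+
+# For illustration: RGB_to_LAB([255, 255, 255]) returns approximately
+# [100.0, 0.0, 0.0] (the LAB reference white), and RGB_to_LAB([0, 0, 0])
+# returns [0.0, 0.0, 0.0].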
diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py
new file mode 100644
index 00000000..01bba302
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ransac.py
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool RANSAC selector for Macbeth chart locator
+
+import numpy as np
+
+scale = 2
+
+
+"""
+constructs normalised macbeth chart corners for ransac algorithm
+"""
+def get_square_verts(c_err=0.05, scale=scale):
+ """
+ define macbeth chart corners
+ """
+ b_bord_x, b_bord_y = scale*8.5, scale*13
+ s_bord = 6*scale
+ side = 41*scale
+ x_max = side*6 + 5*s_bord + 2*b_bord_x
+ y_max = side*4 + 3*s_bord + 2*b_bord_y
+ c1 = (0, 0)
+ c2 = (0, y_max)
+ c3 = (x_max, y_max)
+ c4 = (x_max, 0)
+ mac_norm = np.array((c1, c2, c3, c4), np.float32)
+ mac_norm = np.array([mac_norm])
+
+ square_verts = []
+ square_0 = np.array(((0, 0), (0, side),
+ (side, side), (side, 0)), np.float32)
+ offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
+ c_off = side * c_err
+ offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
+ (-c_off, -c_off), (-c_off, c_off)), np.float32)
+ square_0 += offset_0
+ square_0 += offset_cont
+ """
+ define macbeth square corners
+ """
+ for i in range(6):
+ shift_i = np.array(((i*side, 0), (i*side, 0),
+ (i*side, 0), (i*side, 0)), np.float32)
+ shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
+ (i*s_bord, 0), (i*s_bord, 0)), np.float32)
+ square_i = square_0 + shift_i + shift_bord
+ for j in range(4):
+ shift_j = np.array(((0, j*side), (0, j*side),
+ (0, j*side), (0, j*side)), np.float32)
+ shift_bord = np.array(((0, j*s_bord),
+ (0, j*s_bord), (0, j*s_bord),
+ (0, j*s_bord)), np.float32)
+ square_j = square_i + shift_j + shift_bord
+ square_verts.append(square_j)
+ return np.array(square_verts, np.float32), mac_norm
+
+
+def get_square_centres(c_err=0.05, scale=scale):
+ """
+ define macbeth square centres
+ """
+ verts, mac_norm = get_square_verts(c_err, scale=scale)
+
+ centres = np.mean(verts, axis=1)
+ return np.array(centres, np.float32)
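+
+
+# For reference: get_square_verts() returns an array of shape (24, 4, 2),
+# four (x, y) corners for each of the 24 chart squares, and
+# get_square_centres() reduces it to the (24, 2) array of square centres
+# used by the RANSAC fit.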
diff --git a/utils/tuning/libtuning/generators/yaml_output.py b/utils/tuning/libtuning/generators/yaml_output.py
index 8f22d386..c490081d 100644
--- a/utils/tuning/libtuning/generators/yaml_output.py
+++ b/utils/tuning/libtuning/generators/yaml_output.py
@@ -9,8 +9,9 @@ from .generator import Generator
from numbers import Number
from pathlib import Path
-import libtuning.utils as utils
+import logging
+logger = logging.getLogger(__name__)
class YamlOutput(Generator):
def __init__(self):
@@ -106,13 +107,16 @@ class YamlOutput(Generator):
]
for module in output_order:
+ if module not in output_dict:
+ continue
+
out_lines.append(f' - {module.out_name}:')
if len(output_dict[module]) == 0:
continue
if not isinstance(output_dict[module], dict):
- utils.eprint(f'Error: Output of {module.type} is not a dictionary')
+ logger.error(f'Output of {module.type} is not a dictionary')
continue
lines = self._stringify_dict(output_dict[module])
diff --git a/utils/tuning/libtuning/image.py b/utils/tuning/libtuning/image.py
index 6ff60ec1..c8911a0f 100644
--- a/utils/tuning/libtuning/image.py
+++ b/utils/tuning/libtuning/image.py
@@ -13,6 +13,9 @@ import re
import libtuning as lt
import libtuning.utils as utils
+import logging
+
+logger = logging.getLogger(__name__)
class Image:
@@ -21,17 +24,18 @@ class Image:
self.lsc_only = False
self.color = -1
self.lux = -1
+ self.macbeth = None
try:
self._load_metadata_exif()
except Exception as e:
- utils.eprint(f'Failed to load metadata from {self.path}: {e}')
+ logger.error(f'Failed to load metadata from {self.path}: {e}')
raise e
try:
self._read_image_dng()
except Exception as e:
- utils.eprint(f'Failed to load image data from {self.path}: {e}')
+ logger.error(f'Failed to load image data from {self.path}: {e}')
raise e
@property
diff --git a/utils/tuning/libtuning/libtuning.py b/utils/tuning/libtuning/libtuning.py
index 5e22288d..e7c63535 100644
--- a/utils/tuning/libtuning/libtuning.py
+++ b/utils/tuning/libtuning/libtuning.py
@@ -5,13 +5,14 @@
# An infrastructure for camera tuning tools
import argparse
+import logging
import libtuning as lt
import libtuning.utils as utils
-from libtuning.utils import eprint
from enum import Enum, IntEnum
+logger = logging.getLogger(__name__)
class Color(IntEnum):
R = 0
@@ -112,10 +113,10 @@ class Tuner(object):
for module_type in output_order:
modules = [module for module in self.modules if module.type == module_type.type]
if len(modules) > 1:
- eprint(f'Multiple modules found for module type "{module_type.type}"')
+ logger.error(f'Multiple modules found for module type "{module_type.type}"')
return False
if len(modules) < 1:
- eprint(f'No module found for module type "{module_type.type}"')
+ logger.error(f'No module found for module type "{module_type.type}"')
return False
self.output_order.append(modules[0])
@@ -124,19 +125,19 @@ class Tuner(object):
# \todo Validate parser and generator at Tuner construction time?
def _validate_settings(self):
if self.parser is None:
- eprint('Missing parser')
+ logger.error('Missing parser')
return False
if self.generator is None:
- eprint('Missing generator')
+ logger.error('Missing generator')
return False
if len(self.modules) == 0:
- eprint('No modules added')
+ logger.error('No modules added')
return False
if len(self.output_order) != len(self.modules):
- eprint('Number of outputs does not match number of modules')
+ logger.error('Number of outputs does not match number of modules')
return False
return True
@@ -183,7 +184,7 @@ class Tuner(object):
for module in self.modules:
if not module.validate_config(self.config):
- eprint(f'Config is invalid for module {module.type}')
+ logger.error(f'Config is invalid for module {module.type}')
return -1
has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules)
@@ -192,15 +193,15 @@ class Tuner(object):
images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc)
if images is None or len(images) == 0:
- eprint(f'No images were found, or able to load')
+ logger.error('No images found, or none could be loaded')
return -1
# Do the tuning
for module in self.modules:
out = module.process(self.config, images, self.output)
if out is None:
- eprint(f'Module {module.name} failed to process, aborting')
- break
+ logger.warning(f'Module {module.hr_name} failed to process...')
+ continue
self.output[module] = out
self.generator.write(args.output, self.output, self.output_order)
diff --git a/utils/tuning/libtuning/macbeth.py b/utils/tuning/libtuning/macbeth.py
index e1182464..4a2006b0 100644
--- a/utils/tuning/libtuning/macbeth.py
+++ b/utils/tuning/libtuning/macbeth.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Ideas on Board Oy
#
# Locate and extract Macbeth charts from images
# (Copied from: ctt_macbeth_locator.py)
@@ -11,8 +12,18 @@ import cv2
import os
from pathlib import Path
import numpy as np
+import warnings
+import logging
+from sklearn import cluster
-from libtuning.image import Image
+from .ctt_ransac import get_square_verts, get_square_centres
+from .image import Image
+
+logger = logging.getLogger(__name__)
+
+
+class MacbethError(Exception):
+ pass
# Reshape image to fixed width without distorting returns image and scale
@@ -369,7 +380,9 @@ def get_macbeth_chart(img, ref_data):
# Catch macbeth errors and continue with code
except MacbethError as error:
- eprint(error)
+ # \todo: This happens so many times in a normal run, that it shadows
+ # all the relevant output
+ # logger.warning(error)
return (0, None, None, False)
@@ -403,10 +416,15 @@ def find_macbeth(img, mac_config):
# nothing more is tried as this is a high enough confidence to ensure
# reliable macbeth square centre placement.
+ # Keep a list that will include this and any brightened up versions of
+ # the image for reuse.
+ all_images = [img]
+
for brightness in [2, 4]:
if cor >= 0.75:
break
img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0)
+ all_images.append(img_br)
cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b
@@ -456,23 +474,24 @@ def find_macbeth(img, mac_config):
w_inc = int(w * pair['inc'])
h_inc = int(h * pair['inc'])
- loop = ((1 - pair['sel']) / pair['inc']) + 1
+ loop = int(((1 - pair['sel']) / pair['inc']) + 1)
# For each subselection, look for a macbeth chart
- for i in range(loop):
- for j in range(loop):
- w_s, h_s = i * w_inc, j * h_inc
- img_sel = img[w_s:w_s + w_sel, h_s:h_s + h_sel]
- cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
-
- # If the correlation is better than the best then record the
- # scale and current subselection at which macbeth chart was
- # found. Also record the coordinates, macbeth chart and message.
- if cor_ij > cor:
- cor = cor_ij
- mac, coords, ret = mac_ij, coords_ij, ret_ij
- ii, jj = i, j
- w_best, h_best = w_inc, h_inc
- d_best = index + 1
+ for img_br in all_images:
+ for i in range(loop):
+ for j in range(loop):
+ w_s, h_s = i * w_inc, j * h_inc
+ img_sel = img_br[w_s:w_s + w_sel, h_s:h_s + h_sel]
+ cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
+
+ # If the correlation is better than the best then record the
+ # scale and current subselection at which macbeth chart was
+ # found. Also record the coordinates, macbeth chart and message.
+ if cor_ij > cor:
+ cor = cor_ij
+ mac, coords, ret = mac_ij, coords_ij, ret_ij
+ ii, jj = i, j
+ w_best, h_best = w_inc, h_inc
+ d_best = index + 1
# Transform coordinates from subselection to original image
if ii != -1:
@@ -486,7 +505,7 @@ def find_macbeth(img, mac_config):
coords_fit = coords
if cor < 0.75:
- eprint(f'Warning: Low confidence {cor:.3f} for macbeth chart in {img.path.name}')
+ logger.warning(f'Low confidence {cor:.3f} for macbeth chart')
if show:
draw_macbeth_results(img, coords_fit)
@@ -499,18 +518,20 @@ def locate_macbeth(image: Image, config: dict):
av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16))
av_val = np.mean(av_chan)
if av_val < image.blacklevel_16 / (2**16) + 1 / 64:
- eprint(f'Image {image.path.name} too dark')
+ logger.warning(f'Image {image.path.name} too dark')
return None
macbeth = find_macbeth(av_chan, config['general']['macbeth'])
if macbeth is None:
- eprint(f'No macbeth chart found in {image.path.name}')
+ logger.warning(f'No macbeth chart found in {image.path.name}')
return None
mac_cen_coords = macbeth[1]
if not image.get_patches(mac_cen_coords):
- eprint(f'Macbeth patches have saturated in {image.path.name}')
+ logger.warning(f'Macbeth patches have saturated in {image.path.name}')
return None
+ image.macbeth = macbeth
+
return macbeth
diff --git a/utils/tuning/libtuning/macbeth_ref.pgm b/utils/tuning/libtuning/macbeth_ref.pgm
index 37897140..089ea91f 100644
--- a/utils/tuning/libtuning/macbeth_ref.pgm
+++ b/utils/tuning/libtuning/macbeth_ref.pgm
@@ -1,5 +1,5 @@
-# SPDX-License-Identifier: BSD-2-Clause
P5
+# SPDX-License-Identifier: BSD-2-Clause
# Reference macbeth chart
120 80
255
diff --git a/utils/tuning/libtuning/modules/agc/rkisp1.py b/utils/tuning/libtuning/modules/agc/rkisp1.py
index 19a5555b..7147028a 100644
--- a/utils/tuning/libtuning/modules/agc/rkisp1.py
+++ b/utils/tuning/libtuning/modules/agc/rkisp1.py
@@ -64,7 +64,7 @@ class AGCRkISP1(AGC):
return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
def _generate_y_target(self) -> list:
- return 0.16
+ return 0.5
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
diff --git a/utils/tuning/libtuning/modules/ccm/__init__.py b/utils/tuning/libtuning/modules/ccm/__init__.py
new file mode 100644
index 00000000..322602af
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.ccm.ccm import CCM
+from libtuning.modules.ccm.rkisp1 import CCMRkISP1
diff --git a/utils/tuning/libtuning/modules/ccm/ccm.py b/utils/tuning/libtuning/modules/ccm/ccm.py
new file mode 100644
index 00000000..18702f8d
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/ccm.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Base Ccm tuning module
+
+from ..module import Module
+
+from libtuning.ctt_ccm import ccm
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class CCM(Module):
+ type = 'ccm'
+ hr_name = 'CCM (Base)'
+ out_name = 'GenericCCM'
+
+ def __init__(self, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def do_calibration(self, images):
+ logger.info('Starting CCM calibration')
+
+ imgs = [img for img in images if img.macbeth is not None]
+
+ # \todo Take LSC calibration results into account.
+ cal_cr_list = None
+ cal_cb_list = None
+
+ try:
+ ccms = ccm(imgs, cal_cr_list, cal_cb_list)
+ except ArithmeticError:
+ logger.error('CCM calibration failed')
+ return None
+
+ return ccms
diff --git a/utils/tuning/libtuning/modules/ccm/rkisp1.py b/utils/tuning/libtuning/modules/ccm/rkisp1.py
new file mode 100644
index 00000000..be0252d9
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/rkisp1.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Ccm module for tuning rkisp1
+
+from .ccm import CCM
+
+
+class CCMRkISP1(CCM):
+ hr_name = 'Crosstalk Correction (RkISP1)'
+ out_name = 'Ccm'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't need anything from the config file.
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ ccms = self.do_calibration(images)
+ output['ccms'] = ccms
+
+ return output
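+
+
+# The 'ccms' list returned here is written to the generated tuning file under
+# this module's out_name, i.e. as the 'Ccm' block (see yaml_output.py).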
diff --git a/utils/tuning/libtuning/modules/lsc/lsc.py b/utils/tuning/libtuning/modules/lsc/lsc.py
index 344a07a3..e0ca22eb 100644
--- a/utils/tuning/libtuning/modules/lsc/lsc.py
+++ b/utils/tuning/libtuning/modules/lsc/lsc.py
@@ -59,7 +59,10 @@ class LSC(Module):
def _lsc_single_channel(self, channel: np.array,
image: lt.Image, green_grid: np.array = None):
grid = self._get_grid(channel, image.w, image.h)
- grid -= image.blacklevel_16
+ # Clamp the values to a small positive, so that the following 1/grid
+ # doesn't produce negative results.
+ grid = np.maximum(grid - image.blacklevel_16, 0.1)
+
if green_grid is None:
table = np.reshape(1 / grid, self.sector_shape[::-1])
else:
diff --git a/utils/tuning/libtuning/modules/lsc/raspberrypi.py b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
index f19c7163..99bc4fe6 100644
--- a/utils/tuning/libtuning/modules/lsc/raspberrypi.py
+++ b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
@@ -12,7 +12,9 @@ import libtuning.utils as utils
from numbers import Number
import numpy as np
+import logging
+logger = logging.getLogger(__name__)
class ALSCRaspberryPi(LSC):
# Override the type name so that the parser can match the entry in the
@@ -35,7 +37,7 @@ class ALSCRaspberryPi(LSC):
def validate_config(self, config: dict) -> bool:
if self not in config:
- utils.eprint(f'{self.type} not in config')
+ logger.error(f'{self.type} not in config')
return False
valid = True
@@ -46,14 +48,14 @@ class ALSCRaspberryPi(LSC):
color_key = self.do_color.name
if lum_key not in conf and self.luminance_strength.required:
- utils.eprint(f'{lum_key} is not in config')
+ logger.error(f'{lum_key} is not in config')
valid = False
if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1):
- utils.eprint(f'Warning: {lum_key} is not in range [0, 1]; defaulting to 0.5')
+ logger.warning(f'{lum_key} is not in range [0, 1]; defaulting to 0.5')
if color_key not in conf and self.do_color.required:
- utils.eprint(f'{color_key} is not in config')
+ logger.error(f'{color_key} is not in config')
valid = False
return valid
@@ -235,7 +237,7 @@ class ALSCRaspberryPi(LSC):
if count == 1:
output['sigma'] = 0.005
output['sigma_Cb'] = 0.005
- utils.eprint('Warning: Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
+ logger.warning('Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
return output
# Obtain worst-case scenario residual sigmas
diff --git a/utils/tuning/libtuning/modules/lsc/rkisp1.py b/utils/tuning/libtuning/modules/lsc/rkisp1.py
index 20406e43..c02b2306 100644
--- a/utils/tuning/libtuning/modules/lsc/rkisp1.py
+++ b/utils/tuning/libtuning/modules/lsc/rkisp1.py
@@ -33,13 +33,13 @@ class LSCRkISP1(LSC):
# table, flattened array of (blue's) green calibration table
def _do_single_lsc(self, image: lt.Image):
- cgr, gr = self._lsc_single_channel(image.channels[lt.Color.GR], image)
- cgb, gb = self._lsc_single_channel(image.channels[lt.Color.GB], image)
-
- # \todo Should these ratio against the average of both greens or just
- # each green like we've done here?
- cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, gr)
- cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, gb)
+ # Perform LSC on each colour channel independently. A future enhancement
+ # worth investigating would be splitting the luminance and chrominance
+ # LSC as done by Raspberry Pi.
+ cgr, _ = self._lsc_single_channel(image.channels[lt.Color.GR], image)
+ cgb, _ = self._lsc_single_channel(image.channels[lt.Color.GB], image)
+ cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image)
+ cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image)
return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten()
@@ -80,7 +80,8 @@ class LSCRkISP1(LSC):
tables = []
for lis in [list_cr, list_cgr, list_cgb, list_cb]:
table = np.mean(lis[indices], axis=0)
- table = output_map_func((1, 3.999), (1024, 4095), table)
+ table = output_map_func((1, 4), (1024, 4096), table)
+ table = np.clip(table, 1024, 4095)
table = np.round(table).astype('int32').tolist()
tables.append(table)
@@ -106,6 +107,9 @@ class LSCRkISP1(LSC):
output['sets'] = self._do_all_lsc(images)
+ if len(output['sets']) == 0:
+ return None
+
# \todo Validate images from greyscale camera and force grescale mode
# \todo Debug functionality
diff --git a/utils/tuning/libtuning/modules/static.py b/utils/tuning/libtuning/modules/static.py
new file mode 100644
index 00000000..4d0f7e18
--- /dev/null
+++ b/utils/tuning/libtuning/modules/static.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas on Board
+#
+# Module implementation for static data
+
+from .module import Module
+
+
+# This module can be used in cases where the tuning file should contain
+# static data.
+class StaticModule(Module):
+ # Avoid a mutable default argument, which would be shared between
+ # instances
+ def __init__(self, out_name: str, output: dict = None):
+ super().__init__()
+ self.out_name = out_name
+ self.hr_name = f'Static {out_name}'
+ self.type = f'static_{out_name}'
+ self.output = output if output is not None else {}
+
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ return self.output
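+
+
+# Usage sketch: StaticModule('Awb') emits an empty 'Awb' block in the tuning
+# file, while StaticModule('GammaOutCorrection', {'gamma': 2.2}) emits the
+# given data verbatim, as done in utils/tuning/rkisp1.py.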
diff --git a/utils/tuning/libtuning/parsers/yaml_parser.py b/utils/tuning/libtuning/parsers/yaml_parser.py
index 244db24d..1fa6b7a8 100644
--- a/utils/tuning/libtuning/parsers/yaml_parser.py
+++ b/utils/tuning/libtuning/parsers/yaml_parser.py
@@ -5,13 +5,16 @@
# Parser for YAML format config file
from .parser import Parser
+import yaml
class YamlParser(Parser):
def __init__(self):
super().__init__()
- # \todo Implement this (it's fine for now as we don't need a config for
- # rkisp1 LSC, which is the only user of this so far)
def parse(self, config_file: str, modules: list) -> (dict, list):
- return {}, []
+ # Dummy implementation that just reads the file
+ with open(config_file, 'r') as f:
+ config = yaml.safe_load(f)
+
+ return config, []
diff --git a/utils/tuning/libtuning/utils.py b/utils/tuning/libtuning/utils.py
index 1e8128ea..e35cf409 100644
--- a/utils/tuning/libtuning/utils.py
+++ b/utils/tuning/libtuning/utils.py
@@ -5,6 +5,7 @@
#
# Utilities for libtuning
+import cv2
import decimal
import math
import numpy as np
@@ -12,16 +13,15 @@ import os
from pathlib import Path
import re
import sys
+import logging
import libtuning as lt
from libtuning.image import Image
-from libtuning.macbeth import locate_macbeth
-
-# Utility functions
+from .macbeth import locate_macbeth
+logger = logging.getLogger(__name__)
-def eprint(*args, **kwargs):
- print(*args, file=sys.stderr, **kwargs)
+# Utility functions
def get_module_by_type_name(modules, name):
@@ -43,16 +43,30 @@ def _list_image_files(directory):
def _parse_image_filename(fn: Path):
- result = re.search(r'^(alsc_)?(\d+)[kK]_(\d+)?[lLuU]?.\w{3,4}$', fn.name)
- if result is None:
- eprint(f'The file name of {fn.name} is incorrectly formatted')
- return None, None, None
+ lsc_only = False
+ color_temperature = None
+ lux = None
+
+ parts = fn.stem.split('_')
+ for part in parts:
+ if part == 'alsc':
+ lsc_only = True
+ continue
+ r = re.match(r'(\d+)[kK]', part)
+ if r:
+ color_temperature = int(r.group(1))
+ continue
+ r = re.match(r'(\d+)[lLuU]', part)
+ if r:
+ lux = int(r.group(1))
+
+ if color_temperature is None:
+ logger.error(f'The file name of "{fn.name}" does not contain a color temperature')
- color = int(result.group(2))
- lsc_only = result.group(1) is not None
- lux = None if lsc_only else int(result.group(3))
+ if lux is None and lsc_only is False:
+ logger.error(f'The file name of "{fn.name}" must either contain alsc or a lux level')
- return color, lux, lsc_only
+ return color_temperature, lux, lsc_only
# \todo Implement this from check_imgs() in ctt.py
@@ -72,30 +86,34 @@ def _validate_images(images):
def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list:
files = _list_image_files(input_dir)
if len(files) == 0:
- eprint(f'No images found in {input_dir}')
+ logger.error(f'No images found in {input_dir}')
return None
images = []
for f in files:
color, lux, lsc_only = _parse_image_filename(f)
+
if color is None:
+ logger.warning(f'Ignoring "{f.name}" as it has no associated color temperature')
continue
+ logger.info(f'Processing image "{f.name}" (color={color}, lux={lux}, lsc_only={lsc_only})')
+
# Skip lsc image if we don't need it
if lsc_only and not load_lsc:
- eprint(f'Skipping {f.name} as this tuner has no LSC module')
+ logger.warning(f'Skipping {f.name} as this tuner has no LSC module')
continue
# Skip non-lsc image if we don't need it
if not lsc_only and not load_nonlsc:
- eprint(f'Skipping {f.name} as this tuner only has an LSC module')
+ logger.warning(f'Skipping {f.name} as this tuner only has an LSC module')
continue
# Load image
try:
image = Image(f)
except Exception as e:
- eprint(f'Failed to load image {f.name}: {e}')
+ logger.error(f'Failed to load image {f.name}: {e}')
continue
# Populate simple fields
@@ -113,7 +131,7 @@ def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool)
continue
# Handle macbeth
- macbeth = locate_macbeth(config)
+ macbeth = locate_macbeth(image, config)
if macbeth is None:
continue
@@ -123,3 +141,46 @@ def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool)
return None
return images
+
+
+"""
+Some code that will save virtual macbeth charts that show the difference between optimised matrices and non optimised matrices
+
+The function creates an image that is 1550 by 1050 pixels wide, and fills it with patches which are 200x200 pixels in size
+Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix
+_________________
+| |
+| Ideal Color |
+|_______________|
+| Old | new |
+| Color | Color |
+|_______|_______|
+
+Nice way of showing how the optimisation helps change the colors and the color matricies
+"""
+def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
+ image = np.zeros((1050, 1550, 3), dtype=np.uint8)
+ colorindex = -1
+ for y in range(6):
+ for x in range(4): # Creates 6 x 4 grid of macbeth chart
+ colorindex += 1
+ xlocation = 50 + 250 * x # Means there is 50px of black gap between each square, more like the real macbeth chart.
+ ylocation = 50 + 250 * y
+ for g in range(200):
+ for i in range(100):
+ image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex]
+ xlocation = 150 + 250 * x
+ ylocation = 50 + 250 * y
+ for i in range(100):
+ for g in range(100):
+ image[xlocation + i, ylocation + g] = original_rgb[colorindex] # Smaller squares below to compare the old colors with the new ones
+ xlocation = 150 + 250 * x
+ ylocation = 150 + 250 * y
+ for i in range(100):
+ for g in range(100):
+ image[xlocation + i, ylocation + g] = new_rgb[colorindex]
+
+ im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(f'{output_filename} Generated Macbeth Chart.png', im_bgr)
+
diff --git a/utils/tuning/requirements.txt b/utils/tuning/requirements.txt
new file mode 100644
index 00000000..3705769b
--- /dev/null
+++ b/utils/tuning/requirements.txt
@@ -0,0 +1,9 @@
+coloredlogs
+matplotlib
+numpy
+opencv-python
+py3exiv2
+pyyaml
+rawpy
+scikit-learn
+scipy
diff --git a/utils/tuning/rkisp1.py b/utils/tuning/rkisp1.py
index d0ce15d5..0d279a39 100755
--- a/utils/tuning/rkisp1.py
+++ b/utils/tuning/rkisp1.py
@@ -5,6 +5,8 @@
#
# Tuning script for rkisp1
+import coloredlogs
+import logging
import sys
import libtuning as lt
@@ -12,8 +14,25 @@ from libtuning.parsers import YamlParser
from libtuning.generators import YamlOutput
from libtuning.modules.lsc import LSCRkISP1
from libtuning.modules.agc import AGCRkISP1
+from libtuning.modules.ccm import CCMRkISP1
+from libtuning.modules.static import StaticModule
+
+coloredlogs.install(level=logging.INFO, fmt='%(name)s %(levelname)s %(message)s')
+
+awb = StaticModule('Awb')
+blc = StaticModule('BlackLevelCorrection')
+color_processing = StaticModule('ColorProcessing')
+filter = StaticModule('Filter')
+gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2})
tuner = lt.Tuner('RkISP1')
+tuner.add(AGCRkISP1(debug=[lt.Debug.Plot]))
+tuner.add(awb)
+tuner.add(blc)
+tuner.add(CCMRkISP1(debug=[lt.Debug.Plot]))
+tuner.add(color_processing)
+tuner.add(filter)
+tuner.add(gamma_out)
tuner.add(LSCRkISP1(
debug=[lt.Debug.Plot],
# This is for the actual LSC tuning, and is part of the base LSC
@@ -33,10 +52,11 @@ tuner.add(LSCRkISP1(
# values. This can also be a custom function.
smoothing_function=lt.smoothing.MedianBlur(3),
))
-tuner.add(AGCRkISP1(debug=[lt.Debug.Plot]))
+
tuner.set_input_parser(YamlParser())
tuner.set_output_formatter(YamlOutput())
-tuner.set_output_order([AGCRkISP1, LSCRkISP1])
+tuner.set_output_order([AGCRkISP1, awb, blc, CCMRkISP1, color_processing,
+ filter, gamma_out, LSCRkISP1])
if __name__ == '__main__':
sys.exit(tuner.run(sys.argv))