Diffstat (limited to 'utils/tuning')
-rw-r--r--  utils/tuning/README.rst | 20
-rw-r--r--  utils/tuning/config-example.yaml | 12
-rw-r--r--  utils/tuning/libtuning/__init__.py | 13
-rw-r--r--  utils/tuning/libtuning/average.py | 21
-rw-r--r--  utils/tuning/libtuning/ctt_awb.py | 378
-rw-r--r--  utils/tuning/libtuning/ctt_ccm.py | 408
-rw-r--r--  utils/tuning/libtuning/ctt_colors.py | 30
-rw-r--r--  utils/tuning/libtuning/ctt_ransac.py | 71
-rw-r--r--  utils/tuning/libtuning/generators/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/generators/generator.py | 15
-rw-r--r--  utils/tuning/libtuning/generators/raspberrypi_output.py | 114
-rw-r--r--  utils/tuning/libtuning/generators/yaml_output.py | 127
-rw-r--r--  utils/tuning/libtuning/gradient.py | 75
-rw-r--r--  utils/tuning/libtuning/image.py | 140
-rw-r--r--  utils/tuning/libtuning/libtuning.py | 212
-rw-r--r--  utils/tuning/libtuning/macbeth.py | 537
-rw-r--r--  utils/tuning/libtuning/macbeth_ref.pgm | 6
-rw-r--r--  utils/tuning/libtuning/modules/__init__.py | 3
-rw-r--r--  utils/tuning/libtuning/modules/agc/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/modules/agc/agc.py | 21
-rw-r--r--  utils/tuning/libtuning/modules/agc/rkisp1.py | 79
-rw-r--r--  utils/tuning/libtuning/modules/awb/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/modules/awb/awb.py | 36
-rw-r--r--  utils/tuning/libtuning/modules/awb/rkisp1.py | 27
-rw-r--r--  utils/tuning/libtuning/modules/ccm/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/modules/ccm/ccm.py | 41
-rw-r--r--  utils/tuning/libtuning/modules/ccm/rkisp1.py | 28
-rw-r--r--  utils/tuning/libtuning/modules/lsc/__init__.py | 7
-rw-r--r--  utils/tuning/libtuning/modules/lsc/lsc.py | 75
-rw-r--r--  utils/tuning/libtuning/modules/lsc/raspberrypi.py | 248
-rw-r--r--  utils/tuning/libtuning/modules/lsc/rkisp1.py | 116
-rw-r--r--  utils/tuning/libtuning/modules/module.py | 32
-rw-r--r--  utils/tuning/libtuning/modules/static.py | 24
-rw-r--r--  utils/tuning/libtuning/parsers/__init__.py | 6
-rw-r--r--  utils/tuning/libtuning/parsers/parser.py | 21
-rw-r--r--  utils/tuning/libtuning/parsers/raspberrypi_parser.py | 93
-rw-r--r--  utils/tuning/libtuning/parsers/yaml_parser.py | 20
-rw-r--r--  utils/tuning/libtuning/smoothing.py | 24
-rw-r--r--  utils/tuning/libtuning/utils.py | 186
-rw-r--r--  utils/tuning/raspberrypi/__init__.py | 3
-rw-r--r--  utils/tuning/raspberrypi/alsc.py | 19
-rwxr-xr-x  utils/tuning/raspberrypi_alsc_only.py | 23
-rw-r--r--  utils/tuning/requirements.txt | 9
-rwxr-xr-x  utils/tuning/rkisp1.py | 57
44 files changed, 3401 insertions, 0 deletions
diff --git a/utils/tuning/README.rst b/utils/tuning/README.rst
new file mode 100644
index 00000000..89a1d61e
--- /dev/null
+++ b/utils/tuning/README.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+libcamera tuning tools
+======================
+
+.. Note:: The tuning tools are still very much work in progress. If in doubt,
+ please ask on the mailing list.
+
+.. todo::
+ Write documentation
+
+Installation of dependencies
+----------------------------
+
+::
+
+ # Using a venv
+ python3 -m venv venv
+ . ./venv/bin/activate
+ pip3 install -r requirements.txt
+
diff --git a/utils/tuning/config-example.yaml b/utils/tuning/config-example.yaml
new file mode 100644
index 00000000..1b7f52cd
--- /dev/null
+++ b/utils/tuning/config-example.yaml
@@ -0,0 +1,12 @@
+general:
+ disable: []
+ plot: []
+ alsc:
+ do_alsc_colour: 1
+ luminance_strength: 0.5
+ awb:
+ greyworld: 0
+ macbeth:
+ small: 1
+ show: 0
+# blacklevel: 32
\ No newline at end of file
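For illustration, the example configuration above could be loaded and queried as below (a sketch assuming PyYAML, which the YAML parser added in this series relies on; the file name and key path are taken from the example file):

```python
import yaml

# Load the example tuning configuration (path assumed for illustration)
with open('config-example.yaml') as f:
    config = yaml.safe_load(f)

# Options are nested plain dicts, e.g. the ALSC luminance strength
print(config['general']['alsc']['luminance_strength'])  # 0.5
```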
diff --git a/utils/tuning/libtuning/__init__.py b/utils/tuning/libtuning/__init__.py
new file mode 100644
index 00000000..93049976
--- /dev/null
+++ b/utils/tuning/libtuning/__init__.py
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.utils import *
+from libtuning.libtuning import *
+
+from libtuning.image import *
+from libtuning.macbeth import *
+
+from libtuning.average import *
+from libtuning.gradient import *
+from libtuning.smoothing import *
diff --git a/utils/tuning/libtuning/average.py b/utils/tuning/libtuning/average.py
new file mode 100644
index 00000000..c41075a1
--- /dev/null
+++ b/utils/tuning/libtuning/average.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Wrapper for numpy averaging functions to enable duck-typing
+
+import numpy as np
+
+
+# @brief Wrapper for np averaging functions so that they can be duck-typed
+class Average(object):
+ def __init__(self):
+ pass
+
+ def average(self, np_array):
+ raise NotImplementedError
+
+
+class Mean(Average):
+ def average(self, np_array):
+ return np.mean(np_array)
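The Average base class exists purely so that averaging strategies can be duck-typed. A minimal sketch of an alternative statistic (Median is hypothetical, not part of this patch):

```python
import numpy as np

from libtuning.average import Average


class Median(Average):
    def average(self, np_array):
        # The median is more robust to outlier pixels than the mean
        return np.median(np_array)


print(Median().average(np.array([1, 2, 10])))  # 2.0
```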
diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py
new file mode 100644
index 00000000..240f37e6
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_awb.py
@@ -0,0 +1,378 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for AWB
+
+import logging
+
+import matplotlib.pyplot as plt
+from bisect import bisect_left
+from scipy.optimize import fmin
+import numpy as np
+
+from .image import Image
+
+logger = logging.getLogger(__name__)
+
+"""
+obtain piecewise linear approximation for colour curve
+"""
+def awb(imgs, cal_cr_list, cal_cb_list, plot):
+ """
+ condense alsc calibration tables into one dictionary
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab/np.min(cr_tab)
+ cb_tab = cb_tab/np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+ """
+ obtain data from greyscale macbeth patches
+ """
+ rb_raw = []
+ rbs_hat = []
+ for Img in imgs:
+ logger.info(f'Processing {Img.name}')
+ """
+ get greyscale patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and the
+ function will just return the greyscale patches
+ """
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ """
+ calculate ratio of r, b to g
+ """
+ r_g = np.mean(r_patchs/g_patchs)
+ b_g = np.mean(b_patchs/g_patchs)
+ logger.info(f' r : {r_g:.4f} b : {b_g:.4f}')
+ """
+ The curve tends to be better behaved in so-called hatspace.
+ R, B, G represent the individual channels. The colour curve is plotted in
+ r, b space, where:
+ r = R/G
+ b = B/G
+ This will be referred to as dehatspace... (sorry)
+ Hatspace is defined as:
+ r_hat = R/(R+B+G)
+ b_hat = B/(R+B+G)
+ To convert from dehatspace to hatspace (hat operation):
+ r_hat = r/(1+r+b)
+ b_hat = b/(1+r+b)
+ To convert from hatspace to dehatspace (dehat operation):
+ r = r_hat/(1-r_hat-b_hat)
+ b = b_hat/(1-r_hat-b_hat)
+ Proof is left as an exercise to the reader...
+ Throughout the code, r and b are sometimes referred to as r_g and b_g
+ as a reminder that they are ratios
+ """
+ r_g_hat = r_g/(1+r_g+b_g)
+ b_g_hat = b_g/(1+r_g+b_g)
+ logger.info(f' r_hat : {r_g_hat:.4f} b_hat : {b_g_hat:.4f}')
+ rbs_hat.append((r_g_hat, b_g_hat, Img.color))
+ rb_raw.append((r_g, b_g))
+
+ logger.info('Finished processing images')
+ """
+ sort all lists simultaneously by r_hat
+ """
+ rbs_zip = list(zip(rbs_hat, rb_raw))
+ rbs_zip.sort(key=lambda x: x[0][0])
+ rbs_hat, rb_raw = list(zip(*rbs_zip))
+ """
+ unzip tuples ready for processing
+ """
+ rbs_hat = list(zip(*rbs_hat))
+ rb_raw = list(zip(*rb_raw))
+ """
+ fit a quadratic curve to r_g_hat and b_g_hat
+ """
+ a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
+ logger.info('Fit quadratic curve in hatspace')
+ """
+ the algorithm now approximates the shortest distance from each point to the
+ curve in dehatspace. Since the fit is done in hatspace, it is easier to
+ find the actual shortest distance in hatspace and use the projection back
+ into dehatspace as an overestimate.
+ The distance will be used for two things:
+ 1) In the case that colour temperature does not strictly decrease with
+ increasing r/g, the closest point to the line will be chosen out of an
+ increasing pair of colours.
+
+ 2) To calculate transverse negative and positive, the maximum positive
+ and negative distance from the line are chosen. This benefits from the
+ overestimate as the transverse pos/neg are upper bound values.
+ """
+ """
+ define fit function
+ """
+ def f(x):
+ return a*x**2 + b*x + c
+ """
+ iterate over points (R, B are x and y coordinates of points) and calculate
+ distance to line in dehatspace
+ """
+ dists = []
+ for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
+ """
+ define function to minimise as square distance between datapoint and
+ point on curve. Squaring is monotonic so minimising radius squared is
+ equivalent to minimising radius
+ """
+ def f_min(x):
+ y = f(x)
+ return((x-R)**2+(y-B)**2)
+ """
+ perform optimisation with scipy.optimize.fmin
+ """
+ x_hat = fmin(f_min, R, disp=0)[0]
+ y_hat = f(x_hat)
+ """
+ dehat
+ """
+ x = x_hat/(1-x_hat-y_hat)
+ y = y_hat/(1-x_hat-y_hat)
+ rr = R/(1-R-B)
+ bb = B/(1-R-B)
+ """
+ calculate euclidean distance in dehatspace
+ """
+ dist = ((x-rr)**2+(y-bb)**2)**0.5
+ """
+ return negative if point is below the fit curve
+ """
+ if (x+y) > (rr+bb):
+ dist *= -1
+ dists.append(dist)
+ logger.info('Found closest point on fit line to each point in dehatspace')
+ """
+ calculate wiggle factors in awb. 10% added since this is an upper bound
+ """
+ transverse_neg = - np.min(dists) * 1.1
+ transverse_pos = np.max(dists) * 1.1
+ logger.info(f'Transverse pos : {transverse_pos:.5f}')
+ logger.info(f'Transverse neg : {transverse_neg:.5f}')
+ """
+ set minimum transverse wiggles to 0.01.
+ Wiggle factors dictate how far off of the curve the algorithm searches. 0.01
+ is a suitable minimum that gives better results for lighting conditions not
+ within the calibration dataset. Anything less will generalise poorly.
+ """
+ if transverse_pos < 0.01:
+ transverse_pos = 0.01
+ logger.info('Forced transverse pos to 0.01')
+ if transverse_neg < 0.01:
+ transverse_neg = 0.01
+ logger.info('Forced transverse neg to 0.01')
+
+ """
+ generate new b_hat values at each r_hat according to fit
+ """
+ r_hat_fit = np.array(rbs_hat[0])
+ b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
+ """
+ transform from hatspace to dehatspace
+ """
+ r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
+ b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
+ c_fit = np.round(rbs_hat[2], 0)
+ """
+ round to 4dp
+ """
+ r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
+ r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
+ b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
+ b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
+ r_fit = np.round(r_fit, 4)
+ b_fit = np.round(b_fit, 4)
+ """
+ The following code ensures that colour temperature decreases with
+ increasing r/g
+ """
+ """
+ iterate backwards over list for easier indexing
+ """
+ i = len(c_fit) - 1
+ while i > 0:
+ if c_fit[i] > c_fit[i-1]:
+ logger.info('Colour temperature increase found')
+ logger.info(f'{c_fit[i - 1]} K at r = {r_fit[i - 1]} to ')
+ logger.info(f'{c_fit[i]} K at r = {r_fit[i]}')
+ """
+ if colour temperature increases then discard point furthest from
+ the transformed fit (dehatspace)
+ """
+ error_1 = abs(dists[i-1])
+ error_2 = abs(dists[i])
+ logger.info('Distances from fit:')
+ logger.info(f'{c_fit[i - 1]} K : {error_1:.5f}')
+ logger.info(f'{c_fit[i]} K : {error_2:.5f}')
+ """
+ find bad index
+ note that in python false = 0 and true = 1
+ """
+ bad = i - (error_1 < error_2)
+ logger.info(f'Point at {c_fit[bad]} K deleted as ')
+ logger.info('it is furthest from fit')
+ """
+ delete bad point
+ """
+ r_fit = np.delete(r_fit, bad)
+ b_fit = np.delete(b_fit, bad)
+ c_fit = np.delete(c_fit, bad).astype(np.uint16)
+ """
+ note that if a point has been discarded then the length has decreased
+ by one, meaning that decreasing the index by one will reassess the kept
+ point against the next point. It is therefore possible, in theory, for
+ two adjacent points to be discarded, although probably rare
+ """
+ i -= 1
+
+ """
+ return formatted ct curve, ordered by increasing colour temperature
+ """
+ ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
+ logger.info('Final CT curve:')
+ for i in range(len(ct_curve)//3):
+ j = 3*i
+ logger.info(f' ct: {ct_curve[j]} ')
+ logger.info(f' r: {ct_curve[j + 1]} ')
+ logger.info(f' b: {ct_curve[j + 2]} ')
+
+ """
+ plotting code for debug
+ """
+ if plot:
+ x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
+ y = a*x**2 + b*x + c
+ plt.subplot(2, 1, 1)
+ plt.title('hatspace')
+ plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
+ plt.plot(x, y, color='green', ls='-')
+ plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
+ for i, ct in enumerate(rbs_hat[2]):
+ plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
+ plt.xlabel('$\\hat{r}$')
+ plt.ylabel('$\\hat{b}$')
+ """
+ optionally set axes equal to shortest distance so the line really does
+ look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.grid()
+ plt.subplot(2, 1, 2)
+ plt.title('dehatspace - indoors?')
+ plt.plot(r_fit, b_fit, color='blue')
+ plt.scatter(rb_raw[0], rb_raw[1], color='green')
+ plt.scatter(r_fit, b_fit, color='red')
+ for i, ct in enumerate(c_fit):
+ plt.annotate(str(ct), (r_fit[i], b_fit[i]))
+ plt.xlabel('$r$')
+ plt.ylabel('$b$')
+ """
+ optionally set axes equal to shortest distance so the line really does
+ look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.subplots_adjust(hspace=0.5)
+ plt.grid()
+ plt.show()
+ """
+ end of plotting code
+ """
+ return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
+
+
+"""
+obtain greyscale patches and perform alsc colour correction
+"""
+def get_alsc_patches(Img, colour_cals, grey=True):
+ """
+ get patch centre coordinates, image colour and the actual
+ patches for each channel, remembering to subtract blacklevel
+ If grey is set, only greyscale patches are considered
+ """
+ patches = Img.patches
+ if grey:
+ cen_coords = Img.cen_coords[3::4]
+ col = Img.color
+ r_patchs = patches[0][3::4] - Img.blacklevel_16
+ b_patchs = patches[3][3::4] - Img.blacklevel_16
+ """
+ note two green channels are averages
+ """
+ g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
+ else:
+ cen_coords = Img.cen_coords
+ col = Img.color
+ r_patchs = patches[0] - Img.blacklevel_16
+ b_patchs = patches[3] - Img.blacklevel_16
+ g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
+
+ if colour_cals is None:
+ return r_patchs, b_patchs, g_patchs
+ """
+ find where image colour fits in alsc colour calibration tables
+ """
+ cts = list(colour_cals.keys())
+ pos = bisect_left(cts, col)
+ """
+ if img colour is below minimum or above maximum alsc calibration colour, simply
+ pick extreme closest to img colour
+ """
+ if pos % len(cts) == 0:
+ """
+ this works because -0 = 0 = first and -1 = last index
+ """
+ col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
+ """
+ else, perform linear interpolation between existing alsc colour
+ calibration tables
+ """
+ else:
+ bef = cts[pos-1]
+ aft = cts[pos]
+ da = col-bef
+ db = aft-col
+ bef_tabs = np.array(colour_cals[bef])
+ aft_tabs = np.array(colour_cals[aft])
+ col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
+ col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ """
+ calculate dx, dy used to calculate alsc table
+ """
+ w, h = Img.w/2, Img.h/2
+ dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ """
+ make list of pairs of gains for each patch by selecting the correct value
+ in alsc colour calibration table
+ """
+ patch_gains = []
+ for cen in cen_coords:
+ x, y = cen[0]//dx, cen[1]//dy
+ # We could probably do with some better spatial interpolation here?
+ col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
+ patch_gains.append(col_gains)
+
+ """
+ multiply the r and b channels in each patch by the respective gain, finally
+ performing the alsc colour correction
+ """
+ for i, gains in enumerate(patch_gains):
+ r_patchs[i] = r_patchs[i] * gains[0]
+ b_patchs[i] = b_patchs[i] * gains[1]
+
+ """
+ return greyscale patches, g channel and correct r, b channels
+ """
+ return r_patchs, b_patchs, g_patchs
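The hat and dehat operations described in awb() are exact inverses of each other, which a few standalone lines verify (a sketch of the maths, not part of the patch):

```python
def hat(r, b):
    # dehatspace -> hatspace: r_hat = r / (1 + r + b), b_hat = b / (1 + r + b)
    return r / (1 + r + b), b / (1 + r + b)


def dehat(r_hat, b_hat):
    # hatspace -> dehatspace: r = r_hat / (1 - r_hat - b_hat)
    d = 1 - r_hat - b_hat
    return r_hat / d, b_hat / d


r, b = 0.56, 0.43
r2, b2 = dehat(*hat(r, b))
assert abs(r2 - r) < 1e-12 and abs(b2 - b) < 1e-12
```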
diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py
new file mode 100644
index 00000000..2e87a667
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ccm.py
@@ -0,0 +1,408 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for CCM (colour correction matrix)
+
+import logging
+
+import numpy as np
+from scipy.optimize import minimize
+
+from . import ctt_colors as colors
+from .image import Image
+from .ctt_awb import get_alsc_patches
+from .utils import visualise_macbeth_chart
+
+logger = logging.getLogger(__name__)
+
+"""
+takes 8-bit macbeth chart values, degammas them and returns 16-bit values
+"""
+
+'''
+This program has several options for deriving the color matrix.
+The first is average. This minimises the average delta E across all patches of
+the macbeth chart. Testing across all cameras yielded this as the most color
+accurate and vivid. Other options are available however.
+Maximum minimises the maximum delta E of the patches. It iterates until a
+minimum maximum is found (so that there is not one patch that deviates
+wildly).
+This yields generally good results, but overall the colors are less accurate.
+Have a fiddle with maximum and see what you think.
+The final option allows you to select the patches over which to average.
+This means that you can bias certain patches, for instance if you want the
+reds to be more accurate.
+'''
+
+matrix_selection_types = ["average", "maximum", "patches"]
+typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
+test_patches = [1, 2, 5, 8, 9, 12, 14]
+
+'''
+Enter patches to test for. Can also be entered twice if you
+would like twice as much bias on one patch.
+'''
+
+
+def degamma(x):
+ x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one
+ x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
+ x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color
+ return x
+
+
+def gamma(x):
+ # Take 3 long array of color values and gamma them
+ return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
+
+
+"""
+Finds colour correction matrices for a list of images
+"""
+
+
+def ccm(imgs, cal_cr_list, cal_cb_list):
+ global matrix_selection_types, typenum
+ """
+ standard macbeth chart colour values
+ """
+ m_rgb = np.array([ # these are in RGB
+ [116, 81, 67], # dark skin
+ [199, 147, 129], # light skin
+ [91, 122, 156], # blue sky
+ [90, 108, 64], # foliage
+ [130, 128, 176], # blue flower
+ [92, 190, 172], # bluish green
+ [224, 124, 47], # orange
+ [68, 91, 170], # purplish blue
+ [198, 82, 97], # moderate red
+ [94, 58, 106], # purple
+ [159, 189, 63], # yellow green
+ [230, 162, 39], # orange yellow
+ [35, 63, 147], # blue
+ [67, 149, 74], # green
+ [180, 49, 57], # red
+ [238, 198, 20], # yellow
+ [193, 84, 151], # magenta
+ [0, 136, 170], # cyan (goes out of gamut)
+ [245, 245, 243], # white 9.5
+ [200, 202, 202], # neutral 8
+ [161, 163, 163], # neutral 6.5
+ [121, 121, 122], # neutral 5
+ [82, 84, 86], # neutral 3.5
+ [49, 49, 51] # black 2
+ ])
+ """
+ convert reference colours from srgb to rgb
+ """
+ m_srgb = degamma(m_rgb) # now in 16 bit color.
+
+ # Produce array of LAB values for ideal color chart
+ m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
+
+ """
+ reorder reference values to match how patches are ordered
+ """
+ m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
+ m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
+ m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
+ """
+ reformat alsc correction tables or set colour_cals to None if alsc is
+ deactivated
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab / np.min(cr_tab)
+ cb_tab = cb_tab / np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+
+ """
+ for each image, perform awb and alsc corrections.
+ Then calculate the colour correction matrix for that image, recording the
+ ccm and the colour temperature.
+ """
+ ccm_tab = {}
+ for Img in imgs:
+ logger.info('Processing image: ' + Img.name)
+ """
+ get macbeth patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and
+ the function will simply return the macbeth patches
+ """
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
+ # 256 values for each patch of sRGB values
+
+ """
+ do awb
+ Note: awb is done by measuring the macbeth chart in the image, rather
+ than from the awb calibration. This is done so the awb will be perfect
+ and the ccm matrices will be more accurate.
+ """
+ r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
+ r_g = np.mean(r_greys / g_greys)
+ b_g = np.mean(b_greys / g_greys)
+ r = r / r_g
+ b = b / b_g
+ """
+ normalise brightness wrt reference macbeth colours and then average
+ each channel for each patch
+ """
+ gain = np.mean(m_srgb) / np.mean((r, g, b))
+ logger.info(f'Gain with respect to standard colours: {gain:.3f}')
+ r = np.mean(gain * r, axis=1)
+ b = np.mean(gain * b, axis=1)
+ g = np.mean(gain * g, axis=1)
+ """
+ calculate ccm matrix
+ """
+ # ==== All of the below should be in sRGB ====
+ sumde = 0
+ ccm = do_ccm(r, g, b, m_srgb)
+ # This is the initial guess that our optimisation code works with.
+ original_ccm = ccm
+ r1 = ccm[0]
+ r2 = ccm[1]
+ g1 = ccm[3]
+ g2 = ccm[4]
+ b1 = ccm[6]
+ b2 = ccm[7]
+ '''
+ COLOR MATRIX LOOKS AS BELOW
+ R1 R2 R3 Rval Outr
+ G1 G2 G3 * Gval = G
+ B1 B2 B3 Bval B
+ We optimise six elements and derive the third element of each row using r3 = 1 - r1 - r2
+ '''
+
+ x0 = [r1, r2, g1, g2, b1, b2]
+ '''
+ We use our old CCM as the initial guess for the program to find the
+ optimised matrix
+ '''
+ result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
+ '''
+ This produces a color matrix which has the lowest delta E possible,
+ based on the input data. Note that it is impossible for this to reach
+ zero since the input data is imperfect
+ '''
+
+ [r1, r2, g1, g2, b1, b2] = result.x
+ # The new, optimised color correction matrix values
+ # This is the optimised Color Matrix (preserving greys by summing rows up to 1)
+ optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
+
+ logger.info(f'Optimized Matrix: {np.round(optimised_ccm, 4)}')
+ logger.info(f'Old Matrix: {np.round(ccm, 4)}')
+
+ formatted_ccm = np.array(original_ccm).reshape((3, 3))
+
+ '''
+ below is a whole load of code that then applies the latest color
+ matrix, and returns LAB values for color. This can then be used
+ to calculate the final delta E
+ '''
+ optimised_ccm_rgb = [] # Original Color Corrected Matrix RGB / LAB
+ optimised_ccm_lab = []
+
+ formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
+ after_gamma_rgb = []
+ after_gamma_lab = []
+
+ for RGB in zip(r, g, b):
+ ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
+ optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
+ optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
+
+ optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
+ after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
+ after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
+ '''
+ Gamma After RGB / LAB - not used in calculations, only used for visualisation
+ We now want to spit out some data that shows
+ how the optimisation has improved the color matrices
+ '''
+ logger.info("Here are the Improvements")
+
+ # CALCULATE WORST CASE delta e
+ old_worst_delta_e = 0
+ before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
+ new_worst_delta_e = 0
+ after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
+ for i in range(24):
+ old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E
+ new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E
+ if old_delta_e > old_worst_delta_e:
+ old_worst_delta_e = old_delta_e
+ if new_delta_e > new_worst_delta_e:
+ new_worst_delta_e = new_delta_e
+
+ logger.info(f'delta E optimized: average: {after_average:.2f} max:{new_worst_delta_e:.2f}')
+ logger.info(f'delta E old: average: {before_average:.2f} max:{old_worst_delta_e:.2f}')
+
+ visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.color) + str(matrix_selection_types[typenum]))
+ '''
+ The program will also save some visualisations of improvements.
+ Very pretty to look at. Top rectangle is ideal, Left square is
+ before optimisation, right square is after.
+ '''
+
+ """
+ if a ccm has already been calculated for that temperature then don't
+ overwrite but save both. They will then be averaged later on
+ """ # Now going to use optimised color matrix, optimised_ccm
+ if Img.color in ccm_tab.keys():
+ ccm_tab[Img.color].append(optimised_ccm)
+ else:
+ ccm_tab[Img.color] = [optimised_ccm]
+
+ logger.info('Finished processing images')
+ """
+ average any ccms that share a colour temperature
+ """
+ for k, v in ccm_tab.items():
+ tab = np.mean(v, axis=0)
+ tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
+ tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
+ ccm_tab[k] = list(np.round(tab, 5))
+ logger.info(f'Matrix calculated for colour temperature of {k} K')
+
+ """
+ return all ccms with respective colour temperature in the correct format,
+ sorted by their colour temperature
+ """
+ sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
+ ccms = []
+ for i in sorted_ccms:
+ ccms.append({
+ 'ct': i[0],
+ 'ccm': i[1]
+ })
+ return ccms
+
+
+def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code
+ [r1, r2, g1, g2, b1, b2] = x0
+ ccm = np.array([r1, r2, (1 - r1 - r2),
+ g1, g2, (1 - g1 - g2),
+ b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly
+ return transform_and_evaluate(ccm, r, g, b, m_lab)
+
+
+def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix
+ # create list of matrix changed colors
+ realrgb = []
+ for RGB in zip(r, g, b):
+ rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied
+ realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
+ # now compare that with m_lab and return numeric result, averaged for each patch
+ return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E
+
+
+def sumde(listA, listB):
+ global typenum, test_patches
+ sumde = 0
+ maxde = 0
+ patchde = [] # Create array of the delta E values for each patch. useful for optimisation of certain patches
+ for listA_item, listB_item in zip(listA, listB):
+ if maxde < (deltae(listA_item, listB_item)):
+ maxde = deltae(listA_item, listB_item)
+ patchde.append(deltae(listA_item, listB_item))
+ sumde += deltae(listA_item, listB_item)
+ '''
+ The different options specified at the start allow for
+ the maximum to be returned, average or specific patches
+ '''
+ if typenum == 0:
+ return sumde
+ if typenum == 1:
+ return maxde
+ if typenum == 2:
+ output = sum([patchde[test_patch] for test_patch in test_patches])
+ # Selects only certain patches and returns the output for them
+ return output
+
+
+"""
+calculates the ccm for an individual image.
+ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
+matrix, each row must add up to 1 in order to conserve greyness, simplifying
+calculation.
+The initial CCM is calculated in RGB, and then optimised in LAB color space
+This simplifies the initial calculation but then gets us the accuracy of
+using LAB color space.
+"""
+
+
+def do_ccm(r, g, b, m_srgb):
+ rb = r-b
+ gb = g-b
+ rb_2s = (rb * rb)
+ rb_gbs = (rb * gb)
+ gb_2s = (gb * gb)
+
+ r_rbs = rb * (m_srgb[..., 0] - b)
+ r_gbs = gb * (m_srgb[..., 0] - b)
+ g_rbs = rb * (m_srgb[..., 1] - b)
+ g_gbs = gb * (m_srgb[..., 1] - b)
+ b_rbs = rb * (m_srgb[..., 2] - b)
+ b_gbs = gb * (m_srgb[..., 2] - b)
+
+ """
+ Obtain least squares fit
+ """
+ rb_2 = np.sum(rb_2s)
+ gb_2 = np.sum(gb_2s)
+ rb_gb = np.sum(rb_gbs)
+ r_rb = np.sum(r_rbs)
+ r_gb = np.sum(r_gbs)
+ g_rb = np.sum(g_rbs)
+ g_gb = np.sum(g_gbs)
+ b_rb = np.sum(b_rbs)
+ b_gb = np.sum(b_gbs)
+
+ det = rb_2 * gb_2 - rb_gb * rb_gb
+
+ """
+ Raise error if matrix is singular...
+ This shouldn't really happen with real data but if it does just take new
+ pictures and try again, not much else to be done unfortunately...
+ """
+ if det < 0.001:
+ raise ArithmeticError
+
+ r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
+ r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
+ """
+ Last row can be calculated by knowing the sum must be 1
+ """
+ r_c = 1 - r_a - r_b
+
+ g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
+ g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
+ g_c = 1 - g_a - g_b
+
+ b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
+ b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
+ b_c = 1 - b_a - b_b
+
+ """
+ format ccm
+ """
+ ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
+
+ return ccm
+
+
+def deltae(colorA, colorB):
+ return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+ # return ((colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+ # Uncomment the above to neglect luminance in the calculation of delta E
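The grey-preservation constraint that do_ccm() builds in (each row of the CCM summing to 1) can be checked in isolation; the matrix values below are made up for illustration:

```python
import numpy as np

ccm = np.array([[ 1.80, -0.50, -0.30],
                [-0.20,  1.40, -0.20],
                [-0.10, -0.60,  1.70]])  # illustrative values only

# Each row sums to 1, so a grey input (R = G = B) maps to the same grey
assert np.allclose(ccm.sum(axis=1), 1.0)
print(ccm @ np.array([0.5, 0.5, 0.5]))  # [0.5 0.5 0.5]
```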
diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py
new file mode 100644
index 00000000..cb4d236b
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_colors.py
@@ -0,0 +1,30 @@
+# Program to convert from RGB to LAB color space
+def RGB_to_LAB(RGB): # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
+ XYZ = [0, 0, 0]
+ # convert R, G, B to X, Y, Z
+ X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
+ Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
+ Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
+
+ XYZ[0] = X / 255 * 100
+ XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255
+ XYZ[2] = Z / 255 * 100
+ XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65
+ XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000
+ XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883
+ num = 0
+ for value in XYZ:
+ if value > 0.008856:
+ value = value ** (0.3333333333333333)
+ else:
+ value = (7.787 * value) + (16 / 116)
+ XYZ[num] = value
+ num = num + 1
+
+ # L, A, B, values calculated below
+ L = (116 * XYZ[1]) - 16
+ a = 500 * (XYZ[0] - XYZ[1])
+ b = 200 * (XYZ[1] - XYZ[2])
+
+ return [L, a, b]
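As a quick sanity check of RGB_to_LAB(), sRGB white should land approximately on the D65 white point, i.e. L close to 100 with a and b near zero:

```python
from libtuning.ctt_colors import RGB_to_LAB

L, a, b = RGB_to_LAB([255, 255, 255])
print(round(L, 1), round(a, 1), round(b, 1))  # ~100.0 ~0.0 ~0.0
```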
diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py
new file mode 100644
index 00000000..01bba302
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ransac.py
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool RANSAC selector for Macbeth chart locator
+
+import numpy as np
+
+scale = 2
+
+
+"""
+constructs normalised macbeth chart corners for ransac algorithm
+"""
+def get_square_verts(c_err=0.05, scale=scale):
+ """
+ define macbeth chart corners
+ """
+ b_bord_x, b_bord_y = scale*8.5, scale*13
+ s_bord = 6*scale
+ side = 41*scale
+ x_max = side*6 + 5*s_bord + 2*b_bord_x
+ y_max = side*4 + 3*s_bord + 2*b_bord_y
+ c1 = (0, 0)
+ c2 = (0, y_max)
+ c3 = (x_max, y_max)
+ c4 = (x_max, 0)
+ mac_norm = np.array((c1, c2, c3, c4), np.float32)
+ mac_norm = np.array([mac_norm])
+
+ square_verts = []
+ square_0 = np.array(((0, 0), (0, side),
+ (side, side), (side, 0)), np.float32)
+ offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
+ c_off = side * c_err
+ offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
+ (-c_off, -c_off), (-c_off, c_off)), np.float32)
+ square_0 += offset_0
+ square_0 += offset_cont
+ """
+ define macbeth square corners
+ """
+ for i in range(6):
+ shift_i = np.array(((i*side, 0), (i*side, 0),
+ (i*side, 0), (i*side, 0)), np.float32)
+ shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
+ (i*s_bord, 0), (i*s_bord, 0)), np.float32)
+ square_i = square_0 + shift_i + shift_bord
+ for j in range(4):
+ shift_j = np.array(((0, j*side), (0, j*side),
+ (0, j*side), (0, j*side)), np.float32)
+ shift_bord = np.array(((0, j*s_bord),
+ (0, j*s_bord), (0, j*s_bord),
+ (0, j*s_bord)), np.float32)
+ square_j = square_i + shift_j + shift_bord
+ square_verts.append(square_j)
+ # print('square_verts')
+ # print(square_verts)
+ return np.array(square_verts, np.float32), mac_norm
+
+
+def get_square_centres(c_err=0.05, scale=scale):
+ """
+ define macbeth square centres
+ """
+ verts, mac_norm = get_square_verts(c_err, scale=scale)
+
+ centres = np.mean(verts, axis=1)
+ # print('centres')
+ # print(centres)
+ return np.array(centres, np.float32)
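The vertex and centre helpers return one entry per macbeth patch (6 columns by 4 rows), which the array shapes reflect:

```python
from libtuning.ctt_ransac import get_square_verts, get_square_centres

verts, mac_norm = get_square_verts()
centres = get_square_centres()
print(verts.shape)    # (24, 4, 2): 24 squares with 4 corners each
print(centres.shape)  # (24, 2): one centre per square
```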
diff --git a/utils/tuning/libtuning/generators/__init__.py b/utils/tuning/libtuning/generators/__init__.py
new file mode 100644
index 00000000..f28b6149
--- /dev/null
+++ b/utils/tuning/libtuning/generators/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.generators.raspberrypi_output import RaspberryPiOutput
+from libtuning.generators.yaml_output import YamlOutput
diff --git a/utils/tuning/libtuning/generators/generator.py b/utils/tuning/libtuning/generators/generator.py
new file mode 100644
index 00000000..77a8ba4a
--- /dev/null
+++ b/utils/tuning/libtuning/generators/generator.py
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Base class for a generator to convert dict to tuning file
+
+from pathlib import Path
+
+
+class Generator(object):
+ def __init__(self):
+ pass
+
+ def write(self, output_path: Path, output_dict: dict, output_order: list):
+ raise NotImplementedError
diff --git a/utils/tuning/libtuning/generators/raspberrypi_output.py b/utils/tuning/libtuning/generators/raspberrypi_output.py
new file mode 100644
index 00000000..47b49059
--- /dev/null
+++ b/utils/tuning/libtuning/generators/raspberrypi_output.py
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright 2022 Raspberry Pi Ltd
+#
+# Generate tuning file in Raspberry Pi's json format
+#
+# (Copied from ctt_pretty_print_json.py)
+
+from .generator import Generator
+
+import json
+from pathlib import Path
+import textwrap
+
+
+class Encoder(json.JSONEncoder):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.indentation_level = 0
+ self.hard_break = 120
+ self.custom_elems = {
+ 'table': 16,
+ 'luminance_lut': 16,
+ 'ct_curve': 3,
+ 'ccm': 3,
+ 'gamma_curve': 2,
+ 'y_target': 2,
+ 'prior': 2
+ }
+
+ def encode(self, o, node_key=None):
+ if isinstance(o, (list, tuple)):
+ # Check if we are a flat list of numbers.
+ if not any(isinstance(el, (list, tuple, dict)) for el in o):
+ s = ', '.join(json.dumps(el) for el in o)
+ if node_key in self.custom_elems.keys():
+ # Special case handling to specify number of elements in a row for tables, ccm, etc.
+ self.indentation_level += 1
+ sl = s.split(', ')
+ num = self.custom_elems[node_key]
+ chunk = [self.indent_str + ', '.join(sl[x:x + num]) for x in range(0, len(sl), num)]
+ t = ',\n'.join(chunk)
+ self.indentation_level -= 1
+ output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
+ elif len(s) > self.hard_break - len(self.indent_str):
+ # Break a long list with wraps.
+ self.indentation_level += 1
+ t = textwrap.fill(s, self.hard_break, break_long_words=False,
+ initial_indent=self.indent_str, subsequent_indent=self.indent_str)
+ self.indentation_level -= 1
+ output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
+ else:
+ # Smaller lists can remain on a single line.
+ output = f' [ {s} ]'
+ return output
+ else:
+ # Sub-structures in the list case.
+ self.indentation_level += 1
+ output = [self.indent_str + self.encode(el) for el in o]
+ self.indentation_level -= 1
+ output = ',\n'.join(output)
+ return f' [\n{output}\n{self.indent_str}]'
+
+ elif isinstance(o, dict):
+ self.indentation_level += 1
+ output = []
+ for k, v in o.items():
+ if isinstance(v, dict) and len(v) == 0:
+ # Empty config block special case.
+ output.append(self.indent_str + f'{json.dumps(k)}: {{ }}')
+ else:
+ # Only linebreak if the next node is a config block.
+ sep = f'\n{self.indent_str}' if isinstance(v, dict) else ''
+ output.append(self.indent_str + f'{json.dumps(k)}:{sep}{self.encode(v, k)}')
+ output = ',\n'.join(output)
+ self.indentation_level -= 1
+ return f'{{\n{output}\n{self.indent_str}}}'
+
+ else:
+ return ' ' + json.dumps(o)
+
+ @property
+ def indent_str(self) -> str:
+ return ' ' * self.indentation_level * self.indent
+
+ def iterencode(self, o, **kwargs):
+ return self.encode(o)
+
+
+class RaspberryPiOutput(Generator):
+ def __init__(self):
+ super().__init__()
+
+ def _pretty_print(self, in_json: dict) -> str:
+
+ if 'version' not in in_json or \
+ 'target' not in in_json or \
+ 'algorithms' not in in_json or \
+ in_json['version'] < 2.0:
+ raise RuntimeError('Incompatible JSON dictionary has been provided')
+
+ return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
+
+ def write(self, output_file: Path, output_dict: dict, output_order: list):
+ # Write json dictionary to file using ctt's version 2 format
+ out_json = {
+ "version": 2.0,
+ 'target': 'bcm2835',
+ "algorithms": [{f'{module.out_name}': output_dict[module]} for module in output_order]
+ }
+
+ with open(output_file, 'w') as f:
+ f.write(self._pretty_print(out_json))
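The effect of custom_elems is easiest to see on a small dictionary. A sketch with made-up content (a real tuning dict would also carry the version/target/algorithms keys that _pretty_print() enforces):

```python
import json

from libtuning.generators.raspberrypi_output import Encoder

data = {'rpi.alsc': {'table': list(range(32))}}
# 'table' is in custom_elems, so the 32 numbers are wrapped at 16 per row
print(json.dumps(data, cls=Encoder, indent=4, sort_keys=False))
```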
diff --git a/utils/tuning/libtuning/generators/yaml_output.py b/utils/tuning/libtuning/generators/yaml_output.py
new file mode 100644
index 00000000..c490081d
--- /dev/null
+++ b/utils/tuning/libtuning/generators/yaml_output.py
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright 2022 Paul Elder <paul.elder@ideasonboard.com>
+#
+# Generate tuning file in YAML format
+
+from .generator import Generator
+
+from numbers import Number
+from pathlib import Path
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+class YamlOutput(Generator):
+ def __init__(self):
+ super().__init__()
+
+ def _stringify_number_list(self, listt: list):
+ line_wrap = 80
+
+ line = '[ ' + ', '.join([str(x) for x in listt]) + ' ]'
+ if len(line) <= line_wrap:
+ return [line]
+
+ out_lines = ['[']
+ line = ' '
+ for x in listt:
+ x_str = str(x)
+ # If the first number is longer than line_wrap, it'll add an extra line
+ if len(line) + len(x_str) > line_wrap:
+ out_lines.append(line)
+ line = f' {x_str},'
+ continue
+ line += f' {x_str},'
+ out_lines.append(line)
+ out_lines.append(']')
+
+ return out_lines
+
+ # @return Array of lines, and a boolean indicating whether all elements were numbers
+ def _stringify_list(self, listt: list):
+ out_lines = []
+
+ all_numbers = all(isinstance(x, Number) for x in listt)
+
+ if all_numbers:
+ return self._stringify_number_list(listt), True
+
+ for value in listt:
+ if isinstance(value, Number):
+ out_lines.append(f'- {str(value)}')
+ elif isinstance(value, str):
+ out_lines.append(f'- "{value}"')
+ elif isinstance(value, list):
+ lines, all_numbers = self._stringify_list(value)
+
+ if all_numbers:
+ out_lines.append(f'- {lines[0]}')
+ out_lines += [f' {line}' for line in lines[1:]]
+ else:
+ out_lines.append('-')
+ out_lines += [f' {line}' for line in lines]
+ elif isinstance(value, dict):
+ lines = self._stringify_dict(value)
+ out_lines.append(f'- {lines[0]}')
+ out_lines += [f' {line}' for line in lines[1:]]
+
+ return out_lines, False
+
+ def _stringify_dict(self, dictt: dict):
+ out_lines = []
+
+ for key in dictt:
+ value = dictt[key]
+
+ if isinstance(value, Number):
+ out_lines.append(f'{key}: {str(value)}')
+ elif isinstance(value, str):
+ out_lines.append(f'{key}: "{value}"')
+ elif isinstance(value, list):
+ lines, all_numbers = self._stringify_list(value)
+
+ if all_numbers:
+ out_lines.append(f'{key}: {lines[0]}')
+ out_lines += [f'{" " * (len(key) + 2)}{line}' for line in lines[1:]]
+ else:
+ out_lines.append(f'{key}:')
+ out_lines += [f' {line}' for line in lines]
+ elif isinstance(value, dict):
+ lines = self._stringify_dict(value)
+ out_lines.append(f'{key}:')
+ out_lines += [f' {line}' for line in lines]
+
+ return out_lines
+
+ def write(self, output_file: Path, output_dict: dict, output_order: list):
+ out_lines = [
+ '%YAML 1.1',
+ '---',
+ 'version: 1',
+ # No need to condition this, as libtuning already guarantees that
+ # we have at least one module. Even if the module has no output,
+ # its presence is meaningful.
+ 'algorithms:'
+ ]
+
+ for module in output_order:
+ if module not in output_dict:
+ continue
+
+ out_lines.append(f' - {module.out_name}:')
+
+ if len(output_dict[module]) == 0:
+ continue
+
+ if not isinstance(output_dict[module], dict):
+ logger.error(f'Output of {module.type} is not a dictionary')
+ continue
+
+ lines = self._stringify_dict(output_dict[module])
+ out_lines += [f' {line}' for line in lines]
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ for line in out_lines:
+ f.write(f'{line}\n')
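A minimal sketch of driving YamlOutput directly; the module object is faked with a SimpleNamespace, since write() only needs out_name and membership in output_dict:

```python
from pathlib import Path
from types import SimpleNamespace

from libtuning.generators.yaml_output import YamlOutput

agc = SimpleNamespace(out_name='Agc', type='agc')

output = YamlOutput()
output.write(Path('tuning.yaml'), {agc: {'metering': [1, 2, 3]}}, [agc])
# tuning.yaml now contains an 'algorithms:' list with one 'Agc:' entry
```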
diff --git a/utils/tuning/libtuning/gradient.py b/utils/tuning/libtuning/gradient.py
new file mode 100644
index 00000000..b643f502
--- /dev/null
+++ b/utils/tuning/libtuning/gradient.py
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Gradients that can be used to distribute or map numbers
+
+import libtuning as lt
+
+import math
+from numbers import Number
+
+
+# @brief Gradient for how to allocate pixels to sectors
+# @description There are no parameters for the gradients as the domain is the
+# number of pixels and the range is the number of sectors, and
+# there is only one curve that has a startpoint and endpoint at
+# (0, 0) and at (#pixels, #sectors). The exception is for curves
+# that *do* have multiple solutions for only two points, such as
+# gaussian, and curves of higher polynomial orders if we had them.
+#
+# \todo There will probably be a helper in the Gradient class, as I have a
+# feeling that all the other curves (besides Linear and Gaussian) can be
+# implemented in the same way.
+class Gradient(object):
+ def __init__(self):
+ pass
+
+ # @brief Distribute pixels into sectors (only in one dimension)
+ # @param domain Number of pixels
+ # @param sectors Number of sectors
+ # @return A list of number of pixels in each sector
+ def distribute(self, domain: list, sectors: list) -> list:
+ raise NotImplementedError
+
+ # @brief Map a number on a curve
+ # @param domain Domain of the curve
+ # @param rang Range of the curve
+ # @param x Input on the domain of the curve
+ # @return y from the range of the curve
+ def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
+ raise NotImplementedError
+
+
+class Linear(Gradient):
+ # @param remainder Mode of handling remainder
+ def __init__(self, remainder: lt.Remainder = lt.Remainder.Float):
+ self.remainder = remainder
+
+ def distribute(self, domain: list, sectors: list) -> list:
+ size = domain / sectors
+ rem = domain % sectors
+
+ if rem == 0:
+ return [int(size)] * sectors
+
+ size = math.ceil(size)
+ rem = domain % size
+ output_sectors = [int(size)] * (sectors - 1)
+
+ if self.remainder == lt.Remainder.Float:
+ size = domain / sectors
+ output_sectors = [size] * sectors
+ elif self.remainder == lt.Remainder.DistributeFront:
+ output_sectors.append(int(rem))
+ elif self.remainder == lt.Remainder.DistributeBack:
+ output_sectors.insert(0, int(rem))
+ else:
+ raise ValueError
+
+ return output_sectors
+
+ def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
+ m = (rang[1] - rang[0]) / (domain[1] - domain[0])
+ b = rang[0] - m * domain[0]
+ return m * x + b
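A short usage sketch of the Linear gradient (values chosen arbitrarily):

```python
import libtuning as lt
from libtuning.gradient import Linear

g = Linear(lt.Remainder.DistributeFront)
# 10 pixels into 4 sectors: three full sectors of 3, remainder sector of 1
print(g.distribute(10, 4))           # [3, 3, 3, 1]
# Linear map of x=5 from domain (0, 10) onto range (0, 100)
print(g.map((0, 10), (0, 100), 5))   # 50.0
```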
diff --git a/utils/tuning/libtuning/image.py b/utils/tuning/libtuning/image.py
new file mode 100644
index 00000000..ecd334bd
--- /dev/null
+++ b/utils/tuning/libtuning/image.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# Container for an image and associated metadata
+
+import binascii
+import numpy as np
+from pathlib import Path
+import pyexiv2 as pyexif
+import rawpy as raw
+import re
+
+import libtuning as lt
+import libtuning.utils as utils
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class Image:
+ def __init__(self, path: Path):
+ self.path = path
+ self.lsc_only = False
+ self.color = -1
+ self.lux = -1
+ self.macbeth = None
+
+ try:
+ self._load_metadata_exif()
+ except Exception as e:
+ logger.error(f'Failed to load metadata from {self.path}: {e}')
+ raise e
+
+ try:
+ self._read_image_dng()
+ except Exception as e:
+ logger.error(f'Failed to load image data from {self.path}: {e}')
+ raise e
+
+ @property
+ def name(self):
+ return self.path.name
+
+ # May raise KeyError as there are too many to check
+ def _load_metadata_exif(self):
+ # RawPy doesn't load all the image tags that we need, so we use py3exiv2
+ metadata = pyexif.ImageMetadata(str(self.path))
+ metadata.read()
+
+ # The DNG and TIFF/EP specifications use different IFDs to store the
+ # raw image data and the Exif tags. DNG stores them in a SubIFD and in
+ # an Exif IFD respectively (named "SubImage1" and "Photo" by pyexiv2),
+ # while TIFF/EP stores them both in IFD0 (name "Image"). Both are used
+ # in "DNG" files, with libcamera-apps following the DNG recommendation
+ # and applications based on picamera2 following TIFF/EP.
+ #
+ # This code detects which tags are being used, and therefore extracts the
+ # correct values.
+ try:
+ self.w = metadata['Exif.SubImage1.ImageWidth'].value
+ subimage = 'SubImage1'
+ photo = 'Photo'
+ except KeyError:
+ self.w = metadata['Exif.Image.ImageWidth'].value
+ subimage = 'Image'
+ photo = 'Image'
+ self.pad = 0
+ self.h = metadata[f'Exif.{subimage}.ImageLength'].value
+ white = metadata[f'Exif.{subimage}.WhiteLevel'].value
+ self.sigbits = int(white).bit_length()
+ self.fmt = (self.sigbits - 4) // 2
+ self.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
+ self.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
+ self.againQ8_norm = self.againQ8 / 256
+ self.camName = metadata['Exif.Image.Model'].value
+ self.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
+ self.blacklevel_16 = self.blacklevel << (16 - self.sigbits)
+
+ # Channel order depending on bayer pattern
+ # The key is the order given by exif, where 0 is R, 1 is G, and 2 is B
+ # The value is the index where the color can be found, where the first
+ # is R, then G, then G, then B.
+ bayer_case = {
+ '0 1 1 2': (lt.Color.R, lt.Color.GR, lt.Color.GB, lt.Color.B),
+ '1 2 0 1': (lt.Color.GB, lt.Color.B, lt.Color.R, lt.Color.GR),
+ '2 1 1 0': (lt.Color.B, lt.Color.GB, lt.Color.GR, lt.Color.R),
+ '1 0 2 1': (lt.Color.GR, lt.Color.R, lt.Color.B, lt.Color.GB)
+ }
+ # Note: This needs to be in IFD0
+ cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
+ self.order = bayer_case[cfa_pattern]
+
+ def _read_image_dng(self):
+ raw_im = raw.imread(str(self.path))
+ raw_data = raw_im.raw_image
+ shift = 16 - self.sigbits
+ c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
+ c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
+ c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
+ c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
+ self.channels = [c0, c1, c2, c3]
+ # Reorder the channels into R, GR, GB, B
+ self.channels = [self.channels[i] for i in self.order]
+
+ # \todo Move this to macbeth.py
+ def get_patches(self, cen_coords, size=16):
+ saturated = False
+
+ # Obtain channel widths and heights
+ ch_w, ch_h = self.w, self.h
+ cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
+ self.cen_coords = cen_coords
+
+ # Squares are ordered by stacking macbeth chart columns from left to
+ # right. Some useful patch indices:
+ # white = 3
+ # black = 23
+ # 'reds' = 9, 10
+ # 'blues' = 2, 5, 8, 20, 22
+ # 'greens' = 6, 12, 17
+ # greyscale = 3, 7, 11, 15, 19, 23
+ all_patches = []
+ for ch in self.channels:
+ ch_patches = []
+ for cen in cen_coords:
+ # Macbeth centre is placed at top left of central 2x2 patch to
+ # account for rounding. Patch pixels are sorted by pixel
+ # brightness so spatial information is lost.
+ patch = ch[cen[1] - 7:cen[1] + 9, cen[0] - 7:cen[0] + 9].flatten()
+ patch.sort()
+ if patch[-5] == (2**self.sigbits - 1) * 2**(16 - self.sigbits):
+ saturated = True
+ ch_patches.append(patch)
+
+ all_patches.append(ch_patches)
+
+ self.patches = np.array(all_patches)
+
+ return not saturated
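The 2x2 Bayer deinterleave in _read_image_dng() can be illustrated on a synthetic mosaic (a standalone sketch; in an RGGB image c0..c3 would be R, GR, GB, B before reordering):

```python
import numpy as np

raw = np.arange(16).reshape(4, 4)
c0 = raw[0::2, 0::2]  # even rows, even columns
c1 = raw[0::2, 1::2]  # even rows, odd columns
c2 = raw[1::2, 0::2]  # odd rows, even columns
c3 = raw[1::2, 1::2]  # odd rows, odd columns
print(c0)  # [[ 0  2]
           #  [ 8 10]]
```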
diff --git a/utils/tuning/libtuning/libtuning.py b/utils/tuning/libtuning/libtuning.py
new file mode 100644
index 00000000..bac57323
--- /dev/null
+++ b/utils/tuning/libtuning/libtuning.py
@@ -0,0 +1,212 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# An infrastructure for camera tuning tools
+
+import argparse
+import logging
+
+import libtuning as lt
+import libtuning.utils as utils
+
+from enum import Enum, IntEnum
+
+logger = logging.getLogger(__name__)
+
+class Color(IntEnum):
+ R = 0
+ GR = 1
+ GB = 2
+ B = 3
+
+
+class Debug(Enum):
+ Plot = 1
+
+
+# @brief What to do with the leftover pixels after dividing them into ALSC
+# sectors, when the division gradient is uniform
+# @var Float Force floating point division so all sectors divide equally
+# @var DistributeFront Divide the remainder equally (until running out,
+# obviously) into the existing sectors, starting from the front
+# @var DistributeBack Same as DistributeFront but starting from the back
+class Remainder(Enum):
+ Float = 0
+ DistributeFront = 1
+ DistributeBack = 2
+
+
+# @brief A helper class to contain a default value for a module configuration
+# parameter
+class Param(object):
+ # @var Required The value contained in this instance is irrelevant, and the
+ # value must be provided by the tuning configuration file.
+ # @var Optional If the value is not provided by the tuning configuration
+ # file, then the value contained in this instance will be used instead.
+ # @var Hardcode The value contained in this instance will be used
+ class Mode(Enum):
+ Required = 0
+ Optional = 1
+ Hardcode = 2
+
+ # @param name Name of the parameter. Shall match the name used in the
+ # configuration file for the parameter
+ # @param required Mode of the parameter, as a Param.Mode value. It
+ # controls how get_value() resolves the value from the config
+ # @param val Default value (only relevant if mode is Optional)
+ def __init__(self, name: str, required: Mode, val=None):
+ self.name = name
+ self.__required = required
+ self.val = val
+
+ def get_value(self, config: dict):
+ if self.__required is self.Mode.Hardcode:
+ return self.val
+
+ if self.__required is self.Mode.Required and self.name not in config:
+ raise ValueError(f'Parameter {self.name} is required but not provided in the configuration')
+
+ return config[self.name] if self.name in config else self.val
+
+ @property
+ def required(self):
+ return self.__required is self.Mode.Required
+
+ # @brief Used by libtuning to auto-generate help information for the tuning
+ # script on the available parameters for the configuration file
+ # \todo Implement this
+ @property
+ def info(self):
+ raise NotImplementedError
+
+
+class Tuner(object):
+
+ # External functions
+
+ def __init__(self, platform_name):
+ self.name = platform_name
+ self.modules = []
+ self.parser = None
+ self.generator = None
+ self.output_order = []
+ self.config = {}
+ self.output = {}
+
+ def add(self, module):
+ if isinstance(module, list):
+ self.modules.extend(module)
+ else:
+ self.modules.append(module)
+
+ def set_input_parser(self, parser):
+ self.parser = parser
+
+ def set_output_formatter(self, output):
+ self.generator = output
+
+ def set_output_order(self, modules):
+ self.output_order = modules
+
+ # @brief Convert classes in self.output_order to the instances in self.modules
+ def _prepare_output_order(self):
+ output_order = self.output_order
+ self.output_order = []
+ for module_type in output_order:
+ modules = [module for module in self.modules if module.type == module_type.type]
+ if len(modules) > 1:
+ logger.error(f'Multiple modules found for module type "{module_type.type}"')
+ return False
+ if len(modules) < 1:
+ logger.error(f'No module found for module type "{module_type.type}"')
+ return False
+ self.output_order.append(modules[0])
+
+ return True
+
+ # \todo Validate parser and generator at Tuner construction time?
+ def _validate_settings(self):
+ if self.parser is None:
+ logger.error('Missing parser')
+ return False
+
+ if self.generator is None:
+ logger.error('Missing generator')
+ return False
+
+ if len(self.modules) == 0:
+ logger.error('No modules added')
+ return False
+
+ if len(self.output_order) != len(self.modules):
+ logger.error('Number of outputs does not match number of modules')
+ return False
+
+ return True
+
+ def _process_args(self, argv, platform_name):
+ parser = argparse.ArgumentParser(description=f'Camera Tuning for {platform_name}')
+ parser.add_argument('-i', '--input', type=str, required=True,
+ help='''Directory containing calibration images (required).
+ Images for ALSC must be named "alsc_{Color Temperature}k_1[u].dng",
+ and all other images must be named "{Color Temperature}k_{Lux Level}l.dng"''')
+ parser.add_argument('-o', '--output', type=str, required=True,
+ help='Output file (required)')
+ # It is not our duty to scan all modules to figure out their default
+ # options, so simply return an empty configuration if none is provided.
+ parser.add_argument('-c', '--config', type=str, default='',
+ help='Config file (optional)')
+ # \todo Check if we really need this or if stderr is good enough, or if
+ # we want a better logging infrastructure with log levels
+ parser.add_argument('-l', '--log', type=str, default=None,
+ help='Output log file (optional)')
+ return parser.parse_args(argv[1:])
+
+ def run(self, argv):
+ args = self._process_args(argv, self.name)
+ if args is None:
+ return -1
+
+ if not self._validate_settings():
+ return -1
+
+ if not self._prepare_output_order():
+ return -1
+
+ if len(args.config) > 0:
+ self.config, disable = self.parser.parse(args.config, self.modules)
+ else:
+ self.config = {'general': {}}
+ disable = []
+
+ # Remove disabled modules
+ for module in disable:
+ if module in self.modules:
+ self.modules.remove(module)
+
+ for module in self.modules:
+ if not module.validate_config(self.config):
+ logger.error(f'Config is invalid for module {module.type}')
+ return -1
+
+ has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules)
+ # Only one LSC module allowed
+ has_only_lsc = has_lsc and len(self.modules) == 1
+
+ images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc)
+ if images is None or len(images) == 0:
+ logger.error('No images were found, or could be loaded')
+ return -1
+
+ # Do the tuning
+ for module in self.modules:
+ out = module.process(self.config, images, self.output)
+ if out is None:
+ logger.warning(f'Module {module.hr_name} failed to process...')
+ continue
+ self.output[module] = out
+
+ self.generator.write(args.output, self.output, self.output_order)
+
+ return 0
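Putting it together, a platform tuning script in the spirit of rkisp1.py (listed in the diffstat) would look roughly like the sketch below; the exact module class name is an assumption here:

```python
import sys

import libtuning as lt
from libtuning.generators import YamlOutput
from libtuning.parsers import YamlParser
# Class name assumed for illustration; see libtuning/modules/awb/rkisp1.py
from libtuning.modules.awb.rkisp1 import AWBRkISP1

awb = AWBRkISP1()

tuner = lt.Tuner('RkISP1')
tuner.add([awb])
tuner.set_input_parser(YamlParser())
tuner.set_output_formatter(YamlOutput())
tuner.set_output_order([awb])

sys.exit(tuner.run(sys.argv))
```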
diff --git a/utils/tuning/libtuning/macbeth.py b/utils/tuning/libtuning/macbeth.py
new file mode 100644
index 00000000..4a2006b0
--- /dev/null
+++ b/utils/tuning/libtuning/macbeth.py
@@ -0,0 +1,537 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Ideas on Board Oy
+#
+# Locate and extract Macbeth charts from images
+# (Copied from: ctt_macbeth_locator.py)
+
+# \todo Add debugging
+
+import cv2
+import os
+from pathlib import Path
+import numpy as np
+import warnings
+import logging
+from sklearn import cluster
+
+from .ctt_ransac import get_square_verts, get_square_centres
+from .image import Image
+
+logger = logging.getLogger(__name__)
+
+
+class MacbethError(Exception):
+ pass
+
+
+# Reshape image to a fixed width without distortion; returns the image and
+# the scale factor
+def reshape(img, width):
+ factor = width / img.shape[0]
+ return cv2.resize(img, None, fx=factor, fy=factor), factor
+
+
+# Correlation function to quantify match
+def correlate(im1, im2):
+ f1 = im1.flatten()
+ f2 = im2.flatten()
+ cor = np.corrcoef(f1, f2)
+ return cor[0][1]
+
+
+# @brief Compute coordinates of macbeth chart vertices and square centres
+# @return (max_cor, best_map_col_norm, fit_coords, success)
+#
+# Failures raise a MacbethError internally with likely reasons attached for
+# debugging, and a success flag is returned. Additionally, the match is
+# scored with a confidence value.
+#
+# Brief explanation of the macbeth chart locating algorithm:
+# - Find rectangles within image
+# - Take rectangles within percentage offset of median perimeter. The
+# assumption is that these will be the macbeth squares
+# - For each potential square, find the 24 possible macbeth centre locations
+# that would produce a square in that location
+# - Find clusters of potential macbeth chart centres to find the potential
+# macbeth centres with the most votes, i.e. the most likely ones
+# - For each potential macbeth centre, use the centres of the squares that
+# voted for it to find macbeth chart corners
+# - For each set of corners, transform the possible match into normalised
+# space and correlate with a reference chart to evaluate the match
+# - Select the highest correlation as the macbeth chart match, returning the
+# correlation as the confidence score
+#
+# \todo Clean this up
+def get_macbeth_chart(img, ref_data):
+ ref, ref_w, ref_h, ref_corns = ref_data
+
+    # The code will raise and catch a MacbethError in case of a problem,
+    # trying to give some likely reasons why the problem occurred, hence the
+    # try/except
+ try:
+ # Obtain image, convert to grayscale and normalise
+ src = img
+ src, factor = reshape(src, 200)
+ original = src.copy()
+ a = 125 / np.average(src)
+ src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
+
+        # This code checks whether there are separate colour channels. In the
+        # past the macbeth locator ran on jpgs, and this makes it robust to
+        # different filetypes. Note that a jpg has 4x the pixels of the
+        # average bayer channel, so coordinates must be doubled.
+
+        # This is best done in img_load.py in the get_patches method. The
+        # coordinates and image width and height must be divided by two if
+        # the macbeth locator has been run on a demosaicked image.
+ if len(src_norm.shape) == 3:
+ src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
+ else:
+ src_bw = src_norm
+ original_bw = src_bw.copy()
+
+ # Obtain image edges
+ sigma = 2
+ src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
+ t1, t2 = 50, 100
+ edges = cv2.Canny(src_bw, t1, t2)
+
+ # Dilate edges to prevent self-intersections in contours
+ k_size = 2
+ kernel = np.ones((k_size, k_size))
+ its = 1
+ edges = cv2.dilate(edges, kernel, iterations=its)
+
+ # Find contours in image
+ conts, _ = cv2.findContours(edges, cv2.RETR_TREE,
+ cv2.CHAIN_APPROX_NONE)
+ if len(conts) == 0:
+ raise MacbethError(
+ '\nWARNING: No macbeth chart found!'
+ '\nNo contours found in image\n'
+ 'Possible problems:\n'
+ '- Macbeth chart is too dark or bright\n'
+ '- Macbeth chart is occluded\n'
+ )
+
+ # Find quadrilateral contours
+ epsilon = 0.07
+ conts_per = []
+ for i in range(len(conts)):
+ per = cv2.arcLength(conts[i], True)
+ poly = cv2.approxPolyDP(conts[i], epsilon * per, True)
+ if len(poly) == 4 and cv2.isContourConvex(poly):
+ conts_per.append((poly, per))
+
+ if len(conts_per) == 0:
+ raise MacbethError(
+ '\nWARNING: No macbeth chart found!'
+ '\nNo quadrilateral contours found'
+ '\nPossible problems:\n'
+ '- Macbeth chart is too dark or bright\n'
+ '- Macbeth chart is occluded\n'
+ '- Macbeth chart is out of camera plane\n'
+ )
+
+        # Sort contours by perimeter and keep those within a percentage of
+        # the median perimeter
+ conts_per = sorted(conts_per, key=lambda x: x[1])
+ med_per = conts_per[int(len(conts_per) / 2)][1]
+ side = med_per / 4
+ perc = 0.1
+ med_low, med_high = med_per * (1 - perc), med_per * (1 + perc)
+        squares = [poly for poly, per in conts_per
+                   if med_low <= per <= med_high]
+
+        # Obtain coordinates of normalised macbeth chart and squares
+ square_verts, mac_norm = get_square_verts(0.06)
+ # For each square guess, find 24 possible macbeth chart centres
+ mac_mids = []
+ squares_raw = []
+ for i in range(len(squares)):
+ square = squares[i]
+ squares_raw.append(square)
+
+            # Convert quads to rotated rectangles. This is required as the
+            # 'squares' are usually quite irregular quadrilaterals, so
+            # performing a transform would result in exaggerated warping and
+            # inaccurate macbeth chart centre placement
+ rect = cv2.minAreaRect(square)
+ square = cv2.boxPoints(rect).astype(np.float32)
+
+ # Reorder vertices to prevent 'hourglass shape'
+ square = sorted(square, key=lambda x: x[0])
+ square_1 = sorted(square[:2], key=lambda x: x[1])
+ square_2 = sorted(square[2:], key=lambda x: -x[1])
+ square = np.array(np.concatenate((square_1, square_2)), np.float32)
+ square = np.reshape(square, (4, 2)).astype(np.float32)
+ squares[i] = square
+
+            # Find 24 possible macbeth chart centres by transforming normalised
+            # macbeth square vertices onto candidate square vertices found in
+            # the image
+ for j in range(len(square_verts)):
+ verts = square_verts[j]
+ p_mat = cv2.getPerspectiveTransform(verts, square)
+ mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
+ mac_guess = np.round(mac_guess).astype(np.int32)
+
+ mac_mid = np.mean(mac_guess, axis=1)
+ mac_mids.append([mac_mid, (i, j)])
+
+ if len(mac_mids) == 0:
+ raise MacbethError(
+ '\nWARNING: No macbeth chart found!'
+ '\nNo possible macbeth charts found within image'
+ '\nPossible problems:\n'
+ '- Part of the macbeth chart is outside the image\n'
+ '- Quadrilaterals in image background\n'
+ )
+
+ # Reshape data
+ for i in range(len(mac_mids)):
+ mac_mids[i][0] = mac_mids[i][0][0]
+
+ # Find where midpoints cluster to identify most likely macbeth centres
+ clustering = cluster.AgglomerativeClustering(
+ n_clusters=None,
+ compute_full_tree=True,
+ distance_threshold=side * 2
+ )
+ mac_mids_list = [x[0] for x in mac_mids]
+
+ if len(mac_mids_list) == 1:
+ # Special case of only one valid centre found (probably not needed)
+ clus_list = []
+ clus_list.append([mac_mids, len(mac_mids)])
+
+ else:
+ clustering.fit(mac_mids_list)
+
+ # Create list of all clusters
+ clus_list = []
+ if clustering.n_clusters_ > 1:
+ for i in range(clustering.labels_.max() + 1):
+ indices = [j for j, x in enumerate(clustering.labels_) if x == i]
+ clus = []
+ for index in indices:
+ clus.append(mac_mids[index])
+ clus_list.append([clus, len(clus)])
+ clus_list.sort(key=lambda x: -x[1])
+
+ elif clustering.n_clusters_ == 1:
+ # Special case of only one cluster found
+ clus_list.append([mac_mids, len(mac_mids)])
+ else:
+ raise MacbethError(
+                    '\nWARNING: No macbeth chart found!'
+ '\nNo clusters found'
+ '\nPossible problems:\n'
+ '- NA\n'
+ )
+
+ # Keep only clusters with enough votes
+ clus_len_max = clus_list[0][1]
+ clus_tol = 0.7
+ for i in range(len(clus_list)):
+ if clus_list[i][1] < clus_len_max * clus_tol:
+ clus_list = clus_list[:i]
+ break
+ cent = np.mean(clus_list[i][0], axis=0)[0]
+ clus_list[i].append(cent)
+
+ # Get centres of each normalised square
+ reference = get_square_centres(0.06)
+
+ # For each possible macbeth chart, transform image into
+ # normalised space and find correlation with reference
+ max_cor = 0
+ best_map = None
+ best_fit = None
+ best_cen_fit = None
+ best_ref_mat = None
+
+ for clus in clus_list:
+ clus = clus[0]
+ sq_cents = []
+ ref_cents = []
+ i_list = [p[1][0] for p in clus]
+ for point in clus:
+ i, j = point[1]
+
+ # Remove any square that voted for two different points within
+ # the same cluster. This causes the same point in the image to be
+ # mapped to two different reference square centres, resulting in
+ # a very distorted perspective transform since cv2.findHomography
+ # simply minimises error.
+ # This phenomenon is not particularly likely to occur due to the
+ # enforced distance threshold in the clustering fit but it is
+ # best to keep this in just in case.
+ if i_list.count(i) == 1:
+ square = squares_raw[i]
+ sq_cent = np.mean(square, axis=0)
+ ref_cent = reference[j]
+ sq_cents.append(sq_cent)
+ ref_cents.append(ref_cent)
+
+ # At least four squares need to have voted for a centre in
+ # order for a transform to be found
+ if len(sq_cents) < 4:
+ raise MacbethError(
+ '\nWARNING: No macbeth chart found!'
+ '\nNot enough squares found'
+ '\nPossible problems:\n'
+ '- Macbeth chart is occluded\n'
+                    '- Macbeth chart is too dark or bright\n'
+ )
+
+ ref_cents = np.array(ref_cents)
+ sq_cents = np.array(sq_cents)
+
+ # Find best fit transform from normalised centres to image
+ h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
+            if h_mat is None:
+                raise MacbethError(
+                    '\nERROR: Could not find a homography\n'
+                )
+
+ # Transform normalised corners and centres into image space
+ mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
+ mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
+
+ # Transform located corners into reference space
+ ref_mat = cv2.getPerspectiveTransform(
+ mac_fit,
+ np.array([ref_corns])
+ )
+ map_to_ref = cv2.warpPerspective(
+ original_bw, ref_mat,
+ (ref_w, ref_h)
+ )
+
+            # Normalise brightness
+ a = 125 / np.average(map_to_ref)
+ map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
+
+ # Find correlation with bw reference macbeth
+ cor = correlate(map_to_ref, ref)
+
+ # Keep only if best correlation
+ if cor > max_cor:
+ max_cor = cor
+ best_map = map_to_ref
+ best_fit = mac_fit
+ best_cen_fit = mac_cen_fit
+ best_ref_mat = ref_mat
+
+ # Rotate macbeth by pi and recorrelate in case macbeth chart is
+ # upside-down
+ mac_fit_inv = np.array(
+ ([[mac_fit[0][2], mac_fit[0][3],
+ mac_fit[0][0], mac_fit[0][1]]])
+ )
+ mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
+ ref_mat = cv2.getPerspectiveTransform(
+ mac_fit_inv,
+ np.array([ref_corns])
+ )
+ map_to_ref = cv2.warpPerspective(
+ original_bw, ref_mat,
+ (ref_w, ref_h)
+ )
+ a = 125 / np.average(map_to_ref)
+ map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
+ cor = correlate(map_to_ref, ref)
+ if cor > max_cor:
+ max_cor = cor
+ best_map = map_to_ref
+ best_fit = mac_fit_inv
+ best_cen_fit = mac_cen_fit_inv
+ best_ref_mat = ref_mat
+
+ # Check best match is above threshold
+ cor_thresh = 0.6
+ if max_cor < cor_thresh:
+ raise MacbethError(
+ '\nWARNING: Correlation too low'
+ '\nPossible problems:\n'
+ '- Bad lighting conditions\n'
+ '- Macbeth chart is occluded\n'
+ '- Background is too noisy\n'
+ '- Macbeth chart is out of camera plane\n'
+ )
+
+ # Represent coloured macbeth in reference space
+ best_map_col = cv2.warpPerspective(
+ original, best_ref_mat, (ref_w, ref_h)
+ )
+ best_map_col = cv2.resize(
+ best_map_col, None, fx=4, fy=4
+ )
+ a = 125 / np.average(best_map_col)
+ best_map_col_norm = cv2.convertScaleAbs(
+ best_map_col, alpha=a, beta=0
+ )
+
+ # Rescale coordinates to original image size
+ fit_coords = (best_fit / factor, best_cen_fit / factor)
+
+ return (max_cor, best_map_col_norm, fit_coords, True)
+
+ # Catch macbeth errors and continue with code
+ except MacbethError as error:
+        # \todo This happens so many times in a normal run that it shadows
+        # all the relevant output
+ # logger.warning(error)
+ return (0, None, None, False)
+
+
+def find_macbeth(img, mac_config):
+ small_chart = mac_config['small']
+ show = mac_config['show']
+
+ # Catch the warnings
+ warnings.simplefilter("ignore")
+ warnings.warn("runtime", RuntimeWarning)
+
+ # Reference macbeth chart is created that will be correlated with the
+ # located macbeth chart guess to produce a confidence value for the match.
+ script_dir = Path(os.path.realpath(os.path.dirname(__file__)))
+ macbeth_ref_path = script_dir.joinpath('macbeth_ref.pgm')
+ ref = cv2.imread(str(macbeth_ref_path), flags=cv2.IMREAD_GRAYSCALE)
+ ref_w = 120
+ ref_h = 80
+ rc1 = (0, 0)
+ rc2 = (0, ref_h)
+ rc3 = (ref_w, ref_h)
+ rc4 = (ref_w, 0)
+ ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
+ ref_data = (ref, ref_w, ref_h, ref_corns)
+
+ # Locate macbeth chart
+ cor, mac, coords, ret = get_macbeth_chart(img, ref_data)
+
+    # The following bits of code try to fix common problems with simple
+    # techniques. If, at this or any later point, the best correlation is
+    # above 0.75, nothing more is tried, as this confidence is high enough to
+    # ensure reliable macbeth square centre placement.
+
+ # Keep a list that will include this and any brightened up versions of
+ # the image for reuse.
+ all_images = [img]
+
+ for brightness in [2, 4]:
+ if cor >= 0.75:
+ break
+ img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0)
+ all_images.append(img_br)
+ cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data)
+ if cor_b > cor:
+ cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b
+
+ # In case macbeth chart is too small, take a selection of the image and
+ # attempt to locate macbeth chart within that. The scale increment is
+ # root 2
+
+    # These variables will be used to transform the coordinates found at
+    # smaller scales back into the original image. If ii is still -1 after
+    # this section, the search was unsuccessful
+ ii = -1
+ w_best = 0
+ h_best = 0
+ d_best = 100
+
+    # d_best records the scale of the best match. Macbeth charts are only
+    # looked for at one scale increment smaller than the current best match,
+    # in order to avoid unnecessarily searching for macbeth charts at small
+    # scales. If a macbeth chart has already been found, set d_best to 0
+ if cor != 0:
+ d_best = 0
+
+ for index, pair in enumerate([{'sel': 2 / 3, 'inc': 1 / 6},
+ {'sel': 1 / 2, 'inc': 1 / 8},
+ {'sel': 1 / 3, 'inc': 1 / 12},
+ {'sel': 1 / 4, 'inc': 1 / 16}]):
+ if cor >= 0.75:
+ break
+
+        # Check if we need to look for macbeth charts at even smaller scales.
+        # This slows the code down significantly and has therefore been
+        # omitted by default; however, it is not unusably slow, so it might
+        # be useful if the macbeth chart is too small to be picked up by the
+        # current subselections. Use this for macbeth charts with side
+        # lengths around 1/5 of the image dimensions (and smaller...?). It
+        # is, however, recommended that macbeth charts take up as large a
+        # proportion of the image as possible.
+ if index >= 2 and (not small_chart or d_best <= index - 1):
+ break
+
+ w, h = list(img.shape[:2])
+ # Set dimensions of the subselection and the step along each axis
+ # between selections
+ w_sel = int(w * pair['sel'])
+ h_sel = int(h * pair['sel'])
+ w_inc = int(w * pair['inc'])
+ h_inc = int(h * pair['inc'])
+
+ loop = int(((1 - pair['sel']) / pair['inc']) + 1)
+ # For each subselection, look for a macbeth chart
+ for img_br in all_images:
+ for i in range(loop):
+ for j in range(loop):
+ w_s, h_s = i * w_inc, j * h_inc
+ img_sel = img_br[w_s:w_s + w_sel, h_s:h_s + h_sel]
+ cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
+
+ # If the correlation is better than the best then record the
+ # scale and current subselection at which macbeth chart was
+ # found. Also record the coordinates, macbeth chart and message.
+ if cor_ij > cor:
+ cor = cor_ij
+ mac, coords, ret = mac_ij, coords_ij, ret_ij
+ ii, jj = i, j
+ w_best, h_best = w_inc, h_inc
+ d_best = index + 1
+
+ # Transform coordinates from subselection to original image
+ if ii != -1:
+ for a in range(len(coords)):
+ for b in range(len(coords[a][0])):
+ coords[a][0][b][1] += ii * w_best
+ coords[a][0][b][0] += jj * h_best
+
+ if not ret:
+ return None
+
+ coords_fit = coords
+ if cor < 0.75:
+ logger.warning(f'Low confidence {cor:.3f} for macbeth chart')
+
+ if show:
+ draw_macbeth_results(img, coords_fit)
+
+ return coords_fit
+
+
+def locate_macbeth(image: Image, config: dict):
+ # Find macbeth centres
+ av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16))
+ av_val = np.mean(av_chan)
+ if av_val < image.blacklevel_16 / (2**16) + 1 / 64:
+ logger.warning(f'Image {image.path.name} too dark')
+ return None
+
+ macbeth = find_macbeth(av_chan, config['general']['macbeth'])
+
+ if macbeth is None:
+ logger.warning(f'No macbeth chart found in {image.path.name}')
+ return None
+
+ mac_cen_coords = macbeth[1]
+ if not image.get_patches(mac_cen_coords):
+ logger.warning(f'Macbeth patches have saturated in {image.path.name}')
+ return None
+
+ image.macbeth = macbeth
+
+ return macbeth
diff --git a/utils/tuning/libtuning/macbeth_ref.pgm b/utils/tuning/libtuning/macbeth_ref.pgm
new file mode 100644
index 00000000..089ea91f
--- /dev/null
+++ b/utils/tuning/libtuning/macbeth_ref.pgm
@@ -0,0 +1,6 @@
+P5
+# SPDX-License-Identifier: BSD-2-Clause
+# Reference macbeth chart
+120 80
+255
+[binary raster data not shown]
diff --git a/utils/tuning/libtuning/modules/__init__.py b/utils/tuning/libtuning/modules/__init__.py
new file mode 100644
index 00000000..9ccabb0e
--- /dev/null
+++ b/utils/tuning/libtuning/modules/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
diff --git a/utils/tuning/libtuning/modules/agc/__init__.py b/utils/tuning/libtuning/modules/agc/__init__.py
new file mode 100644
index 00000000..4db9ca37
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.agc.agc import AGC
+from libtuning.modules.agc.rkisp1 import AGCRkISP1
diff --git a/utils/tuning/libtuning/modules/agc/agc.py b/utils/tuning/libtuning/modules/agc/agc.py
new file mode 100644
index 00000000..9c8899ba
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/agc.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from ..module import Module
+
+import libtuning as lt
+
+
+class AGC(Module):
+ type = 'agc'
+ hr_name = 'AGC (Base)'
+ out_name = 'GenericAGC'
+
+ # \todo Add sector shapes and stuff just like lsc
+ def __init__(self, *,
+ debug: list):
+ super().__init__()
+
+ self.debug = debug
diff --git a/utils/tuning/libtuning/modules/agc/rkisp1.py b/utils/tuning/libtuning/modules/agc/rkisp1.py
new file mode 100644
index 00000000..2dad3a09
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/rkisp1.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+#
+# rkisp1.py - AGC module for tuning rkisp1
+
+from .agc import AGC
+
+import libtuning as lt
+
+
+class AGCRkISP1(AGC):
+ hr_name = 'AGC (RkISP1)'
+ out_name = 'Agc'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't actually need anything from the config file
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def _generate_metering_modes(self) -> dict:
+ centre_weighted = [
+ 0, 0, 0, 0, 0,
+ 0, 6, 8, 6, 0,
+ 0, 8, 16, 8, 0,
+ 0, 6, 8, 6, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+ spot = [
+ 0, 0, 0, 0, 0,
+ 0, 2, 4, 2, 0,
+ 0, 4, 16, 4, 0,
+ 0, 2, 4, 2, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+        matrix = [1] * 25
+
+ return {
+ 'MeteringCentreWeighted': centre_weighted,
+ 'MeteringSpot': spot,
+ 'MeteringMatrix': matrix
+ }
+
+ def _generate_exposure_modes(self) -> dict:
+ normal = {'exposureTime': [100, 10000, 30000, 60000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+ short = {'exposureTime': [100, 5000, 10000, 20000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+
+ return {'ExposureNormal': normal, 'ExposureShort': short}
+
+ def _generate_constraint_modes(self) -> dict:
+ normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}}
+ highlight = {
+ 'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5},
+ 'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8}
+ }
+
+ return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
+
+    def _generate_y_target(self) -> float:
+ return 0.5
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ output['AeMeteringMode'] = self._generate_metering_modes()
+ output['AeExposureMode'] = self._generate_exposure_modes()
+ output['AeConstraintMode'] = self._generate_constraint_modes()
+ output['relativeLuminanceTarget'] = self._generate_y_target()
+
+ # \todo Debug functionality
+
+ return output
diff --git a/utils/tuning/libtuning/modules/awb/__init__.py b/utils/tuning/libtuning/modules/awb/__init__.py
new file mode 100644
index 00000000..2d67f10c
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+
+from libtuning.modules.awb.awb import AWB
+from libtuning.modules.awb.rkisp1 import AWBRkISP1
diff --git a/utils/tuning/libtuning/modules/awb/awb.py b/utils/tuning/libtuning/modules/awb/awb.py
new file mode 100644
index 00000000..c154cf3b
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/awb.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+
+import logging
+
+from ..module import Module
+
+from libtuning.ctt_awb import awb
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+class AWB(Module):
+ type = 'awb'
+ hr_name = 'AWB (Base)'
+ out_name = 'GenericAWB'
+
+ def __init__(self, *, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def do_calculation(self, images):
+ logger.info('Starting AWB calculation')
+
+ imgs = [img for img in images if img.macbeth is not None]
+
+ gains, _, _ = awb(imgs, None, None, False)
+ gains = np.reshape(gains, (-1, 3))
+
+ return [{
+ 'ct': int(v[0]),
+ 'gains': [float(1.0 / v[1]), float(1.0 / v[2])]
+ } for v in gains]
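+
+
+# For illustration, the returned list has the shape consumed by the tuning
+# file generator, e.g. (colour temperatures and gains are hypothetical):
+#
+#     [{'ct': 3000, 'gains': [1.5, 2.1]}, {'ct': 6500, 'gains': [2.2, 1.4]}]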
diff --git a/utils/tuning/libtuning/modules/awb/rkisp1.py b/utils/tuning/libtuning/modules/awb/rkisp1.py
new file mode 100644
index 00000000..0c95843b
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/rkisp1.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+#
+# AWB module for tuning rkisp1
+
+from .awb import AWB
+
+import libtuning as lt
+
+
+class AWBRkISP1(AWB):
+ hr_name = 'AWB (RkISP1)'
+ out_name = 'Awb'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ output['colourGains'] = self.do_calculation(images)
+
+ return output
diff --git a/utils/tuning/libtuning/modules/ccm/__init__.py b/utils/tuning/libtuning/modules/ccm/__init__.py
new file mode 100644
index 00000000..322602af
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.ccm.ccm import CCM
+from libtuning.modules.ccm.rkisp1 import CCMRkISP1
diff --git a/utils/tuning/libtuning/modules/ccm/ccm.py b/utils/tuning/libtuning/modules/ccm/ccm.py
new file mode 100644
index 00000000..18702f8d
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/ccm.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Base Ccm tuning module
+
+from ..module import Module
+
+from libtuning.ctt_ccm import ccm
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class CCM(Module):
+ type = 'ccm'
+ hr_name = 'CCM (Base)'
+ out_name = 'GenericCCM'
+
+ def __init__(self, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def do_calibration(self, images):
+ logger.info('Starting CCM calibration')
+
+ imgs = [img for img in images if img.macbeth is not None]
+
+        # \todo Take LSC calibration results into account.
+ cal_cr_list = None
+ cal_cb_list = None
+
+ try:
+ ccms = ccm(imgs, cal_cr_list, cal_cb_list)
+ except ArithmeticError:
+ logger.error('CCM calibration failed')
+ return None
+
+ return ccms
diff --git a/utils/tuning/libtuning/modules/ccm/rkisp1.py b/utils/tuning/libtuning/modules/ccm/rkisp1.py
new file mode 100644
index 00000000..be0252d9
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/rkisp1.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Ccm module for tuning rkisp1
+
+from .ccm import CCM
+
+
+class CCMRkISP1(CCM):
+ hr_name = 'Crosstalk Correction (RkISP1)'
+ out_name = 'Ccm'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't need anything from the config file.
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ ccms = self.do_calibration(images)
+ output['ccms'] = ccms
+
+ return output
diff --git a/utils/tuning/libtuning/modules/lsc/__init__.py b/utils/tuning/libtuning/modules/lsc/__init__.py
new file mode 100644
index 00000000..0ba4411b
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lsc/__init__.py
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.lsc.lsc import LSC
+from libtuning.modules.lsc.raspberrypi import ALSCRaspberryPi
+from libtuning.modules.lsc.rkisp1 import LSCRkISP1
diff --git a/utils/tuning/libtuning/modules/lsc/lsc.py b/utils/tuning/libtuning/modules/lsc/lsc.py
new file mode 100644
index 00000000..e0ca22eb
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lsc/lsc.py
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+
+from ..module import Module
+
+import libtuning as lt
+import libtuning.utils as utils
+
+import numpy as np
+
+
+class LSC(Module):
+ type = 'lsc'
+ hr_name = 'LSC (Base)'
+ out_name = 'GenericLSC'
+
+ def __init__(self, *,
+ debug: list,
+ sector_shape: tuple,
+ sector_x_gradient: lt.Gradient,
+ sector_y_gradient: lt.Gradient,
+ sector_average_function: lt.Average,
+ smoothing_function: lt.Smoothing):
+ super().__init__()
+
+ self.debug = debug
+
+ self.sector_shape = sector_shape
+ self.sector_x_gradient = sector_x_gradient
+ self.sector_y_gradient = sector_y_gradient
+ self.sector_average_function = sector_average_function
+
+ self.smoothing_function = smoothing_function
+
+ def _enumerate_lsc_images(self, images):
+ for image in images:
+ if image.lsc_only:
+ yield image
+
+ def _get_grid(self, channel, img_w, img_h):
+ # List of number of pixels in each sector
+ sectors_x = self.sector_x_gradient.distribute(img_w / 2, self.sector_shape[0])
+ sectors_y = self.sector_y_gradient.distribute(img_h / 2, self.sector_shape[1])
+
+ grid = []
+
+ r = 0
+ for y in sectors_y:
+ c = 0
+ for x in sectors_x:
+ grid.append(self.sector_average_function.average(channel[r:r + y, c:c + x]))
+ c += x
+ r += y
+
+ return np.array(grid)
+
+ def _lsc_single_channel(self, channel: np.array,
+ image: lt.Image, green_grid: np.array = None):
+ grid = self._get_grid(channel, image.w, image.h)
+ # Clamp the values to a small positive, so that the following 1/grid
+ # doesn't produce negative results.
+ grid = np.maximum(grid - image.blacklevel_16, 0.1)
+
+ if green_grid is None:
+ table = np.reshape(1 / grid, self.sector_shape[::-1])
+ else:
+ table = np.reshape(green_grid / grid, self.sector_shape[::-1])
+ table = self.smoothing_function.smoothing(table)
+
+ if green_grid is None:
+ table = table / np.min(table)
+
+ return table, grid
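+
+
+# A toy example of the gain-table maths above (hypothetical numbers, black
+# level 0): sector averages [0.25, 0.5] give 1/grid = [4, 2], and normalising
+# by the minimum yields the final gain table [2.0, 1.0].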
diff --git a/utils/tuning/libtuning/modules/lsc/raspberrypi.py b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
new file mode 100644
index 00000000..99bc4fe6
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
@@ -0,0 +1,248 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# ALSC module for tuning Raspberry Pi
+
+from .lsc import LSC
+
+import libtuning as lt
+import libtuning.utils as utils
+
+from numbers import Number
+import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
+
+class ALSCRaspberryPi(LSC):
+ # Override the type name so that the parser can match the entry in the
+ # config file.
+ type = 'alsc'
+ hr_name = 'ALSC (Raspberry Pi)'
+ out_name = 'rpi.alsc'
+ compatible = ['raspberrypi']
+
+ def __init__(self, *,
+ do_color: lt.Param,
+ luminance_strength: lt.Param,
+ **kwargs):
+ super().__init__(**kwargs)
+
+ self.do_color = do_color
+ self.luminance_strength = luminance_strength
+
+ self.output_range = (0, 3.999)
+
+ def validate_config(self, config: dict) -> bool:
+ if self not in config:
+ logger.error(f'{self.type} not in config')
+ return False
+
+ valid = True
+
+ conf = config[self]
+
+ lum_key = self.luminance_strength.name
+ color_key = self.do_color.name
+
+ if lum_key not in conf and self.luminance_strength.required:
+ logger.error(f'{lum_key} is not in config')
+ valid = False
+
+ if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1):
+ logger.warning(f'{lum_key} is not in range [0, 1]; defaulting to 0.5')
+
+ if color_key not in conf and self.do_color.required:
+ logger.error(f'{color_key} is not in config')
+ valid = False
+
+ return valid
+
+ # @return Image color temperature, flattened array of red calibration table
+ # (containing {sector size} elements), flattened array of blue
+ # calibration table, flattened array of green calibration
+ # table
+
+ def _do_single_alsc(self, image: lt.Image, do_alsc_colour: bool):
+ average_green = np.mean((image.channels[lt.Color.GR:lt.Color.GB + 1]), axis=0)
+
+ cg, g = self._lsc_single_channel(average_green, image)
+
+ if not do_alsc_colour:
+ return image.color, None, None, cg.flatten()
+
+ cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, g)
+ cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, g)
+
+ # \todo implement debug
+
+ return image.color, cr.flatten(), cb.flatten(), cg.flatten()
+
+ # @return Red shading table, Blue shading table, Green shading table,
+ # number of images processed
+
+    def _do_all_alsc(self, images: list, do_alsc_colour: bool, general_conf: dict) -> (list, list, list, int):
+ # List of colour temperatures
+ list_col = []
+ # Associated calibration tables
+ list_cr = []
+ list_cb = []
+ list_cg = []
+ count = 0
+ for image in self._enumerate_lsc_images(images):
+ col, cr, cb, cg = self._do_single_alsc(image, do_alsc_colour)
+ list_col.append(col)
+ list_cr.append(cr)
+ list_cb.append(cb)
+ list_cg.append(cg)
+ count += 1
+
+ # Convert to numpy array for data manipulation
+ list_col = np.array(list_col)
+ list_cr = np.array(list_cr)
+ list_cb = np.array(list_cb)
+ list_cg = np.array(list_cg)
+
+ cal_cr_list = []
+ cal_cb_list = []
+
+        # Note: The calculation of the average corners and centre of the
+        # shading tables, which ctt had, has been removed as it was unused
+
+ # Average all values for luminance shading and return one table for all temperatures
+ lum_lut = list(np.round(np.mean(list_cg, axis=0), 3))
+
+ if not do_alsc_colour:
+ return None, None, lum_lut, count
+
+ for ct in sorted(set(list_col)):
+ # Average tables for the same colour temperature
+ indices = np.where(list_col == ct)
+ ct = int(ct)
+ t_r = np.round(np.mean(list_cr[indices], axis=0), 3)
+ t_b = np.round(np.mean(list_cb[indices], axis=0), 3)
+
+ cr_dict = {
+ 'ct': ct,
+ 'table': list(t_r)
+ }
+ cb_dict = {
+ 'ct': ct,
+ 'table': list(t_b)
+ }
+ cal_cr_list.append(cr_dict)
+ cal_cb_list.append(cb_dict)
+
+ return cal_cr_list, cal_cb_list, lum_lut, count
+
+ # @brief Calculate sigma from two adjacent gain tables
+ def _calcSigma(self, g1, g2):
+ g1 = np.reshape(g1, self.sector_shape[::-1])
+ g2 = np.reshape(g2, self.sector_shape[::-1])
+
+        # Compute the ratio of the two gain tables
+ gg = g1 / g2
+ if np.mean(gg) < 1:
+ gg = 1 / gg
+
+ # For each internal patch, compute average difference between it and
+ # its 4 neighbours, then append to list
+ diffs = []
+ for i in range(self.sector_shape[1] - 2):
+ for j in range(self.sector_shape[0] - 2):
+                # Indexing is incremented by 1 since patches on the borders
+                # are not counted
+ diff = np.abs(gg[i + 1][j + 1] - gg[i][j + 1])
+ diff += np.abs(gg[i + 1][j + 1] - gg[i + 2][j + 1])
+ diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j])
+ diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j + 2])
+ diffs.append(diff / 4)
+
+ mean_diff = np.mean(diffs)
+ return np.round(mean_diff, 5)
+
+ # @brief Obtains sigmas for red and blue, effectively a measure of the
+ # 'error'
+ def _get_sigma(self, cal_cr_list, cal_cb_list):
+        # Provided that colour alsc tables were generated for at least two
+        # different colour temperatures, sigma is calculated by comparing
+        # calibration tables adjacent in colour temperature
+
+ color_temps = [cal['ct'] for cal in cal_cr_list]
+
+ # Calculate sigmas for each adjacent color_temps and return worst one
+ sigma_rs = []
+ sigma_bs = []
+ for i in range(len(color_temps) - 1):
+ sigma_rs.append(self._calcSigma(cal_cr_list[i]['table'], cal_cr_list[i + 1]['table']))
+ sigma_bs.append(self._calcSigma(cal_cb_list[i]['table'], cal_cb_list[i + 1]['table']))
+
+ # Return maximum sigmas, not necessarily from the same colour
+ # temperature interval
+ sigma_r = max(sigma_rs) if sigma_rs else 0.005
+ sigma_b = max(sigma_bs) if sigma_bs else 0.005
+
+ return sigma_r, sigma_b
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {
+ 'omega': 1.3,
+ 'n_iter': 100,
+ 'luminance_strength': 0.7
+ }
+
+ conf = config[self]
+ general_conf = config['general']
+
+ do_alsc_colour = self.do_color.get_value(conf)
+
+ # \todo I have no idea where this input parameter is used
+ luminance_strength = self.luminance_strength.get_value(conf)
+ if luminance_strength < 0 or luminance_strength > 1:
+ luminance_strength = 0.5
+
+ output['luminance_strength'] = luminance_strength
+
+        # \todo Validate images from greyscale camera and force greyscale mode
+ # \todo Debug functionality
+
+ alsc_out = self._do_all_alsc(images, do_alsc_colour, general_conf)
+ # \todo Handle the second green lut
+ cal_cr_list, cal_cb_list, luminance_lut, count = alsc_out
+
+ if not do_alsc_colour:
+ output['luminance_lut'] = luminance_lut
+ output['n_iter'] = 0
+ return output
+
+ output['calibrations_Cr'] = cal_cr_list
+ output['calibrations_Cb'] = cal_cb_list
+ output['luminance_lut'] = luminance_lut
+
+        # The sigmas determine the strength of the adaptive algorithm that
+        # cleans up any lens shading that has slipped through the alsc. They
+        # are determined by measuring a 'worst-case' difference between two
+        # alsc tables that are adjacent in colour space. If, however, only
+        # one colour temperature has been provided, then this difference
+        # cannot be computed as only one table is available.
+        # To determine the sigmas, you would have to estimate the error of an
+        # alsc table with only the image it was taken on as a check. To avoid
+        # this circularity, default exaggerated sigmas are used, which can
+        # result in too much alsc and is therefore not advised.
+        # In general, just take another alsc picture at another colour
+        # temperature!
+
+ if count == 1:
+ output['sigma'] = 0.005
+ output['sigma_Cb'] = 0.005
+ logger.warning('Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
+ return output
+
+ # Obtain worst-case scenario residual sigmas
+ sigma_r, sigma_b = self._get_sigma(cal_cr_list, cal_cb_list)
+ output['sigma'] = np.round(sigma_r, 5)
+ output['sigma_Cb'] = np.round(sigma_b, 5)
+
+ return output
diff --git a/utils/tuning/libtuning/modules/lsc/rkisp1.py b/utils/tuning/libtuning/modules/lsc/rkisp1.py
new file mode 100644
index 00000000..c02b2306
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lsc/rkisp1.py
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# LSC module for tuning rkisp1
+
+from .lsc import LSC
+
+import libtuning as lt
+import libtuning.utils as utils
+
+from numbers import Number
+import numpy as np
+
+
+class LSCRkISP1(LSC):
+ hr_name = 'LSC (RkISP1)'
+ out_name = 'LensShadingCorrection'
+ # \todo Not sure if this is useful. Probably will remove later.
+ compatible = ['rkisp1']
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't actually need anything from the config file
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ # @return Image color temperature, flattened array of red calibration table
+ # (containing {sector size} elements), flattened array of blue
+ # calibration table, flattened array of (red's) green calibration
+ # table, flattened array of (blue's) green calibration table
+
+ def _do_single_lsc(self, image: lt.Image):
+ # Perform LSC on each colour channel independently. A future enhancement
+ # worth investigating would be splitting the luminance and chrominance
+ # LSC as done by Raspberry Pi.
+ cgr, _ = self._lsc_single_channel(image.channels[lt.Color.GR], image)
+ cgb, _ = self._lsc_single_channel(image.channels[lt.Color.GB], image)
+ cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image)
+ cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image)
+
+ return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten()
+
+ # @return List of dictionaries of color temperature, red table, red's green
+ # table, blue's green table, and blue table
+
+ def _do_all_lsc(self, images: list) -> list:
+ output_list = []
+ output_map_func = lt.gradient.Linear().map
+
+ # List of colour temperatures
+ list_col = []
+ # Associated calibration tables
+ list_cr = []
+ list_cb = []
+ list_cgr = []
+ list_cgb = []
+ for image in self._enumerate_lsc_images(images):
+ col, cr, cb, cgr, cgb = self._do_single_lsc(image)
+ list_col.append(col)
+ list_cr.append(cr)
+ list_cb.append(cb)
+ list_cgr.append(cgr)
+ list_cgb.append(cgb)
+
+ # Convert to numpy array for data manipulation
+ list_col = np.array(list_col)
+ list_cr = np.array(list_cr)
+ list_cb = np.array(list_cb)
+ list_cgr = np.array(list_cgr)
+ list_cgb = np.array(list_cgb)
+
+ for color_temperature in sorted(set(list_col)):
+ # Average tables for the same colour temperature
+ indices = np.where(list_col == color_temperature)
+ color_temperature = int(color_temperature)
+
+ tables = []
+ for lis in [list_cr, list_cgr, list_cgb, list_cb]:
+ table = np.mean(lis[indices], axis=0)
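+                # The linear map below is equivalent to multiplying the gain
+                # by 1024, assuming (not verified here) that the hardware
+                # expresses shading gains as fixed-point values where 1024
+                # represents a gain of 1.0; the clip keeps them within 12 bits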
+ table = output_map_func((1, 4), (1024, 4096), table)
+ table = np.clip(table, 1024, 4095)
+ table = np.round(table).astype('int32').tolist()
+ tables.append(table)
+
+ entry = {
+ 'ct': color_temperature,
+ 'r': tables[0],
+ 'gr': tables[1],
+ 'gb': tables[2],
+ 'b': tables[3],
+ }
+
+ output_list.append(entry)
+
+ return output_list
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ # \todo This should actually come from self.sector_{x,y}_gradient
+ size_gradient = lt.gradient.Linear(lt.Remainder.Float)
+ output['x-size'] = size_gradient.distribute(0.5, 8)
+ output['y-size'] = size_gradient.distribute(0.5, 8)
+
+ output['sets'] = self._do_all_lsc(images)
+
+ if len(output['sets']) == 0:
+ return None
+
+        # \todo Validate images from greyscale camera and force greyscale mode
+ # \todo Debug functionality
+
+ return output
diff --git a/utils/tuning/libtuning/modules/module.py b/utils/tuning/libtuning/modules/module.py
new file mode 100644
index 00000000..de624384
--- /dev/null
+++ b/utils/tuning/libtuning/modules/module.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Base class for algorithm-specific tuning modules
+
+
+# @var type Type of the module. Defined in the base module.
+# @var out_name The key that will be used for the algorithm in the algorithms
+# dictionary in the tuning output file
+# @var hr_name Human-readable module name, mostly for debugging
+class Module(object):
+ type = 'base'
+ hr_name = 'Base Module'
+ out_name = 'GenericAlgorithm'
+
+ def __init__(self):
+ pass
+
+ def validate_config(self, config: dict) -> bool:
+ raise NotImplementedError
+
+ # @brief Do the module's processing
+ # @param config Full configuration from the input configuration file
+ # @param images List of images to process
+ # @param outputs The outputs of all modules that were executed before this
+ # module. Note that this is an input parameter, and the
+ # output of this module should be returned directly
+ # @return Result of the module's processing. It may be empty. None
+ # indicates failure and that the result should not be used.
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ raise NotImplementedError
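+
+
+# A minimal subclass sketch, for illustration only (hypothetical, not part of
+# the tree):
+#
+#     class NoOpModule(Module):
+#         type = 'noop'
+#
+#         def validate_config(self, config: dict) -> bool:
+#             return True
+#
+#         def process(self, config: dict, images: list, outputs: dict) -> dict:
+#             return {}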
diff --git a/utils/tuning/libtuning/modules/static.py b/utils/tuning/libtuning/modules/static.py
new file mode 100644
index 00000000..4d0f7e18
--- /dev/null
+++ b/utils/tuning/libtuning/modules/static.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas on Board
+#
+# Module implementation for static data
+
+from .module import Module
+
+
+# This module can be used in cases where the tuning file should contain
+# static data.
+class StaticModule(Module):
+ def __init__(self, out_name: str, output: dict = {}):
+ super().__init__()
+ self.out_name = out_name
+ self.hr_name = f'Static {out_name}'
+ self.type = f'static_{out_name}'
+ self.output = output
+
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ return self.output
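+
+
+# Usage sketch (block name and values are hypothetical): emit a fixed black
+# level block into the tuning file.
+#
+#     blc = StaticModule('BlackLevelCorrection',
+#                        {'R': 256, 'Gr': 256, 'Gb': 256, 'B': 256})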
diff --git a/utils/tuning/libtuning/parsers/__init__.py b/utils/tuning/libtuning/parsers/__init__.py
new file mode 100644
index 00000000..022c1e5d
--- /dev/null
+++ b/utils/tuning/libtuning/parsers/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.parsers.raspberrypi_parser import RaspberryPiParser
+from libtuning.parsers.yaml_parser import YamlParser
diff --git a/utils/tuning/libtuning/parsers/parser.py b/utils/tuning/libtuning/parsers/parser.py
new file mode 100644
index 00000000..0c3944c7
--- /dev/null
+++ b/utils/tuning/libtuning/parsers/parser.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Base class for a parser for a specific format of config file
+
+class Parser(object):
+ def __init__(self):
+ pass
+
+ # @brief Parse a config file into a config dict
+ # @details The config dict shall have one key 'general' with a dict value
+ # for general configuration options, and all other entries shall
+ # have the module as the key with its configuration options (as a
+ # dict) as the value. The config dict shall prune entries that are
+ # for modules that are not in @a modules.
+ # @param config (str) Path to config file
+ # @param modules (list) List of modules
+ # @return (dict, list) Configuration and list of modules to disable
+ def parse(self, config_file: str, modules: list) -> (dict, list):
+ raise NotImplementedError
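+
+
+# For illustration, a conforming return value might look like the following,
+# where alsc_module and awb_module are hypothetical module instances:
+#
+#     ({'general': {'blacklevel': 32}, alsc_module: {'do_alsc_colour': 1}},
+#      [awb_module])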
diff --git a/utils/tuning/libtuning/parsers/raspberrypi_parser.py b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
new file mode 100644
index 00000000..f1da4592
--- /dev/null
+++ b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Parser for Raspberry Pi config file format
+
+from .parser import Parser
+
+import json
+import numbers
+
+import libtuning.utils as utils
+
+
+class RaspberryPiParser(Parser):
+ def __init__(self):
+ super().__init__()
+
+ # The string in the 'disable' and 'plot' lists are formatted as
+ # 'rpi.{module_name}'.
+    # @brief Enumerate the entries of @a listt as modules, provided that each
+    # value exists as a key in @a dictt and is the name of a valid module in
+    # @a modules
+ def _enumerate_rpi_modules(self, listt, dictt, modules):
+ for x in listt:
+ name = x.replace('rpi.', '')
+ if name not in dictt:
+ continue
+            module = utils.get_module_by_type_name(modules, name)
+ if module is not None:
+ yield module
+
+ def _valid_macbeth_option(self, value):
+ if not isinstance(value, dict):
+ return False
+
+ if list(value.keys()) != ['small', 'show']:
+ return False
+
+ for val in value.values():
+ if not isinstance(val, numbers.Number):
+ return False
+
+ return True
+
+ def parse(self, config_file: str, modules: list) -> (dict, list):
+ with open(config_file, 'r') as config_json:
+ config = json.load(config_json)
+
+ disable = []
+ for module in self._enumerate_rpi_modules(config['disable'], config, modules):
+ disable.append(module)
+ # Remove the disabled module's config too
+            config.pop(module.type)
+ config.pop('disable')
+
+ # The raspberrypi config format has 'plot' map to a list of module
+ # names which should be plotted. libtuning has each module contain the
+ # plot information in itself so do this conversion.
+
+ for module in self._enumerate_rpi_modules(config['plot'], config, modules):
+ # It's fine to set the value of a potentially disabled module, as
+ # the object still exists at this point
+            module.debug.append('plot')
+ config.pop('plot')
+
+ # Convert the keys from module name to module instance
+
+ new_config = {}
+
+ for module_name in config:
+ module = utils.get_module_by_type_name(modules, module_name)
+ if module is not None:
+ new_config[module] = config[module_name]
+
+ new_config['general'] = {}
+
+ if 'blacklevel' in config:
+ if not isinstance(config['blacklevel'], numbers.Number):
+ raise TypeError('Config "blacklevel" must be a number')
+ # Raspberry Pi's ctt config has magic blacklevel value -1 to mean
+ # "get it from the image metadata". Since we already do that in
+ # Image, don't save it to the config here.
+ if config['blacklevel'] >= 0:
+ new_config['general']['blacklevel'] = config['blacklevel']
+
+ if 'macbeth' in config:
+ if not self._valid_macbeth_option(config['macbeth']):
+ raise TypeError('Config "macbeth" must be a dict: {"small": number, "show": number}')
+ new_config['general']['macbeth'] = config['macbeth']
+ else:
+ new_config['general']['macbeth'] = {'small': 0, 'show': 0}
+
+ return new_config, disable
diff --git a/utils/tuning/libtuning/parsers/yaml_parser.py b/utils/tuning/libtuning/parsers/yaml_parser.py
new file mode 100644
index 00000000..1fa6b7a8
--- /dev/null
+++ b/utils/tuning/libtuning/parsers/yaml_parser.py
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Parser for YAML format config file
+
+from .parser import Parser
+import yaml
+
+
+class YamlParser(Parser):
+ def __init__(self):
+ super().__init__()
+
+ def parse(self, config_file: str, modules: list) -> (dict, list):
+ # Dummy implementation that just reads the file
+ with open(config_file, 'r') as f:
+ config = yaml.safe_load(f)
+
+ return config, []
diff --git a/utils/tuning/libtuning/smoothing.py b/utils/tuning/libtuning/smoothing.py
new file mode 100644
index 00000000..de4d920c
--- /dev/null
+++ b/utils/tuning/libtuning/smoothing.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Wrapper for cv2 smoothing functions to enable duck-typing
+
+import cv2
+
+
+# @brief Wrapper for cv2 smoothing functions so that they can be duck-typed
+class Smoothing(object):
+ def __init__(self):
+ pass
+
+ def smoothing(self, src):
+ raise NotImplementedError
+
+
+class MedianBlur(Smoothing):
+ def __init__(self, ksize):
+ self.ksize = ksize
+
+ def smoothing(self, src):
+ return cv2.medianBlur(src.astype('float32'), self.ksize).astype('float64')
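+
+
+# Usage sketch: any object exposing smoothing() can be passed to a module
+# that expects a Smoothing (the kernel size of 3 is just an example):
+#
+#     table = MedianBlur(3).smoothing(table)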
diff --git a/utils/tuning/libtuning/utils.py b/utils/tuning/libtuning/utils.py
new file mode 100644
index 00000000..e35cf409
--- /dev/null
+++ b/utils/tuning/libtuning/utils.py
@@ -0,0 +1,186 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Utilities for libtuning
+
+import cv2
+import logging
+import numpy as np
+import os
+from pathlib import Path
+import re
+
+from libtuning.image import Image
+from .macbeth import locate_macbeth
+
+logger = logging.getLogger(__name__)
+
+# Utility functions
+
+
+def get_module_by_type_name(modules, name):
+ for module in modules:
+ if module.type == name:
+ return module
+ return None
+
+
+# Private utility functions
+
+
+def _list_image_files(directory):
+    d = Path(directory)
+    # Accept files with a .jpg, .jpeg or .dng extension
+    files = [d.joinpath(f) for f in os.listdir(d)
+             if re.search(r'\.(jpe?g|dng)$', f)]
+    files.sort()
+    return files
+
+
+def _parse_image_filename(fn: Path):
+    lsc_only = False
+    color_temperature = None
+    lux = None
+
+    parts = fn.stem.split('_')
+    for part in parts:
+        if part == 'alsc':
+            lsc_only = True
+            continue
+        r = re.match(r'(\d+)[kK]', part)
+        if r:
+            color_temperature = int(r.group(1))
+            continue
+        r = re.match(r'(\d+)[lLuU]', part)
+        if r:
+            lux = int(r.group(1))
+
+    if color_temperature is None:
+        logger.error(f'The file name of "{fn.name}" does not contain a color temperature')
+
+    if lux is None and not lsc_only:
+        logger.error(f'The file name of "{fn.name}" must contain either "alsc" or a lux level')
+
+    return color_temperature, lux, lsc_only
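+
+# For reference, hypothetical file names and the (color_temperature, lux,
+# lsc_only) tuples that _parse_image_filename() extracts from them:
+#   'imx219_3000k_1000l.dng' -> (3000, 1000, False)
+#   'board_alsc_5000k.jpeg'  -> (5000, None, True)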
+
+
+# \todo Implement this from check_imgs() in ctt.py
+def _validate_images(images):
+ return True
+
+
+# Public utility functions
+
+
+# @brief Load images into a single list of Image instances
+# @param input_dir Directory from which to load image files
+# @param config Configuration dictionary
+# @param load_nonlsc Whether or not to load non-lsc images
+# @param load_lsc Whether or not to load lsc-only images
+# @return A list of Image instances
+def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list:
+ files = _list_image_files(input_dir)
+ if len(files) == 0:
+ logger.error(f'No images found in {input_dir}')
+ return None
+
+ images = []
+ for f in files:
+ color, lux, lsc_only = _parse_image_filename(f)
+
+ if color is None:
+ logger.warning(f'Ignoring "{f.name}" as it has no associated color temperature')
+ continue
+
+ logger.info(f'Process image "{f.name}" (color={color}, lux={lux}, lsc_only={lsc_only})')
+
+ # Skip lsc image if we don't need it
+ if lsc_only and not load_lsc:
+ logger.warning(f'Skipping {f.name} as this tuner has no LSC module')
+ continue
+
+ # Skip non-lsc image if we don't need it
+ if not lsc_only and not load_nonlsc:
+ logger.warning(f'Skipping {f.name} as this tuner only has an LSC module')
+ continue
+
+ # Load image
+ try:
+ image = Image(f)
+ except Exception as e:
+ logger.error(f'Failed to load image {f.name}: {e}')
+ continue
+
+ # Populate simple fields
+ image.lsc_only = lsc_only
+ image.color = color
+ image.lux = lux
+
+        # Black level comes from the TIFF tags, but it can be overridden by
+        # the config file.
+ if 'blacklevel' in config['general']:
+ image.blacklevel_16 = config['general']['blacklevel']
+
+ if lsc_only:
+ images.append(image)
+ continue
+
+ # Handle macbeth
+ macbeth = locate_macbeth(image, config)
+ if macbeth is None:
+ continue
+
+ images.append(image)
+
+ if not _validate_images(images):
+ return None
+
+ return images
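+
+# load_images() is normally driven by the Tuner; a direct call might look
+# like this (directory and config values are hypothetical):
+#
+#     config = {'general': {'macbeth': {'small': 0, 'show': 0}}}
+#     images = load_images('images/', config, load_nonlsc=True, load_lsc=True)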
+
+
+"""
+Some code that will save virtual macbeth charts that show the difference
+between optimised matrices and non optimised matrices
+
+The function creates an image that is 1550x1050 pixels and fills it with
+patches which are 200x200 pixels in size. Each patch contains the ideal
+color, the color from the original matrix, and the color from the final
+matrix:
+
+_________________
+|               |
+|  Ideal Color  |
+|_______________|
+|  Old  |  New  |
+| Color | Color |
+|_______|_______|
+
+This is a nice way of showing how the optimisation changes the colors and
+the color matrices.
+"""
+
+
+def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
+    image = np.zeros((1050, 1550, 3), dtype=np.uint8)
+    colorindex = -1
+    # Note that xlocation indexes image rows (the 1050px dimension) and
+    # ylocation indexes image columns (the 1550px dimension)
+    for y in range(6):
+        for x in range(4):  # Creates the 6x4 grid of the macbeth chart
+            colorindex += 1
+            # The 50px offset leaves a black gap between the squares, like
+            # on a real macbeth chart
+            xlocation = 50 + 250 * x
+            ylocation = 50 + 250 * y
+            for g in range(200):
+                for i in range(100):
+                    image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex]
+            # Smaller squares below compare the old colors with the new ones
+            xlocation = 150 + 250 * x
+            ylocation = 50 + 250 * y
+            for i in range(100):
+                for g in range(100):
+                    image[xlocation + i, ylocation + g] = original_rgb[colorindex]
+            xlocation = 150 + 250 * x
+            ylocation = 150 + 250 * y
+            for i in range(100):
+                for g in range(100):
+                    image[xlocation + i, ylocation + g] = new_rgb[colorindex]
+
+    im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+    cv2.imwrite(f'{output_filename} Generated Macbeth Chart.png', im_bgr)
+
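
As a quick way to exercise visualise_macbeth_chart(), synthetic colors will
do; a sketch, with all values made up:

    import numpy as np

    from libtuning.utils import visualise_macbeth_chart

    # 24 patch colors for each of the three variants
    rng = np.random.default_rng(0)
    macbeth_rgb = rng.integers(0, 256, (24, 3))
    original_rgb = np.clip(macbeth_rgb + rng.integers(-20, 20, (24, 3)), 0, 255)
    new_rgb = np.clip(macbeth_rgb + rng.integers(-5, 5, (24, 3)), 0, 255)

    # Writes 'test Generated Macbeth Chart.png' in the current directory
    visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, 'test')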
diff --git a/utils/tuning/raspberrypi/__init__.py b/utils/tuning/raspberrypi/__init__.py
new file mode 100644
index 00000000..9ccabb0e
--- /dev/null
+++ b/utils/tuning/raspberrypi/__init__.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
diff --git a/utils/tuning/raspberrypi/alsc.py b/utils/tuning/raspberrypi/alsc.py
new file mode 100644
index 00000000..ba8fc9e1
--- /dev/null
+++ b/utils/tuning/raspberrypi/alsc.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# ALSC module instance for Raspberry Pi tuning scripts
+
+import libtuning as lt
+from libtuning.modules.lsc import ALSCRaspberryPi
+
+ALSC = \
+ ALSCRaspberryPi(do_color=lt.Param('do_alsc_colour', lt.Param.Mode.Optional, True),
+ luminance_strength=lt.Param('luminance_strength', lt.Param.Mode.Optional, 0.5),
+ debug=[lt.Debug.Plot],
+ sector_shape=(16, 12),
+ sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+ sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+ sector_average_function=lt.average.Mean(),
+ smoothing_function=lt.smoothing.MedianBlur(3),
+ )
diff --git a/utils/tuning/raspberrypi_alsc_only.py b/utils/tuning/raspberrypi_alsc_only.py
new file mode 100755
index 00000000..777d8007
--- /dev/null
+++ b/utils/tuning/raspberrypi_alsc_only.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+#
+# Tuning script for raspberrypi, ALSC only
+
+import sys
+
+import libtuning as lt
+from libtuning.parsers import RaspberryPiParser
+from libtuning.generators import RaspberryPiOutput
+
+from raspberrypi.alsc import ALSC
+
+tuner = lt.Tuner('Raspberry Pi (ALSC only)')
+tuner.add(ALSC)
+tuner.set_input_parser(RaspberryPiParser())
+tuner.set_output_formatter(RaspberryPiOutput())
+tuner.set_output_order([ALSC])
+
+if __name__ == '__main__':
+ sys.exit(tuner.run(sys.argv))
diff --git a/utils/tuning/requirements.txt b/utils/tuning/requirements.txt
new file mode 100644
index 00000000..3705769b
--- /dev/null
+++ b/utils/tuning/requirements.txt
@@ -0,0 +1,9 @@
+coloredlogs
+matplotlib
+numpy
+opencv-python
+py3exiv2
+pyyaml
+rawpy
+scikit-learn
+scipy
diff --git a/utils/tuning/rkisp1.py b/utils/tuning/rkisp1.py
new file mode 100755
index 00000000..9f40fd8b
--- /dev/null
+++ b/utils/tuning/rkisp1.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas On Board
+#
+# Tuning script for rkisp1
+
+import coloredlogs
+import logging
+import sys
+
+import libtuning as lt
+from libtuning.parsers import YamlParser
+from libtuning.generators import YamlOutput
+from libtuning.modules.lsc import LSCRkISP1
+from libtuning.modules.agc import AGCRkISP1
+from libtuning.modules.awb import AWBRkISP1
+from libtuning.modules.ccm import CCMRkISP1
+from libtuning.modules.static import StaticModule
+
+coloredlogs.install(level=logging.INFO, fmt='%(name)s %(levelname)s %(message)s')
+
+agc = AGCRkISP1(debug=[lt.Debug.Plot])
+awb = AWBRkISP1(debug=[lt.Debug.Plot])
+blc = StaticModule('BlackLevelCorrection')
+ccm = CCMRkISP1(debug=[lt.Debug.Plot])
+color_processing = StaticModule('ColorProcessing')
+filter = StaticModule('Filter')
+gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2})
+lsc = LSCRkISP1(debug=[lt.Debug.Plot],
+                # This is for the actual LSC tuning, and is part of the base
+                # LSC module. rkisp1's table sector sizes (16x16, programmed
+                # as a mirrored 8x8) are separate, and are hardcoded in its
+                # specific LSC tuning module.
+                sector_shape=(17, 17),
+
+                sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+                sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+
+                # This is the function that will be used to average the
+                # pixels in each sector. This can also be a custom function;
+                # see the sketch at the end of this script for an example.
+                sector_average_function=lt.average.Mean(),
+
+                # This is the function that will be used to smooth the color
+                # ratio values. This can also be a custom function.
+                smoothing_function=lt.smoothing.MedianBlur(3))
+
+tuner = lt.Tuner('RkISP1')
+tuner.add([agc, awb, blc, ccm, color_processing, filter, gamma_out, lsc])
+tuner.set_input_parser(YamlParser())
+tuner.set_output_formatter(YamlOutput())
+tuner.set_output_order([agc, awb, blc, ccm, color_processing,
+ filter, gamma_out, lsc])
+
+if __name__ == '__main__':
+ sys.exit(tuner.run(sys.argv))
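
As the comments in the script note, the sector averaging and smoothing
functions can be swapped for custom ones. Below is a sketch of a trimmed-mean
averager; it assumes, by analogy with lt.smoothing.Smoothing, that the
lt.average.Average base class exposes a single average() method (check
libtuning/average.py before relying on this):

    import numpy as np

    from libtuning.average import Average


    class TrimmedMean(Average):
        # Hypothetical example: a mean that discards the darkest and
        # brightest fraction of the pixels in each sector before averaging
        def __init__(self, fraction=0.1):
            self.fraction = fraction

        def average(self, np_array):
            flat = np.sort(np_array, axis=None)
            # Keep at least one element, even for tiny sectors
            trim = min(int(len(flat) * self.fraction), (len(flat) - 1) // 2)
            return np.mean(flat[trim:len(flat) - trim])

It would then be passed to the module as
sector_average_function=TrimmedMean(0.05) in place of lt.average.Mean().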