Diffstat (limited to 'utils/raspberrypi/ctt')
-rwxr-xr-x  utils/raspberrypi/ctt/alsc_only.py              20
-rw-r--r--  utils/raspberrypi/ctt/cac_only.py              142
-rw-r--r--  utils/raspberrypi/ctt/colors.py                  2
-rwxr-xr-x  utils/raspberrypi/ctt/convert_tuning.py         98
-rwxr-xr-x  utils/raspberrypi/ctt/ctt.py                   257
-rw-r--r--  utils/raspberrypi/ctt/ctt_alsc.py               83
-rw-r--r--  utils/raspberrypi/ctt/ctt_awb.py                13
-rw-r--r--  utils/raspberrypi/ctt/ctt_cac.py               228
-rw-r--r--  utils/raspberrypi/ctt/ctt_ccm.py                 8
-rw-r--r--  utils/raspberrypi/ctt/ctt_config_example.json    5
-rw-r--r--  utils/raspberrypi/ctt/ctt_dots_locator.py      118
-rw-r--r--  utils/raspberrypi/ctt/ctt_geq.py                 2
-rw-r--r--  utils/raspberrypi/ctt/ctt_image_load.py          3
-rw-r--r--  utils/raspberrypi/ctt/ctt_lux.py                 2
-rw-r--r--  utils/raspberrypi/ctt/ctt_macbeth_locator.py     2
-rw-r--r--  utils/raspberrypi/ctt/ctt_noise.py               2
-rwxr-xr-x  utils/raspberrypi/ctt/ctt_pisp.py              805
-rwxr-xr-x  utils/raspberrypi/ctt/ctt_pretty_print_json.py  22
-rw-r--r--  utils/raspberrypi/ctt/ctt_ransac.py              2
-rw-r--r--  utils/raspberrypi/ctt/ctt_tools.py               5
-rwxr-xr-x  utils/raspberrypi/ctt/ctt_vc4.py               126
21 files changed, 1718 insertions, 227 deletions
diff --git a/utils/raspberrypi/ctt/alsc_only.py b/utils/raspberrypi/ctt/alsc_only.py
index 7cd0ac01..a521c4ad 100755
--- a/utils/raspberrypi/ctt/alsc_only.py
+++ b/utils/raspberrypi/ctt/alsc_only.py
@@ -2,12 +2,14 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
-# Copyright (C) 2022, Raspberry Pi (Trading) Limited
+# Copyright (C) 2022, Raspberry Pi Ltd
#
-# alsc_only.py - alsc tuning tool
+# alsc tuning tool
-from ctt import *
+import sys
+from ctt import *
+from ctt_tools import parse_input
if __name__ == '__main__':
"""
@@ -15,13 +17,14 @@ if __name__ == '__main__':
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
+ PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -30,5 +33,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output, alsc_only=True)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py
new file mode 100644
index 00000000..1c0a8193
--- /dev/null
+++ b/utils/raspberrypi/ctt/cac_only.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi (Trading) Ltd.
+#
+# cac_only.py - cac tuning tool
+
+
+# This file allows you to tune only the chromatic aberration correction
+# Specify any number of files in the command line args, and it shall iterate through
+# and generate an averaged cac table from all the input images, which you can then
+# input into your tuning file.
+
+# Takes .dng files of the dots grid produced by the camera module and calculates the chromatic aberration of each dot.
+# Then takes each dot, and works out where it was in the image, and uses that to output a table of the shifts
+# across the whole image.
+
+from PIL import Image
+import numpy as np
+import rawpy
+import sys
+import getopt
+
+from ctt_cac import *
+
+
+def cac(filelist, output_filepath, plot_results=False):
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+ # Multiple files are recommended to average out the lens aberration through rotations
+ for file in filelist:
+ print("\n Processing file " + str(file))
+ # Read the raw RGB values from the .dng file
+ with rawpy.imread(file) as raw:
+ rgb = raw.postprocess()
+ sizes = (raw.sizes)
+
+ image_size = [sizes[2], sizes[3]] # Image size, X, Y
+ # Create a colour copy of the RGB values to use later in the calibration
+ imout = Image.new(mode="RGB", size=image_size)
+ rgb_image = np.array(imout)
+ # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+ rgb.reshape((image_size[0], image_size[1], 3))
+ rgb_image = rgb
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+
+ # The json format that we then paste into the tuning file (manually)
+ sample = '''
+ {
+ "rpi.cac" :
+ {
+ "strength": 1.0,
+ "lut_rx" : [
+ rx_vals
+ ],
+ "lut_ry" : [
+ ry_vals
+ ],
+ "lut_bx" : [
+ bx_vals
+ ],
+ "lut_by" : [
+ by_vals
+ ]
+ }
+ }
+ '''
+
+ # Below, may look incorrect, however, the PiSP (standard) dimensions are flipped in comparison to
+ # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+ # and the PiSP block asks for the values it should shift (hence the * -1, to convert from colour shift to a pixel shift)
+ sample = sample.replace("rx_vals", pprint_array(ry * -1))
+ sample = sample.replace("ry_vals", pprint_array(rx * -1))
+ sample = sample.replace("bx_vals", pprint_array(by * -1))
+ sample = sample.replace("by_vals", pprint_array(bx * -1))
+ print("Successfully converted to JSON")
+ f = open(str(output_filepath), "w+")
+ f.write(sample)
+ f.close()
+ print("Successfully written to json file")
+ '''
+ If you wish to see a plot of the colour channel shifts, add the -p or --plot option
+ Can be a quick way of validating if the data/dots you've got are good, or if you need to
+ change some parameters/take some better images
+ '''
+ if plot_results:
+ plot_shifts(red_shift, blue_shift)
+
+
+if __name__ == "__main__":
+ argv = sys.argv
+ # Detect the input and output file paths
+ arg_output = "output.json"
+ arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
+ opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
+
+ output_location = 0
+ input_location = 0
+ filelist = []
+ plot_results = False
+ for i in range(len(argv)):
+ if ("-h") in argv[i]:
+ print(arg_help) # print the help message
+ sys.exit(2)
+ if "-o" in argv[i]:
+ output_location = i
+ if ".dng" in argv[i]:
+ filelist.append(argv[i])
+ if "-p" in argv[i]:
+ plot_results = True
+
+ arg_output = argv[output_location + 1]
+ cac(filelist, arg_output, plot_results)
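For reference, the script scans argv for anything containing '.dng', so the dot-grid captures are simply listed alongside the options; a typical invocation (file names illustrative) would be:

    ./cac_only.py dots_0deg.dng dots_90deg.dng dots_180deg.dng -o cac.json -p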
diff --git a/utils/raspberrypi/ctt/colors.py b/utils/raspberrypi/ctt/colors.py
index 1ab986d6..cb4d236b 100644
--- a/utils/raspberrypi/ctt/colors.py
+++ b/utils/raspberrypi/ctt/colors.py
@@ -1,4 +1,4 @@
-# colors.py - Program to convert from RGB to LAB color space
+# Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB): # where RGB is a 1x3 array. e.g RGB = [100, 255, 230]
num = 0
XYZ = [0, 0, 0]
diff --git a/utils/raspberrypi/ctt/convert_tuning.py b/utils/raspberrypi/ctt/convert_tuning.py
index f4504d45..83cf69d4 100755
--- a/utils/raspberrypi/ctt/convert_tuning.py
+++ b/utils/raspberrypi/ctt/convert_tuning.py
@@ -8,30 +8,104 @@
import argparse
import json
+import numpy as np
import sys
from ctt_pretty_print_json import pretty_print
+from ctt_pisp import grid_size as grid_size_pisp
+from ctt_pisp import json_template as json_template_pisp
+from ctt_vc4 import grid_size as grid_size_vc4
+from ctt_vc4 import json_template as json_template_vc4
-def convert_v2(in_json: dict) -> str:
+def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
- if 'version' in in_json.keys() and in_json['version'] != 1.0:
- print(f'The JSON config reports version {in_json["version"]} that is incompatible with this tool.')
- sys.exit(-1)
+ out_ls = np.zeros((dst_h, dst_w))
+ for i in range(src_h):
+ out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
+ np.linspace(0, dst_w - 1, src_w),
+ in_ls[i])
+ for i in range(dst_w):
+ out_ls[:,i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
+ np.linspace(0, dst_h - 1, src_h),
+ out_ls[:src_h, i])
+ return out_ls
- converted = {
- 'version': 2.0,
- 'target': 'bcm2835',
- 'algorithms': [{algo: config} for algo, config in in_json.items()]
- }
- return pretty_print(converted)
+def convert_target(in_json: dict, target: str):
+
+ src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4
+ dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp
+ json_template = json_template_vc4 if target == 'vc4' else json_template_pisp
+
+ # ALSC grid sizes
+ alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc']
+ for colour in ['calibrations_Cr', 'calibrations_Cb']:
+ if colour not in alsc:
+ continue
+ for temperature in alsc[colour]:
+ in_ls = np.reshape(temperature['table'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ temperature['table'] = np.round(out_ls.flatten(), 3).tolist()
+
+ if 'luminance_lut' in alsc:
+ in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist()
+
+ # Denoise blocks
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.sdn':
+ in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']}
+ break
+
+ # AGC mode weights
+ agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc']
+ if 'channels' in agc:
+ for i, channel in enumerate(agc['channels']):
+ target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes']
+ for mode, v in channel['metering_modes'].items():
+ v['weights'] = target_agc_metering[mode]['weights']
+ else:
+ for mode, v in agc["metering_modes"].items():
+ target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes']
+ v['weights'] = target_agc_metering[mode]['weights']
+
+ # HDR
+ if target == 'pisp':
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.hdr':
+ in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']}
+
+ return in_json
+
+
+def convert_v2(in_json: dict, target: str) -> str:
+
+ if 'version' in in_json.keys() and in_json['version'] == 1.0:
+ converted = {
+ 'version': 2.0,
+ 'target': target,
+ 'algorithms': [{algo: config} for algo, config in in_json.items()]
+ }
+ else:
+ converted = in_json
+
+ # Convert between vc4 <-> pisp targets. This is a best effort thing.
+ if converted['target'] != target:
+ converted = convert_target(converted, target)
+ converted['target'] = target
+
+ grid_size = grid_size_vc4[0] if target == 'vc4' else grid_size_pisp[0]
+ return pretty_print(converted, custom_elems={'table': grid_size, 'luminance_lut': grid_size})
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
- 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0.\n')
+ 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n')
parser.add_argument('input', type=str, help='Input tuning file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform.',
+ choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
default=None)
@@ -40,7 +114,7 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = convert_v2(in_json)
+ out_json = convert_v2(in_json, args.target)
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
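The two-pass resampling in interp_2d() above is worth seeing in isolation: each source row is first stretched to the destination width with np.interp, then the partially filled columns are stretched to the destination height. A minimal standalone sketch (16x12 is the vc4 ALSC grid; 32x32 stands in for the larger PiSP grid):

    import numpy as np

    def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
        out_ls = np.zeros((dst_h, dst_w))
        # Pass 1: stretch each source row to the destination width.
        for i in range(src_h):
            out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
                                  np.linspace(0, dst_w - 1, src_w),
                                  in_ls[i])
        # Pass 2: stretch the first src_h entries of each column to dst_h.
        for i in range(dst_w):
            out_ls[:, i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
                                     np.linspace(0, dst_h - 1, src_h),
                                     out_ls[:src_h, i])
        return out_ls

    table = np.linspace(1.0, 2.0, 16 * 12).reshape(12, 16)  # vc4-sized gain table
    print(interp_2d(table, 16, 12, 32, 32).shape)           # -> (32, 32)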
diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py
index cd89f177..96f1b5e6 100755
--- a/utils/raspberrypi/ctt/ctt.py
+++ b/utils/raspberrypi/ctt/ctt.py
@@ -4,11 +4,12 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt.py - camera tuning tool
+# camera tuning tool
import os
import sys
from ctt_image_load import *
+from ctt_cac import *
from ctt_ccm import *
from ctt_awb import *
from ctt_alsc import *
@@ -22,9 +23,10 @@ import re
"""
This file houses the camera object, which is used to perform the calibrations.
-The camera object houses all the calibration images as attributes in two lists:
+The camera object houses all the calibration images as attributes in three lists:
- imgs (macbeth charts)
- imgs_alsc (alsc correction images)
+ - imgs_cac (cac correction images)
Various calibrations are methods of the camera object, and the output is stored
in a dictionary called self.json.
Once all the caibration has been completed, the Camera.json is written into a
@@ -67,139 +69,26 @@ Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
- def __init__(self, jfile):
+ def __init__(self, jfile, json):
self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
if self.path == '/':
self.path = ''
self.imgs = []
self.imgs_alsc = []
+ self.imgs_cac = []
self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
self.log_separator = '\n'+'-'*70+'\n'
self.jf = jfile
"""
initial json dict populated by uncalibrated values
"""
- self.json = {
- "rpi.black_level": {
- "black_level": 4096
- },
- "rpi.dpc": {
- },
- "rpi.lux": {
- "reference_shutter_speed": 10000,
- "reference_gain": 1,
- "reference_aperture": 1.0
- },
- "rpi.noise": {
- },
- "rpi.geq": {
- },
- "rpi.sdn": {
- },
- "rpi.awb": {
- "priors": [
- {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
- {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
- {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
- ],
- "modes": {
- "auto": {"lo": 2500, "hi": 8000},
- "incandescent": {"lo": 2500, "hi": 3000},
- "tungsten": {"lo": 3000, "hi": 3500},
- "fluorescent": {"lo": 4000, "hi": 4700},
- "indoor": {"lo": 3000, "hi": 5000},
- "daylight": {"lo": 5500, "hi": 6500},
- "cloudy": {"lo": 7000, "hi": 8600}
- },
- "bayes": 1
- },
- "rpi.agc": {
- "metering_modes": {
- "centre-weighted": {
- "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
- },
- "spot": {
- "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- },
- "matrix": {
- "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
- }
- },
- "exposure_modes": {
- "normal": {
- "shutter": [100, 10000, 30000, 60000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- },
- "short": {
- "shutter": [100, 5000, 10000, 20000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- }
- },
- "constraint_modes": {
- "normal": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
- ],
- "highlight": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
- {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
- ]
- },
- "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
- },
- "rpi.alsc": {
- 'omega': 1.3,
- 'n_iter': 100,
- 'luminance_strength': 0.7,
- },
- "rpi.contrast": {
- "ce_enable": 1,
- "gamma_curve": [
- 0, 0,
- 1024, 5040,
- 2048, 9338,
- 3072, 12356,
- 4096, 15312,
- 5120, 18051,
- 6144, 20790,
- 7168, 23193,
- 8192, 25744,
- 9216, 27942,
- 10240, 30035,
- 11264, 32005,
- 12288, 33975,
- 13312, 35815,
- 14336, 37600,
- 15360, 39168,
- 16384, 40642,
- 18432, 43379,
- 20480, 45749,
- 22528, 47753,
- 24576, 49621,
- 26624, 51253,
- 28672, 52698,
- 30720, 53796,
- 32768, 54876,
- 36864, 57012,
- 40960, 58656,
- 45056, 59954,
- 49152, 61183,
- 53248, 62355,
- 57344, 63419,
- 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm": {
- },
- "rpi.sharpen": {
- }
- }
+ self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
- def ccm_cal(self, do_alsc_colour):
+ def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
@@ -245,7 +134,7 @@ class Camera:
Do CCM calibration
"""
try:
- ccms = ccm(self, cal_cr_list, cal_cb_list)
+ ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
@@ -259,11 +148,67 @@ class Camera:
print('Finished CCM calibration')
"""
+ Perform chromatic aberration correction using multiple dots images.
+ """
+ def cac_cal(self, do_alsc_colour):
+ if 'rpi.cac' in self.disable:
+ return 1
+ print('\nStarting CAC calibration')
+ self.log_new_sec('CAC')
+ """
+ check if cac images have been taken
+ """
+ if len(self.imgs_cac) == 0:
+ print('\nError:\nNo cac calibration images found')
+ self.log += '\nERROR: No CAC calibration images found!'
+ self.log += '\nCAC calibration aborted!'
+ return 1
+ """
+ if image is greyscale then CAC makes no sense
+ """
+ if self.grey:
+ print('\nERROR: Can\'t do CAC on greyscale image!')
+ self.log += '\nERROR: Cannot perform CAC calibration '
+ self.log += 'on greyscale image!\nCAC aborted!'
+ del self.json['rpi.cac']
+ return 0
+ a = time.time()
+ """
+ Check if camera is greyscale or color. If not greyscale, then perform cac
+ """
+ if do_alsc_colour:
+ """
+ Here we have a color sensor. Perform cac
+ """
+ try:
+ cacs = cac(self)
+ except ArithmeticError:
+ print('ERROR: Matrix is singular!\nTake new pictures and try again...')
+ self.log += '\nERROR: Singular matrix encountered during fit!'
+ self.log += '\nCAC aborted!'
+ return 1
+ else:
+ """
+ case where config options suggest greyscale camera. No point in doing CAC
+ """
+ cal_cr_list, cal_cb_list = None, None
+ self.log += '\nWARNING: No ALSC tables found.\nCAC calibration '
+ self.log += 'performed without ALSC correction...'
+
+ """
+ Write output to json
+ """
+ self.json['rpi.cac']['cac'] = cacs
+ self.log += '\nCAC calibration written to json file'
+ print('Finished CAC calibration')
+
+
+ """
Auto white balance calibration produces a colour curve for
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
- def awb_cal(self, greyworld, do_alsc_colour):
+ def awb_cal(self, greyworld, do_alsc_colour, grid_size):
if 'rpi.awb' in self.disable:
return 1
print('\nStarting AWB calibration')
@@ -306,7 +251,7 @@ class Camera:
call calibration function
"""
plot = "rpi.awb" in self.plot
- awb_out = awb(self, cal_cr_list, cal_cb_list, plot)
+ awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
ct_curve, transverse_neg, transverse_pos = awb_out
"""
write output to json
@@ -324,7 +269,7 @@ class Camera:
colour channel seperately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
- def alsc_cal(self, luminance_strength, do_alsc_colour):
+ def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0):
if 'rpi.alsc' in self.disable:
return 1
print('\nStarting ALSC calibration')
@@ -347,10 +292,10 @@ class Camera:
call calibration function
"""
plot = "rpi.alsc" in self.plot
- alsc_out = alsc_all(self, do_alsc_colour, plot)
+ alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
- write ouput to json and finish if not do_alsc_colour
+ write output to json and finish if not do_alsc_colour
"""
if not do_alsc_colour:
self.json['rpi.alsc']['luminance_lut'] = luminance_lut
@@ -393,7 +338,7 @@ class Camera:
"""
obtain worst-case scenario residual sigmas
"""
- sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list)
+ sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
"""
write output to json
"""
@@ -509,19 +454,20 @@ class Camera:
"""
writes the json dictionary to the raw json file then make pretty
"""
- def write_json(self):
+ def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
- "version": 2.0,
- 'target': 'bcm2835',
+ "version": version,
+ 'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
- f.write(pretty_print(out_json))
+ f.write(pretty_print(out_json,
+ custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
@@ -627,6 +573,16 @@ class Camera:
self.log += '\nWARNING: Error reading colour temperature'
self.log += '\nImage discarded!'
print('DISCARDED')
+ elif 'cac' in filename:
+ Img = load_image(self, address, mac=False)
+ self.log += '\nIdentified as a CAC image'
+ Img.name = filename
+ self.log += '\nColour temperature: {} K'.format(col)
+ self.imgs_cac.append(Img)
+ if blacklevel != -1:
+ Img.blacklevel_16 = blacklevel
+ print(img_suc_msg)
+ continue
else:
self.log += '\nIdentified as macbeth chart image'
"""
@@ -672,6 +628,7 @@ class Camera:
self.log += '\n\nImages found:'
self.log += '\nMacbeth : {}'.format(len(self.imgs))
self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
+ self.log += '\nCAC : {} '.format(len(self.imgs_cac))
self.log += '\n\nCamera metadata'
"""
check usable images found
@@ -680,22 +637,21 @@ class Camera:
print('\nERROR: No usable macbeth chart images found')
self.log += '\nERROR: No usable macbeth chart images found'
return 0
- elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0:
+ elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
print('\nERROR: No usable images found')
self.log += '\nERROR: No usable images found'
return 0
"""
Double check that every image has come from the same camera...
"""
- all_imgs = self.imgs + self.imgs_alsc
+ all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
camNames = list(set([Img.camName for Img in all_imgs]))
patterns = list(set([Img.pattern for Img in all_imgs]))
sigbitss = list(set([Img.sigbits for Img in all_imgs]))
blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
- if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \
- len(blacklevels) == 1 and len(sizes) == 1:
+ if 1:
self.grey = (patterns[0] == 128)
self.blacklevel_16 = blacklevels[0]
self.log += '\nName: {}'.format(camNames[0])
@@ -712,7 +668,7 @@ class Camera:
return 0
-def run_ctt(json_output, directory, config, log_output, alsc_only=False):
+def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
"""
check input files are jsons
"""
@@ -748,12 +704,14 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
greyworld = get_config(awb_d, "greyworld", 0, 'bool')
alsc_d = get_config(configs, "alsc", {}, 'dict')
do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
- luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num')
+ luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
+ lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num')
blacklevel = get_config(configs, "blacklevel", -1, 'num')
macbeth_d = get_config(configs, "macbeth", {}, 'dict')
mac_small = get_config(macbeth_d, "small", 0, 'bool')
mac_show = get_config(macbeth_d, "show", 0, 'bool')
mac_config = (mac_small, mac_show)
+ print("Read lsc_max_gain", lsc_max_gain)
if blacklevel < -1 or blacklevel >= 2**16:
print('\nInvalid blacklevel, defaulted to 64')
@@ -772,7 +730,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
initialise tuning tool and load images
"""
try:
- Cam = Camera(json_output)
+ Cam = Camera(json_output, json=json_template)
Cam.log_user_input(json_output, directory, config, log_output)
if alsc_only:
disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
@@ -794,14 +752,17 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
Cam.json_remove(disable)
print('\nSTARTING CALIBRATIONS')
- Cam.alsc_cal(luminance_strength, do_alsc_colour)
+ Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain)
Cam.geq_cal()
Cam.lux_cal()
Cam.noise_cal()
- Cam.awb_cal(greyworld, do_alsc_colour)
- Cam.ccm_cal(do_alsc_colour)
+ if "rpi.cac" in json_template:
+ Cam.cac_cal(do_alsc_colour)
+ Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
+ Cam.ccm_cal(do_alsc_colour, grid_size)
+
print('\nFINISHED CALIBRATIONS')
- Cam.write_json()
+ Cam.write_json(target=target, grid_size=grid_size)
Cam.write_log(log_output)
print('\nCalibrations written to: '+json_output)
if log_output is None:
@@ -811,20 +772,19 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
else:
Cam.write_log(log_output)
-
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
-
+ PiSP Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -833,5 +793,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)
diff --git a/utils/raspberrypi/ctt/ctt_alsc.py b/utils/raspberrypi/ctt/ctt_alsc.py
index e51d6931..1d94dfa5 100644
--- a/utils/raspberrypi/ctt/ctt_alsc.py
+++ b/utils/raspberrypi/ctt/ctt_alsc.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_alsc.py - camera tuning tool for ALSC (auto lens shading correction)
+# camera tuning tool for ALSC (auto lens shading correction)
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,8 +13,9 @@ from mpl_toolkits.mplot3d import Axes3D
"""
preform alsc calibration on a set of images
"""
-def alsc_all(Cam, do_alsc_colour, plot):
+def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0):
imgs_alsc = Cam.imgs_alsc
+ grid_w, grid_h = grid_size
"""
create list of colour temperatures and associated calibration tables
"""
@@ -23,7 +24,7 @@ def alsc_all(Cam, do_alsc_colour, plot):
list_cb = []
list_cg = []
for Img in imgs_alsc:
- col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
+ col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
@@ -68,11 +69,12 @@ def alsc_all(Cam, do_alsc_colour, plot):
t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
t_r = np.round(t_r, 3)
t_b = np.round(t_b, 3)
- r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
- b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
- r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
+ r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
+ b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
+ middle_pos = (grid_h // 2 - 1) * grid_w + grid_w - 1
+ r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
r_cen = round(r_cen/4, 3)
- b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
+ b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
b_cen = round(b_cen/4, 3)
Cam.log += '\nRed table corners: {}'.format(r_corners)
Cam.log += '\nRed table centre: {}'.format(r_cen)
@@ -116,8 +118,9 @@ def alsc_all(Cam, do_alsc_colour, plot):
"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
-def alsc(Cam, Img, do_alsc_colour, plot=False):
+def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0):
Cam.log += '\nProcessing image: ' + Img.name
+ grid_w, grid_h = grid_size
"""
get channel in correct order
"""
@@ -128,31 +131,34 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
where w is a multiple of 32.
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
average the green channels into one
"""
av_ch_g = np.mean((channels[1:3]), axis=0)
if do_alsc_colour:
"""
- obtain 16x12 grid of intensities for each channel and subtract black level
+ obtain grid_w x grid_h grid of intensities for each channel and subtract black level
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
- b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
+ b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
"""
calculate ratios as 32 bit in order to be supported by medianBlur function
"""
- cr = np.reshape(g/r, (12, 16)).astype('float32')
- cb = np.reshape(g/b, (12, 16)).astype('float32')
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
+ cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
"""
median blur to remove peaks and save as float 64
"""
cr = cv2.medianBlur(cr, 3).astype('float64')
+ cr = cr/np.min(cr) # gain tables are easier for humans to read if the minimum is 1.0
cb = cv2.medianBlur(cb, 3).astype('float64')
+ cb = cb/np.min(cb)
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
"""
debugging code showing 2D surface plot of vignetting. Quite useful for
@@ -164,7 +170,7 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
note Y is plotted as -Y so plot has same axes as image
"""
- X, Y = np.meshgrid(range(16), range(12))
+ X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
hb = hf.add_subplot(312, projection='3d')
@@ -176,21 +182,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
# print(Img.str)
plt.show()
- return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)
+ return Img.col, cr.flatten(), cb.flatten(), cg, (w, h, dx, dy)
else:
"""
only perform calculations for luminance shading
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
if plot:
hf = plt.figure(figssize=(8, 8))
ha = hf.add_subplot(1, 1, 1, projection='3d')
- X, Y = np.meashgrid(range(16), range(12))
+ X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg').format(Img.str)
plt.show()
@@ -199,21 +206,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
-Compresses channel down to a 16x12 grid
+Compresses channel down to a grid of the requested size
"""
-def get_16x12_grid(chan, dx, dy):
+def get_grid(chan, dx, dy, grid_size):
+ grid_w, grid_h = grid_size
grid = []
"""
since left and bottom border will not necessarily have rectangles of
dimension dx x dy, the 32nd iteration has to be handled separately.
"""
- for i in range(11):
- for j in range(15):
+ for i in range(grid_h - 1):
+ for j in range(grid_w - 1):
grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:]))
- for j in range(15):
- grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[11*dy:, 15*dx:]))
+ grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:]))
+ for j in range(grid_w - 1):
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)]))
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))
"""
return as np.array, ready for further manipulation
"""
@@ -223,7 +231,7 @@ def get_16x12_grid(chan, dx, dy):
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
-def get_sigma(Cam, cal_cr_list, cal_cb_list):
+def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
Cam.log += '\nCalculating sigmas'
"""
provided colour alsc tables were generated for two different colour
@@ -241,8 +249,8 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
sigma_rs = []
sigma_bs = []
for i in range(len(cts)-1):
- sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table']))
- sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table']))
+ sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
+ sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])
@@ -263,12 +271,13 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
"""
calculate sigma from two adjacent gain tables
"""
-def calc_sigma(g1, g2):
+def calc_sigma(g1, g2, grid_size):
+ grid_w, grid_h = grid_size
"""
reshape into 16x12 matrix
"""
- g1 = np.reshape(g1, (12, 16))
- g2 = np.reshape(g2, (12, 16))
+ g1 = np.reshape(g1, (grid_h, grid_w))
+ g2 = np.reshape(g2, (grid_h, grid_w))
"""
apply gains to gain table
"""
@@ -280,8 +289,8 @@ def calc_sigma(g1, g2):
neighbours, then append to list
"""
diffs = []
- for i in range(10):
- for j in range(14):
+ for i in range(grid_h - 2):
+ for j in range(grid_w - 2):
"""
note indexing is incremented by 1 since all patches on borders are
not counted
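The border handling in get_grid() can be read as: every cell is the mean of a dx x dy rectangle, and the final row and column simply absorb whatever remains of the image. A minimal equivalent sketch (image size and grid illustrative):

    import numpy as np

    def get_grid(chan, dx, dy, grid_size):
        grid_w, grid_h = grid_size
        grid = []
        for i in range(grid_h):
            y1 = dy * (i + 1) if i < grid_h - 1 else None  # None slices to the edge
            for j in range(grid_w):
                x1 = dx * (j + 1) if j < grid_w - 1 else None
                grid.append(np.mean(chan[dy * i:y1, dx * j:x1]))
        return np.array(grid)

    chan = np.random.rand(1200, 1600)              # one half-resolution channel
    dx, dy = -(-1599 // 16), -(-1199 // 12)        # ceil((w-1)/grid_w), ceil((h-1)/grid_h)
    print(get_grid(chan, dx, dy, (16, 12)).shape)  # -> (192,) i.e. 16x12 cells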
diff --git a/utils/raspberrypi/ctt/ctt_awb.py b/utils/raspberrypi/ctt/ctt_awb.py
index bf45e54d..4af1fe41 100644
--- a/utils/raspberrypi/ctt/ctt_awb.py
+++ b/utils/raspberrypi/ctt/ctt_awb.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_awb.py - camera tuning tool for AWB
+# camera tuning tool for AWB
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,7 +13,7 @@ from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
-def awb(Cam, cal_cr_list, cal_cb_list, plot):
+def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
@@ -43,7 +43,7 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
- r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
@@ -293,12 +293,13 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
"""
obtain greyscale patches and perform alsc colour correction
"""
-def get_alsc_patches(Img, colour_cals, grey=True):
+def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
+ grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
@@ -345,12 +346,12 @@ def get_alsc_patches(Img, colour_cals, grey=True):
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
- col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
new file mode 100644
index 00000000..5a4c5101
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_cac.py
@@ -0,0 +1,228 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
+
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import cm
+
+from ctt_dots_locator import find_dots_locations
+
+
+# This is the wrapper file that creates a JSON entry for you to append
+# to your camera tuning file.
+# It calculates the chromatic aberration at different points throughout
+# the image and uses that to produce a matrix that can then be used
+# in the camera tuning files to correct this aberration.
+
+
+def pprint_array(array):
+ # Function to print the array in a tidier format
+ array = array
+ output = ""
+ for i in range(len(array)):
+ for j in range(len(array[0])):
+ output += str(round(array[i, j], 2)) + ", "
+ # Add the necessary indentation to the array
+ output += "\n "
+ # Cut off the end of the array (nicely formats it)
+ return output[:-22]
+
+
+def plot_shifts(red_shifts, blue_shifts):
+ # If users want, they can pass a command line option to show the shifts on a graph
+ # Can be useful to check that the functions are all working, and that the sample
+ # images are doing the right thing
+ Xs = np.array(red_shifts)[:, 0]
+ Ys = np.array(red_shifts)[:, 1]
+ Zs = np.array(red_shifts)[:, 2]
+ Zs2 = np.array(red_shifts)[:, 3]
+ Zs3 = np.array(blue_shifts)[:, 2]
+ Zs4 = np.array(blue_shifts)[:, 3]
+
+ fig, axs = plt.subplots(2, 2)
+ ax = fig.add_subplot(2, 2, 1, projection='3d')
+ ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red X Shift')
+ ax = fig.add_subplot(2, 2, 2, projection='3d')
+ ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red Y Shift')
+ ax = fig.add_subplot(2, 2, 3, projection='3d')
+ ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue X Shift')
+ ax = fig.add_subplot(2, 2, 4, projection='3d')
+ ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue Y Shift')
+ fig.tight_layout()
+ plt.show()
+
+
+def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
+ # Convert the shifts to a numpy array for easier handling and initialise other variables
+ red_shifts = np.array(red_shift)
+ blue_shifts = np.array(blue_shift)
+ # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
+ xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+ xrsgrid = []
+ xbsgrid = []
+ yrsgrid = []
+ ybsgrid = []
+ xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+ # Format the grids - numpy doesn't work well for this, as it wants a
+ # nice uniformly spaced grid, which we can't yet guarantee we have, hence the rather mundane setup
+ for x in range(output_grid_size - 1):
+ xrsgrid.append([])
+ yrsgrid.append([])
+ xbsgrid.append([])
+ ybsgrid.append([])
+ for y in range(output_grid_size - 1):
+ xrsgrid[x].append([])
+ yrsgrid[x].append([])
+ xbsgrid[x].append([])
+ ybsgrid[x].append([])
+
+ image_size = (image_dimensions[0], image_dimensions[1])
+ gridxsize = image_size[0] / (output_grid_size - 1)
+ gridysize = image_size[1] / (output_grid_size - 1)
+
+ # Iterate through each dot and its shift values, and put these into the correct grid location
+ for red_shift in red_shifts:
+ xgridloc = int(red_shift[0] / gridxsize)
+ ygridloc = int(red_shift[1] / gridysize)
+ xrsgrid[xgridloc][ygridloc].append(red_shift[2])
+ yrsgrid[xgridloc][ygridloc].append(red_shift[3])
+
+ for blue_shift in blue_shifts:
+ xgridloc = int(blue_shift[0] / gridxsize)
+ ygridloc = int(blue_shift[1] / gridysize)
+ xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
+ ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
+
+ # Now calculate the average pixel shift for each square in the grid
+ for x in range(output_grid_size - 1):
+ for y in range(output_grid_size - 1):
+ xrgrid[x, y] = np.mean(xrsgrid[x][y])
+ yrgrid[x, y] = np.mean(yrsgrid[x][y])
+ xbgrid[x, y] = np.mean(xbsgrid[x][y])
+ ybgrid[x, y] = np.mean(ybsgrid[x][y])
+
+ # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
+ input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
+ output_grids = np.zeros((4, output_grid_size, output_grid_size))
+
+ # Interpolate the centre of the grid
+ output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
+
+ # Edge cases:
+ output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
+ output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
+ output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
+ output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
+
+ # Corner Cases:
+ output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
+ output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
+ output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
+ output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
+
+ # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
+ # This is due to the PiSP (standard) dimensions being flipped in comparison to
+ # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+ # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
+
+ output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
+ return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
+
+
+def analyse_dot(dot, dot_location=[0, 0]):
+ # Scan through the dot, calculate the centroid of each colour channel by doing:
+ # pixel channel brightness * distance from top left corner
+ # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
+ red_channel = np.array(dot)[:, :, 0]
+ y_num_pixels = len(red_channel[0])
+ x_num_pixels = len(red_channel)
+ yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
+ xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
+ red_sum = np.sum(red_channel)
+
+ green_channel = np.array(dot)[:, :, 1]
+ ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
+ xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
+ green_sum = np.sum(green_channel)
+
+ blue_channel = np.array(dot)[:, :, 2]
+ yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
+ xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
+ blue_sum = np.sum(blue_channel)
+
+ # We return this structure. It contains 2 arrays that contain:
+ # the locations of the dot center, along with the channel shifts in the x and y direction:
+ # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
+
+ return [[int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2), xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum], [dot_location[0] + int(len(dot) / 2), dot_location[1] + int(len(dot[0]) / 2), xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]]
+
+
+def cac(Cam):
+ filelist = Cam.imgs_cac
+
+ Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+ # Multiple files are recommended to average out the lens aberration through rotations
+ for file in filelist:
+ Cam.log += '\nCAC processing file'
+ print("\n Processing file")
+ # Read the raw RGB values
+ rgb = file.rgb
+ image_size = [file.h, file.w] # Image size, X, Y
+ # Create a colour copy of the RGB values to use later in the calibration
+ imout = Image.new(mode="RGB", size=image_size)
+ rgb_image = np.array(imout)
+ # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+ rgb.reshape((image_size[0], image_size[1], 3))
+ rgb_image = rgb
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ Cam.log += '\nFinding dots'
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ Cam.log += '\nDots found: {}'.format(str(len(dots)))
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ Cam.log += '\nCreating output grid'
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+ Cam.log += '\nCAC correction complete!'
+
+ # Give the JSON dict back to the main ctt program
+ return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
diff --git a/utils/raspberrypi/ctt/ctt_ccm.py b/utils/raspberrypi/ctt/ctt_ccm.py
index a09bfd09..07c943a8 100644
--- a/utils/raspberrypi/ctt/ctt_ccm.py
+++ b/utils/raspberrypi/ctt/ctt_ccm.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ccm.py - camera tuning tool for CCM (colour correction matrix)
+# camera tuning tool for CCM (colour correction matrix)
from ctt_image_load import *
from ctt_awb import get_alsc_patches
@@ -56,7 +56,7 @@ FInds colour correction matrices for list of images
"""
-def ccm(Cam, cal_cr_list, cal_cb_list):
+def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
global matrix_selection_types, typenum
imgs = Cam.imgs
"""
@@ -133,9 +133,7 @@ def ccm(Cam, cal_cr_list, cal_cb_list):
Note: if alsc is disabled then colour_cals will be set to None and no
the function will simply return the macbeth patches
"""
- r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
- # 256 values for each patch of sRGB values
-
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
diff --git a/utils/raspberrypi/ctt/ctt_config_example.json b/utils/raspberrypi/ctt/ctt_config_example.json
index c7f90761..1105862c 100644
--- a/utils/raspberrypi/ctt/ctt_config_example.json
+++ b/utils/raspberrypi/ctt/ctt_config_example.json
@@ -3,7 +3,8 @@
"plot": [],
"alsc": {
"do_alsc_colour": 1,
- "luminance_strength": 0.5
+ "luminance_strength": 0.8,
+ "max_gain": 8.0
},
"awb": {
"greyworld": 0
@@ -13,4 +14,4 @@
"small": 0,
"show": 0
}
-}
\ No newline at end of file
+}
diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py
new file mode 100644
index 00000000..4945c04b
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_dots_locator.py
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_dots_locator.py - Used by the CAC algorithm to convert an image into a set of dots
+
+'''
+This file takes the black and white version of the image, along with
+the color version. It then locates the black dots on the image by
+thresholding dark pixels.
+In a rather fun way, the algorithm bounces around the thresholded area in a random path
+We then use the maximum and minimum of these paths to determine the dot shape and size
+This info is then used to return colored dots and locations back to the main file
+'''
+
+import numpy as np
+import random
+from PIL import Image, ImageEnhance, ImageFilter
+
+
+def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10):
+ # Initialise some starting variables
+ pixels = Image.fromarray(rgb_image)
+ pixels = pixels.convert("L")
+ enhancer = ImageEnhance.Contrast(pixels)
+ im_output = enhancer.enhance(1.4)
+ # We smooth it slightly to make it easier for the dot recognition program to locate the dots
+ im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
+ bw_image = np.array(im_output)
+
+ location = [0, 0]
+ dots = []
+ dots_location = []
+ # the program stays away from the edges - we don't want a dot that is half a circle, as the
+ # centroids would all be wrong
+ for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
+ for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
+ location = [x, y]
+ scrap_dot = False # A variable used to make sure that this is a valid dot
+ if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
+ heading = "south" # Define a starting direction to move in
+ coords = []
+ for i in range(search_path_length): # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
+ # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
+ if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
+ if heading == "south":
+ if bw_image[location[0] + 1, location[1]] < color_threshold:
+ # Here, notice it does not go south, but actually goes southeast
+ # This is crucial in ensuring that we make our way around the majority of the dot
+ location[0] = location[0] + 1
+ location[1] = location[1] + 1
+ heading = "south"
+ else:
+ # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+
+ if heading == "east":
+ if bw_image[location[0], location[1] + 1] < color_threshold:
+ location[1] = location[1] + 1
+ heading = "east"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "west":
+ if bw_image[location[0], location[1] - 1] < color_threshold:
+ location[1] = location[1] - 1
+ heading = "west"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "north":
+ if bw_image[location[0] - 1, location[1]] < color_threshold:
+ location[0] = location[0] - 1
+ heading = "north"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+ # Log where our particle travels across the dot
+ coords.append([location[0], location[1]])
+ else:
+ scrap_dot = True # We just don't have enough space around the dot, discard this one, and move on
+ if not scrap_dot:
+ # get the size of the bounding box surrounding the dot
+ x_coords = np.array(coords)[:, 0]
+ y_coords = np.array(coords)[:, 1]
+ hsquaresize = max(list(x_coords)) - min(list(x_coords))
+ vsquaresize = max(list(y_coords)) - min(list(y_coords))
+ # Create the bounding coordinates of the rectangle surrounding the dot
+ # Program pads the dot by almost half the dot size on each side to ensure we get all that color fringing
+ extra_space_factor = 0.45
+ top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
+ btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
+ top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
+ btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
+ # Overwrite the area of the dot to ensure we don't use it again
+ bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
+ # Add the color version of the dot to the list to send off, along with some coordinates.
+ dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
+ dots_location.append([top_left_x, top_left_y])
+ else:
+ # Dot was too close to the image border to be usable
+ pass
+ return dots, dots_location
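The padding arithmetic at the end of the walk is worth pinning down: the bounding box of the visited coordinates is grown by extra_space_factor (0.45) of the dot size on every side, so the cut-out is roughly 1.9x the dot in each dimension. A toy sketch (coordinates illustrative):

    import numpy as np

    coords = np.array([[100, 200], [104, 205], [108, 209], [103, 202]])  # walk path
    x_coords, y_coords = coords[:, 0], coords[:, 1]
    hsquaresize = x_coords.max() - x_coords.min()  # 8
    vsquaresize = y_coords.max() - y_coords.min()  # 9
    extra_space_factor = 0.45
    top_left_x = x_coords.min() - int(hsquaresize * extra_space_factor)   # 97
    btm_right_x = x_coords.max() + int(hsquaresize * extra_space_factor)  # 111
    top_left_y = y_coords.min() - int(vsquaresize * extra_space_factor)   # 196
    btm_right_y = y_coords.max() + int(vsquaresize * extra_space_factor)  # 213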
diff --git a/utils/raspberrypi/ctt/ctt_geq.py b/utils/raspberrypi/ctt/ctt_geq.py
index c45addcd..5a91ebb4 100644
--- a/utils/raspberrypi/ctt/ctt_geq.py
+++ b/utils/raspberrypi/ctt/ctt_geq.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_geq.py - camera tuning tool for GEQ (green equalisation)
+# camera tuning tool for GEQ (green equalisation)
from ctt_tools import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py
index 310c5e88..531de328 100644
--- a/utils/raspberrypi/ctt/ctt_image_load.py
+++ b/utils/raspberrypi/ctt/ctt_image_load.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
-# ctt_image_load.py - camera tuning tool image loading
+# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
@@ -350,6 +350,7 @@ def dng_load_image(Cam, im_str):
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]
+ Img.rgb = raw_im.postprocess()
except Exception:
print("\nERROR: failed to load DNG file", im_str)
diff --git a/utils/raspberrypi/ctt/ctt_lux.py b/utils/raspberrypi/ctt/ctt_lux.py
index 70855e1b..46be1512 100644
--- a/utils/raspberrypi/ctt/ctt_lux.py
+++ b/utils/raspberrypi/ctt/ctt_lux.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_lux.py - camera tuning tool for lux level
+# camera tuning tool for lux level
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_macbeth_locator.py b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
index 178aeed0..f22dbf31 100644
--- a/utils/raspberrypi/ctt/ctt_macbeth_locator.py
+++ b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_macbeth_locator.py - camera tuning tool Macbeth chart locator
+# camera tuning tool Macbeth chart locator
from ctt_ransac import *
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_noise.py b/utils/raspberrypi/ctt/ctt_noise.py
index 3270bf34..0b18d83f 100644
--- a/utils/raspberrypi/ctt/ctt_noise.py
+++ b/utils/raspberrypi/ctt/ctt_noise.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_noise.py - camera tuning tool noise calibration
+# camera tuning tool noise calibration
from ctt_image_load import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py
new file mode 100755
index 00000000..a59b053c
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_pisp.py
@@ -0,0 +1,805 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# ctt_pisp.py - camera tuning tool data for PiSP platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.dpc": {
+ "strength": 1
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.denoise":
+ {
+ "normal":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 0.8,
+ "threshold": 0.05
+ }
+ },
+ "hdr":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ },
+ "night":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ }
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 7700},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8000}
+ },
+ "bayes": 1
+ },
+ "rpi.agc":
+ {
+ "channels":
+ [
+ {
+ "comment": "Channel 0 is normal AGC",
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 60000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ },
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 1 is the HDR short channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 2 is the HDR long channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ ],
+ "highlight": [
+ ],
+ "shadows": [
+ ]
+ },
+ "channel_constraints":
+ [
+ {
+ "bound": "UPPER",
+ "channel": 4,
+ "factor": 8
+ },
+ {
+ "bound": "LOWER",
+ "channel": 4,
+ "factor": 2
+ }
+ ],
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 3 is the night mode channel",
+ "base_ev": 0.33,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 66666, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 4.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.17
+ ]
+ }
+ ]
+ },
+ "rpi.alsc": {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.cac": {
+ },
+ "rpi.sharpen": {
+ "threshold": 0.25,
+ "limit": 1.0,
+ "strength": 1.0
+ },
+ "rpi.hdr":
+ {
+ "Off":
+ {
+ "cadence": [ 0 ]
+ },
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map": { "short": 1, "long": 2 }
+ },
+ "SingleExposure":
+ {
+ "cadence": [1],
+ "channel_map": { "short": 1 },
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "MultiExposure":
+ {
+ "cadence": [1, 2],
+ "channel_map": { "short": 1, "long": 2 },
+ "stitch_enable": 1,
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "Night":
+ {
+ "cadence": [ 3 ],
+ "channel_map": { "night": 3 },
+ "tonemap_enable": 1,
+ "tonemap":
+ [
+ 0, 0,
+ 5000, 20000,
+ 10000, 30000,
+ 20000, 47000,
+ 30000, 55000,
+ 65535, 65535
+ ]
+ }
+ }
+}
+
+grid_size = (32, 32)
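
Each platform module exports just these two names, json_template and grid_size, and callers pick the module from the -t argument. A minimal sketch of that selection (the helper name is illustrative):

    import importlib

    def load_platform(target):
        # target is 'pisp' or 'vc4', matching the module names ctt_pisp / ctt_vc4
        mod = importlib.import_module('ctt_' + target)
        return mod.json_template, mod.grid_size

    json_template, grid_size = load_platform('pisp')  # grid_size == (32, 32)
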
diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
index 3e3b8475..a4cae62d 100755
--- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py
+++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
@@ -19,13 +19,19 @@ class Encoder(json.JSONEncoder):
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
+ 'weights': 15,
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
+ 'lut_rx': 9,
+ 'lut_bx': 9,
+ 'lut_by': 9,
+ 'lut_ry': 9,
'gamma_curve': 2,
'y_target': 2,
- 'prior': 2
+ 'prior': 2,
+ 'tonemap': 2
}
def encode(self, o, node_key=None):
@@ -87,7 +93,7 @@ class Encoder(json.JSONEncoder):
return self.encode(o)
-def pretty_print(in_json: dict) -> str:
+def pretty_print(in_json: dict, custom_elems={}) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
@@ -95,12 +101,15 @@ def pretty_print(in_json: dict) -> str:
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
- return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
+ encoder = Encoder(indent=4, sort_keys=False)
+ encoder.custom_elems |= custom_elems
+ return encoder.encode(in_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
'Prettify a version 2.0 camera tuning config JSON file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('input', type=str, help='Input tuning file.')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
@@ -110,7 +119,12 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = pretty_print(in_json)
+ if args.target == 'pisp':
+ from ctt_pisp import grid_size
+ elif args.target == 'vc4':
+ from ctt_vc4 import grid_size
+
+ out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
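
custom_elems maps an array's key to the number of elements printed per line, so passing the platform grid width keeps each ALSC table row on one line. A short usage sketch, assuming a version 2.0 tuning file on disk:

    import json
    from ctt_pisp import grid_size
    from ctt_pretty_print_json import pretty_print

    with open('tuning.json') as f:
        in_json = json.load(f)

    out_json = pretty_print(in_json, custom_elems={'table': grid_size[0],
                                                   'luminance_lut': grid_size[0]})
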
diff --git a/utils/raspberrypi/ctt/ctt_ransac.py b/utils/raspberrypi/ctt/ctt_ransac.py
index 9ed7d93c..01bba302 100644
--- a/utils/raspberrypi/ctt/ctt_ransac.py
+++ b/utils/raspberrypi/ctt/ctt_ransac.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ransac.py - camera tuning tool RANSAC selector for Macbeth chart locator
+# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
diff --git a/utils/raspberrypi/ctt/ctt_tools.py b/utils/raspberrypi/ctt/ctt_tools.py
index 79195289..50b01ecf 100644
--- a/utils/raspberrypi/ctt/ctt_tools.py
+++ b/utils/raspberrypi/ctt/ctt_tools.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_tools.py - camera tuning tool miscellaneous
+# camera tuning tool miscellaneous
import time
import re
@@ -65,11 +65,12 @@ def parse_input():
directory = get_config(args_dict, '-i', None, 'string')
config = get_config(args_dict, '-c', None, 'string')
log_path = get_config(args_dict, '-l', None, 'string')
+ target = get_config(args_dict, '-t', 'vc4', 'string')
if directory is None:
raise ArgError('\n\nERROR! No input directory given.')
if json_output is None:
raise ArgError('\n\nERROR! No output json given.')
- return json_output, directory, config, log_path
+ return json_output, directory, config, log_path, target
"""
diff --git a/utils/raspberrypi/ctt/ctt_vc4.py b/utils/raspberrypi/ctt/ctt_vc4.py
new file mode 100755
index 00000000..7154e110
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_vc4.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# ctt_vc4.py - camera tuning tool data for VC4 platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.dpc": {
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.sdn": {
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 8000},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8600}
+ },
+ "bayes": 1
+ },
+ "rpi.agc": {
+ "metering_modes": {
+ "centre-weighted": {
+ "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
+ },
+ "spot": {
+ "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+ "matrix": {
+ "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ }
+ },
+ "exposure_modes": {
+ "normal": {
+ "shutter": [100, 10000, 30000, 60000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ },
+ "short": {
+ "shutter": [100, 5000, 10000, 20000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ }
+ },
+ "constraint_modes": {
+ "normal": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
+ ],
+ "highlight": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
+ {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
+ ]
+ },
+ "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
+ },
+ "rpi.alsc": {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.sharpen": {
+ }
+}
+
+grid_size = (16, 12)
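
Note the differing grids: VC4 lens shading tables are 16x12 while PiSP's are 32x32, so the same tuning steps produce very different table sizes. A small sketch, assuming grid_size is ordered (width, height) as the pretty printer's per-line wrapping implies:

    from ctt_pisp import grid_size as pisp_grid
    from ctt_vc4 import grid_size as vc4_grid

    def flat_table(grid):
        # Identity shading table: one gain entry per cell
        width, height = grid
        return [1.0] * (width * height)

    print(len(flat_table(vc4_grid)))   # 192 entries per channel
    print(len(flat_table(pisp_grid)))  # 1024 entries per channel
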