summaryrefslogtreecommitdiff
path: root/utils
diff options
context:
space:
mode:
Diffstat (limited to 'utils')
-rwxr-xr-xutils/checkstyle.py97
-rwxr-xr-xutils/gen-controls.py21
-rwxr-xr-xutils/gen-formats.py2
-rwxr-xr-xutils/gen-header.sh2
-rwxr-xr-xutils/gen-ipa-priv-key.sh2
-rwxr-xr-xutils/gen-ipa-pub-key.py2
-rwxr-xr-xutils/ipc/extract-docs.py4
-rwxr-xr-xutils/ipc/generate.py2
-rw-r--r--utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl2
-rw-r--r--utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl2
-rw-r--r--utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl2
-rw-r--r--utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl10
-rw-r--r--utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl2
-rw-r--r--utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl2
-rw-r--r--utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl2
-rw-r--r--utils/ipc/generators/mojom_libcamera_generator.py2
-rwxr-xr-xutils/ipc/parser.py2
-rwxr-xr-xutils/ipu3/ipu3-capture.sh2
-rw-r--r--utils/ipu3/ipu3-pack.c4
-rwxr-xr-xutils/ipu3/ipu3-process.sh2
-rw-r--r--utils/ipu3/ipu3-unpack.c3
-rwxr-xr-xutils/raspberrypi/ctt/alsc_only.py20
-rw-r--r--utils/raspberrypi/ctt/cac_only.py142
-rw-r--r--utils/raspberrypi/ctt/colors.py2
-rwxr-xr-xutils/raspberrypi/ctt/convert_tuning.py98
-rwxr-xr-xutils/raspberrypi/ctt/ctt.py257
-rw-r--r--utils/raspberrypi/ctt/ctt_alsc.py83
-rw-r--r--utils/raspberrypi/ctt/ctt_awb.py13
-rw-r--r--utils/raspberrypi/ctt/ctt_cac.py228
-rw-r--r--utils/raspberrypi/ctt/ctt_ccm.py8
-rw-r--r--utils/raspberrypi/ctt/ctt_config_example.json5
-rw-r--r--utils/raspberrypi/ctt/ctt_dots_locator.py118
-rw-r--r--utils/raspberrypi/ctt/ctt_geq.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_image_load.py3
-rw-r--r--utils/raspberrypi/ctt/ctt_lux.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_macbeth_locator.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_noise.py2
-rwxr-xr-xutils/raspberrypi/ctt/ctt_pisp.py805
-rwxr-xr-xutils/raspberrypi/ctt/ctt_pretty_print_json.py22
-rw-r--r--utils/raspberrypi/ctt/ctt_ransac.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_tools.py5
-rwxr-xr-xutils/raspberrypi/ctt/ctt_vc4.py126
-rwxr-xr-xutils/rkisp1/rkisp1-capture.sh3
-rwxr-xr-xutils/tracepoints/analyze-ipa-trace.py2
-rwxr-xr-xutils/tracepoints/gen-tp-header.py2
-rw-r--r--utils/tuning/libtuning/average.py2
-rw-r--r--utils/tuning/libtuning/generators/generator.py2
-rw-r--r--utils/tuning/libtuning/generators/raspberrypi_output.py2
-rw-r--r--utils/tuning/libtuning/generators/yaml_output.py2
-rw-r--r--utils/tuning/libtuning/gradient.py2
-rw-r--r--utils/tuning/libtuning/image.py2
-rw-r--r--utils/tuning/libtuning/libtuning.py2
-rw-r--r--utils/tuning/libtuning/macbeth.py2
-rw-r--r--utils/tuning/libtuning/modules/agc/__init__.py6
-rw-r--r--utils/tuning/libtuning/modules/agc/agc.py21
-rw-r--r--utils/tuning/libtuning/modules/agc/rkisp1.py79
-rw-r--r--utils/tuning/libtuning/modules/lsc/raspberrypi.py2
-rw-r--r--utils/tuning/libtuning/modules/lsc/rkisp1.py2
-rw-r--r--utils/tuning/libtuning/modules/module.py2
-rw-r--r--utils/tuning/libtuning/parsers/parser.py2
-rw-r--r--utils/tuning/libtuning/parsers/raspberrypi_parser.py2
-rw-r--r--utils/tuning/libtuning/parsers/yaml_parser.py2
-rw-r--r--utils/tuning/libtuning/smoothing.py2
-rw-r--r--utils/tuning/libtuning/utils.py2
-rw-r--r--utils/tuning/raspberrypi/alsc.py2
-rwxr-xr-xutils/tuning/raspberrypi_alsc_only.py2
-rwxr-xr-xutils/tuning/rkisp1.py6
-rwxr-xr-xutils/update-kernel-headers.sh1
68 files changed, 1969 insertions, 301 deletions
diff --git a/utils/checkstyle.py b/utils/checkstyle.py
index 84f44a42..4185c39a 100755
--- a/utils/checkstyle.py
+++ b/utils/checkstyle.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# checkstyle.py - A patch style checker script based on clang-format
+# A patch style checker script based on clang-format
#
# TODO:
#
@@ -556,20 +556,49 @@ class StyleChecker(metaclass=ClassRegistry):
class StyleIssue(object):
- def __init__(self, line_number, line, msg):
+ def __init__(self, line_number, position, line, msg):
self.line_number = line_number
+ self.position = position
self.line = line
self.msg = msg
+class HexValueChecker(StyleChecker):
+ patterns = ('*.c', '*.cpp', '*.h')
+
+ regex = re.compile(r'\b0[xX][0-9a-fA-F]+\b')
+
+ def __init__(self, content):
+ super().__init__()
+ self.__content = content
+
+ def check(self, line_numbers):
+ issues = []
+
+ for line_number in line_numbers:
+ line = self.__content[line_number - 1]
+ match = HexValueChecker.regex.search(line)
+ if not match:
+ continue
+
+ value = match.group(0)
+ if value == value.lower():
+ continue
+
+ issues.append(StyleIssue(line_number, match.span(0), line,
+ f'Use lowercase hex constant {value.lower()}'))
+
+ return issues
+
+
class IncludeChecker(StyleChecker):
patterns = ('*.cpp', '*.h')
- headers = ('assert', 'ctype', 'errno', 'fenv', 'float', 'inttypes',
- 'limits', 'locale', 'setjmp', 'signal', 'stdarg', 'stddef',
- 'stdint', 'stdio', 'stdlib', 'string', 'time', 'uchar', 'wchar',
- 'wctype')
- include_regex = re.compile('^#include <c([a-z]*)>')
+ headers = ('cassert', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes',
+ 'climits', 'clocale', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cuchar',
+ 'cwchar', 'cwctype', 'math.h')
+ include_regex = re.compile(r'^#include <([a-z.]*)>')
def __init__(self, content):
super().__init__()
@@ -588,8 +617,15 @@ class IncludeChecker(StyleChecker):
if header not in IncludeChecker.headers:
continue
- issues.append(StyleIssue(line_number, line,
- 'C compatibility header <%s.h> is preferred' % header))
+ if header.endswith('.h'):
+ header_type = 'C++'
+ header = 'c' + header[:-2]
+ else:
+ header_type = 'C compatibility'
+ header = header[1:] + '.h'
+
+ issues.append(StyleIssue(line_number, match.span(1), line,
+ f'{header_type} header <{header}> is preferred'))
return issues
@@ -606,10 +642,12 @@ class LogCategoryChecker(StyleChecker):
issues = []
for line_number in line_numbers:
line = self.__content[line_number-1]
- if not LogCategoryChecker.log_regex.search(line):
+ match = LogCategoryChecker.log_regex.search(line)
+ if not match:
continue
- issues.append(StyleIssue(line_number, line, 'LOG() should use categories'))
+ issues.append(StyleIssue(line_number, match.span(1), line,
+ 'LOG() should use categories'))
return issues
@@ -625,14 +663,16 @@ class MesonChecker(StyleChecker):
issues = []
for line_number in line_numbers:
line = self.__content[line_number-1]
- if line.find('\t') != -1:
- issues.append(StyleIssue(line_number, line, 'meson.build should use spaces for indentation'))
+ pos = line.find('\t')
+ if pos != -1:
+ issues.append(StyleIssue(line_number, [pos, pos], line,
+ 'meson.build should use spaces for indentation'))
return issues
class Pep8Checker(StyleChecker):
patterns = ('*.py',)
- results_regex = re.compile('stdin:([0-9]+):([0-9]+)(.*)')
+ results_regex = re.compile(r'stdin:([0-9]+):([0-9]+)(.*)')
def __init__(self, content):
super().__init__()
@@ -646,7 +686,7 @@ class Pep8Checker(StyleChecker):
ret = subprocess.run(['pycodestyle', '--ignore=E501', '-'],
input=data, stdout=subprocess.PIPE)
except FileNotFoundError:
- issues.append(StyleIssue(0, None, 'Please install pycodestyle to validate python additions'))
+ issues.append(StyleIssue(0, None, None, 'Please install pycodestyle to validate python additions'))
return issues
results = ret.stdout.decode('utf-8').splitlines()
@@ -658,14 +698,14 @@ class Pep8Checker(StyleChecker):
if line_number in line_numbers:
line = self.__content[line_number - 1]
- issues.append(StyleIssue(line_number, line, msg))
+ issues.append(StyleIssue(line_number, None, line, msg))
return issues
class ShellChecker(StyleChecker):
patterns = ('*.sh',)
- results_line_regex = re.compile('In - line ([0-9]+):')
+ results_line_regex = re.compile(r'In - line ([0-9]+):')
def __init__(self, content):
super().__init__()
@@ -679,7 +719,7 @@ class ShellChecker(StyleChecker):
ret = subprocess.run(['shellcheck', '-Cnever', '-'],
input=data, stdout=subprocess.PIPE)
except FileNotFoundError:
- issues.append(StyleIssue(0, None, 'Please install shellcheck to validate shell script additions'))
+ issues.append(StyleIssue(0, None, None, 'Please install shellcheck to validate shell script additions'))
return issues
results = ret.stdout.decode('utf-8').splitlines()
@@ -692,11 +732,8 @@ class ShellChecker(StyleChecker):
line = results[nr + 1]
msg = results[nr + 2]
- # Determined, but not yet used
- position = msg.find('^') + 1
-
if line_number in line_numbers:
- issues.append(StyleIssue(line_number, line, msg))
+ issues.append(StyleIssue(line_number, None, line, msg))
return issues
@@ -753,7 +790,8 @@ class CLangFormatter(Formatter):
class DoxygenFormatter(Formatter):
patterns = ('*.c', '*.cpp')
- return_regex = re.compile(' +\\* +\\\\return +[a-z]')
+ oneliner_regex = re.compile(r'^ +\* +\\(brief|param|return)\b.*\.$')
+ return_regex = re.compile(r' +\* +\\return +[a-z]')
@classmethod
def format(cls, filename, data):
@@ -768,6 +806,7 @@ class DoxygenFormatter(Formatter):
lines.append(line)
continue
+ line = cls.oneliner_regex.sub(lambda m: m.group(0)[:-1], line)
line = cls.return_regex.sub(lambda m: m.group(0)[:-1] + m.group(0)[-1].upper(), line)
if line.find('*/') != -1:
@@ -813,7 +852,7 @@ class DPointerFormatter(Formatter):
class IncludeOrderFormatter(Formatter):
patterns = ('*.cpp', '*.h')
- include_regex = re.compile('^#include (["<])([^">]*)([">])')
+ include_regex = re.compile(r'^#include (["<])([^">]*)([">])')
@classmethod
def format(cls, filename, data):
@@ -936,6 +975,16 @@ def check_file(top_level, commit, filename, checkers):
print('%s+%s%s' % (Colours.fg(Colours.Yellow), issue.line.rstrip(),
Colours.reset()))
+ if issue.position is not None:
+ # Align the position marker by using the original line with
+ # all characters except for tabs replaced with spaces. This
+ # ensures proper alignment regardless of how the code is
+ # indented.
+ start = issue.position[0]
+ prefix = ''.join([c if c == '\t' else ' ' for c in issue.line[:start]])
+ length = issue.position[1] - start - 1
+ print(' ' + prefix + '^' + '~' * length)
+
return len(formatted_diff) + len(issues)
diff --git a/utils/gen-controls.py b/utils/gen-controls.py
index 6cd5e362..56315f50 100755
--- a/utils/gen-controls.py
+++ b/utils/gen-controls.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# gen-controls.py - Generate control definitions from YAML
+# Generate control definitions from YAML
import argparse
from functools import reduce
@@ -140,6 +140,12 @@ ${description}
*/''')
enum_values_start = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values = {''')
enum_values_values = string.Template('''\tstatic_cast<int32_t>(${name}),''')
+ name_value_map_doc = string.Template('''/**
+ * \\var ${name}NameValueMap
+ * \\brief Map of all $name supported value names (in std::string format) to value
+ */''')
+ name_value_map_start = string.Template('''extern const std::map<std::string, ${type}> ${name}NameValueMap = {''')
+ name_value_values = string.Template('''\t{ "${name}", ${name} },''')
ctrls_doc = {}
ctrls_def = {}
@@ -183,6 +189,7 @@ ${description}
values_info = {
'name': info['name'],
+ 'type': ctrl.type,
'size': num_entries,
}
target_doc.append(enum_values_doc.substitute(values_info))
@@ -194,6 +201,15 @@ ${description}
target_def.append(enum_values_values.substitute(value_info))
target_def.append("};")
+ target_doc.append(name_value_map_doc.substitute(values_info))
+ target_def.append(name_value_map_start.substitute(values_info))
+ for enum in ctrl.enum_values:
+ value_info = {
+ 'name': enum.name
+ }
+ target_def.append(name_value_values.substitute(value_info))
+ target_def.append("};")
+
target_doc.append(doc_template.substitute(info))
target_def.append(def_template.substitute(info))
@@ -231,6 +247,7 @@ def generate_h(controls, mode, ranges):
enum_template_start = string.Template('''enum ${name}Enum {''')
enum_value_template = string.Template('''\t${name} = ${value},''')
enum_values_template = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values;''')
+ name_value_map_template = string.Template('''extern const std::map<std::string, ${type}> ${name}NameValueMap;''')
template = string.Template('''extern const Control<${type}> ${name};''')
ctrls = {}
@@ -273,9 +290,11 @@ def generate_h(controls, mode, ranges):
values_info = {
'name': info['name'],
+ 'type': ctrl.type,
'size': num_entries,
}
target_ctrls.append(enum_values_template.substitute(values_info))
+ target_ctrls.append(name_value_map_template.substitute(values_info))
target_ctrls.append(template.substitute(info))
id_value[vendor] += 1
diff --git a/utils/gen-formats.py b/utils/gen-formats.py
index da79a8bb..0c0932a5 100755
--- a/utils/gen-formats.py
+++ b/utils/gen-formats.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# gen-formats.py - Generate formats definitions from YAML
+# Generate formats definitions from YAML
import argparse
import re
diff --git a/utils/gen-header.sh b/utils/gen-header.sh
index 8b66c5dd..d4692758 100755
--- a/utils/gen-header.sh
+++ b/utils/gen-header.sh
@@ -9,7 +9,7 @@ cat <<EOF > "$dst_file"
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * libcamera.h - libcamera public API
+ * libcamera public API
*/
#pragma once
diff --git a/utils/gen-ipa-priv-key.sh b/utils/gen-ipa-priv-key.sh
index 919751f2..2ca7b883 100755
--- a/utils/gen-ipa-priv-key.sh
+++ b/utils/gen-ipa-priv-key.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# gen-ipa-priv-key.sh - Generate an RSA private key to sign IPA modules
+# Generate an RSA private key to sign IPA modules
key="$1"
diff --git a/utils/gen-ipa-pub-key.py b/utils/gen-ipa-pub-key.py
index a4a1f7b7..dc3e7d5f 100755
--- a/utils/gen-ipa-pub-key.py
+++ b/utils/gen-ipa-pub-key.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-gen-key.py - Generate the IPA module signing public key
+# Generate the IPA module signing public key
import string
import subprocess
diff --git a/utils/ipc/extract-docs.py b/utils/ipc/extract-docs.py
index c2050c99..61f44cae 100755
--- a/utils/ipc/extract-docs.py
+++ b/utils/ipc/extract-docs.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# extract-docs.py - Extract doxygen documentation from mojom files
+# Extract doxygen documentation from mojom files
import argparse
import re
@@ -38,7 +38,7 @@ def main(argv):
/*
* Copyright (C) 2021, Google Inc.
*
- * {pipeline}_ipa_interface.cpp - Docs file for generated {pipeline}.mojom
+ * Docs file for generated {pipeline}.mojom
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generate.py b/utils/ipc/generate.py
index 71bdee3b..c2b3fcb7 100755
--- a/utils/ipc/generate.py
+++ b/utils/ipc/generate.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# generate.py - Run mojo code generator for generating libcamera IPC files
+# Run mojo code generator for generating libcamera IPC files
import os
import sys
diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl b/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
index c60b99b8..7f2d0810 100644
--- a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
+++ b/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
@@ -7,7 +7,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * core_ipa_interface.h - libcamera core definitions for Image Processing Algorithms
+ * libcamera core definitions for Image Processing Algorithms
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl b/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
index 5738a1aa..036518f6 100644
--- a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
+++ b/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * core_ipa_serializer.h - Data serializer for core libcamera definitions for IPA
+ * Data serializer for core libcamera definitions for IPA
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
index 160601f7..4d88a3d7 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
@@ -7,7 +7,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_interface.h - Image Processing Algorithm interface for {{module_name}}
+ * Image Processing Algorithm interface for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
index c37c4941..ce3cc5ab 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy.cpp - Image Processing Algorithm proxy for {{module_name}}
+ * Image Processing Algorithm proxy for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
@@ -175,9 +175,9 @@ void {{proxy_name}}::recvMessage(const IPCMessage &data)
);
{% elif method|is_async %}
ASSERT(state_ == ProxyRunning);
- proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued,
+ proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued
{%- for param in method|method_param_names -%}
- {{param}}{{- ", " if not loop.last}}
+ , {{param}}
{%- endfor -%}
);
{%- endif %}
@@ -235,8 +235,8 @@ void {{proxy_name}}::recvMessage(const IPCMessage &data)
}
void {{proxy_name}}::{{method.mojom_name}}IPC(
- std::vector<uint8_t>::const_iterator data,
- size_t dataSize,
+ [[maybe_unused]] std::vector<uint8_t>::const_iterator data,
+ [[maybe_unused]] size_t dataSize,
[[maybe_unused]] const std::vector<SharedFD> &fds)
{
{%- for param in method.parameters %}
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
index 6e823598..e213b18a 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy.h - Image Processing Algorithm proxy for {{module_name}}
+ * Image Processing Algorithm proxy for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
index b65dc4cf..1f990d3f 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy_worker.cpp - Image Processing Algorithm proxy worker for {{module_name}}
+ * Image Processing Algorithm proxy worker for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl b/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
index 8b709705..cd5a65a9 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
+++ b/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_serializer.h - Image Processing Algorithm data serializer for {{module_name}}
+ * Image Processing Algorithm data serializer for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/mojom_libcamera_generator.py b/utils/ipc/generators/mojom_libcamera_generator.py
index 99d905de..b8209e51 100644
--- a/utils/ipc/generators/mojom_libcamera_generator.py
+++ b/utils/ipc/generators/mojom_libcamera_generator.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# mojom_libcamera_generator.py - Generates libcamera files from a mojom.Module.
+# Generates libcamera files from a mojom.Module.
import argparse
import datetime
diff --git a/utils/ipc/parser.py b/utils/ipc/parser.py
index 231a3266..cb5608b7 100755
--- a/utils/ipc/parser.py
+++ b/utils/ipc/parser.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# parser.py - Run mojo parser with python3
+# Run mojo parser with python3
import os
import sys
diff --git a/utils/ipu3/ipu3-capture.sh b/utils/ipu3/ipu3-capture.sh
index 9294d025..004a92b0 100755
--- a/utils/ipu3/ipu3-capture.sh
+++ b/utils/ipu3/ipu3-capture.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipu3-capture.sh - Capture raw frames from cameras based on the Intel IPU3
+# Capture raw frames from cameras based on the Intel IPU3
#
# The scripts makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
diff --git a/utils/ipu3/ipu3-pack.c b/utils/ipu3/ipu3-pack.c
index decbfc6c..23d2db8b 100644
--- a/utils/ipu3/ipu3-pack.c
+++ b/utils/ipu3/ipu3-pack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,9 +16,8 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
-
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats\n");
printf("If the output-file '-', output data will be written to standard output\n");
diff --git a/utils/ipu3/ipu3-process.sh b/utils/ipu3/ipu3-process.sh
index bb4abbe8..25bc849f 100755
--- a/utils/ipu3/ipu3-process.sh
+++ b/utils/ipu3/ipu3-process.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipu3-process.sh - Process raw frames with the Intel IPU3
+# Process raw frames with the Intel IPU3
#
# The scripts makes use of the following tools, which are expected to be
# found in $PATH:
diff --git a/utils/ipu3/ipu3-unpack.c b/utils/ipu3/ipu3-unpack.c
index c96fafed..6ee8c45a 100644
--- a/utils/ipu3/ipu3-unpack.c
+++ b/utils/ipu3/ipu3-unpack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,7 +16,7 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Unpack the IPU3 raw Bayer format to 16-bit Bayer\n");
diff --git a/utils/raspberrypi/ctt/alsc_only.py b/utils/raspberrypi/ctt/alsc_only.py
index 7cd0ac01..a521c4ad 100755
--- a/utils/raspberrypi/ctt/alsc_only.py
+++ b/utils/raspberrypi/ctt/alsc_only.py
@@ -2,12 +2,14 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
-# Copyright (C) 2022, Raspberry Pi (Trading) Limited
+# Copyright (C) 2022, Raspberry Pi Ltd
#
-# alsc_only.py - alsc tuning tool
+# alsc tuning tool
-from ctt import *
+import sys
+from ctt import *
+from ctt_tools import parse_input
if __name__ == '__main__':
"""
@@ -15,13 +17,14 @@ if __name__ == '__main__':
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
+ PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -30,5 +33,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output, alsc_only=True)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py
new file mode 100644
index 00000000..1c0a8193
--- /dev/null
+++ b/utils/raspberrypi/ctt/cac_only.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi (Trading) Ltd.
+#
+# cac_only.py - cac tuning tool
+
+
+# This file allows you to tune only the chromatic aberration correction
+# Specify any number of files in the command line args, and it shall iterate through
+# and generate an averaged cac table from all the input images, which you can then
+# input into your tuning file.
+
+# Takes .dng files produced by the camera modules of the dots grid and calculates the chromatic aberration of each dot.
+# Then takes each dot, and works out where it was in the image, and uses that to output a tables of the shifts
+# across the whole image.
+
+from PIL import Image
+import numpy as np
+import rawpy
+import sys
+import getopt
+
+from ctt_cac import *
+
+
+def cac(filelist, output_filepath, plot_results=False):
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+    # Multiple files is recommended to average out the lens aberration through rotations
+ for file in filelist:
+ print("\n Processing file " + str(file))
+ # Read the raw RGB values from the .dng file
+ with rawpy.imread(file) as raw:
+ rgb = raw.postprocess()
+ sizes = (raw.sizes)
+
+ image_size = [sizes[2], sizes[3]] # Image size, X, Y
+ # Create a colour copy of the RGB values to use later in the calibration
+ imout = Image.new(mode="RGB", size=image_size)
+ rgb_image = np.array(imout)
+ # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+ rgb.reshape((image_size[0], image_size[1], 3))
+ rgb_image = rgb
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+
+ # The json format that we then paste into the tuning file (manually)
+ sample = '''
+ {
+ "rpi.cac" :
+ {
+ "strength": 1.0,
+ "lut_rx" : [
+ rx_vals
+ ],
+ "lut_ry" : [
+ ry_vals
+ ],
+ "lut_bx" : [
+ bx_vals
+ ],
+ "lut_by" : [
+ by_vals
+ ]
+ }
+ }
+ '''
+
+ # Below, may look incorrect, however, the PiSP (standard) dimensions are flipped in comparison to
+ # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+ # and the PiSP block asks for the values it should shift (hence the * -1, to convert from colour shift to a pixel shift)
+ sample = sample.replace("rx_vals", pprint_array(ry * -1))
+ sample = sample.replace("ry_vals", pprint_array(rx * -1))
+ sample = sample.replace("bx_vals", pprint_array(by * -1))
+ sample = sample.replace("by_vals", pprint_array(bx * -1))
+ print("Successfully converted to JSON")
+ f = open(str(output_filepath), "w+")
+ f.write(sample)
+ f.close()
+ print("Successfully written to json file")
+ '''
+ If you wish to see a plot of the colour channel shifts, add the -p or --plots option
+ Can be a quick way of validating if the data/dots you've got are good, or if you need to
+ change some parameters/take some better images
+ '''
+ if plot_results:
+ plot_shifts(red_shift, blue_shift)
+
+
+if __name__ == "__main__":
+ argv = sys.argv
+ # Detect the input and output file paths
+ arg_output = "output.json"
+ arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
+ opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
+
+ output_location = 0
+ input_location = 0
+ filelist = []
+ plot_results = False
+ for i in range(len(argv)):
+ if ("-h") in argv[i]:
+ print(arg_help) # print the help message
+ sys.exit(2)
+ if "-o" in argv[i]:
+ output_location = i
+ if ".dng" in argv[i]:
+ filelist.append(argv[i])
+ if "-p" in argv[i]:
+ plot_results = True
+
+ arg_output = argv[output_location + 1]
+ cac(filelist, arg_output, plot_results)
diff --git a/utils/raspberrypi/ctt/colors.py b/utils/raspberrypi/ctt/colors.py
index 1ab986d6..cb4d236b 100644
--- a/utils/raspberrypi/ctt/colors.py
+++ b/utils/raspberrypi/ctt/colors.py
@@ -1,4 +1,4 @@
-# colors.py - Program to convert from RGB to LAB color space
+# Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB): # where RGB is a 1x3 array. e.g RGB = [100, 255, 230]
num = 0
XYZ = [0, 0, 0]
diff --git a/utils/raspberrypi/ctt/convert_tuning.py b/utils/raspberrypi/ctt/convert_tuning.py
index f4504d45..83cf69d4 100755
--- a/utils/raspberrypi/ctt/convert_tuning.py
+++ b/utils/raspberrypi/ctt/convert_tuning.py
@@ -8,30 +8,104 @@
import argparse
import json
+import numpy as np
import sys
from ctt_pretty_print_json import pretty_print
+from ctt_pisp import grid_size as grid_size_pisp
+from ctt_pisp import json_template as json_template_pisp
+from ctt_vc4 import grid_size as grid_size_vc4
+from ctt_vc4 import json_template as json_template_vc4
-def convert_v2(in_json: dict) -> str:
+def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
- if 'version' in in_json.keys() and in_json['version'] != 1.0:
- print(f'The JSON config reports version {in_json["version"]} that is incompatible with this tool.')
- sys.exit(-1)
+ out_ls = np.zeros((dst_h, dst_w))
+ for i in range(src_h):
+ out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
+ np.linspace(0, dst_w - 1, src_w),
+ in_ls[i])
+ for i in range(dst_w):
+ out_ls[:,i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
+ np.linspace(0, dst_h - 1, src_h),
+ out_ls[:src_h, i])
+ return out_ls
- converted = {
- 'version': 2.0,
- 'target': 'bcm2835',
- 'algorithms': [{algo: config} for algo, config in in_json.items()]
- }
- return pretty_print(converted)
+def convert_target(in_json: dict, target: str):
+
+ src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4
+ dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp
+ json_template = json_template_vc4 if target == 'vc4' else json_template_pisp
+
+ # ALSC grid sizes
+ alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc']
+ for colour in ['calibrations_Cr', 'calibrations_Cb']:
+ if colour not in alsc:
+ continue
+ for temperature in alsc[colour]:
+ in_ls = np.reshape(temperature['table'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ temperature['table'] = np.round(out_ls.flatten(), 3).tolist()
+
+ if 'luminance_lut' in alsc:
+ in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist()
+
+ # Denoise blocks
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.sdn':
+ in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']}
+ break
+
+ # AGC mode weights
+ agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc']
+ if 'channels' in agc:
+ for i, channel in enumerate(agc['channels']):
+ target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes']
+ for mode, v in channel['metering_modes'].items():
+ v['weights'] = target_agc_metering[mode]['weights']
+ else:
+ for mode, v in agc["metering_modes"].items():
+ target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes']
+ v['weights'] = target_agc_metering[mode]['weights']
+
+ # HDR
+ if target == 'pisp':
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.hdr':
+ in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']}
+
+ return in_json
+
+
+def convert_v2(in_json: dict, target: str) -> str:
+
+ if 'version' in in_json.keys() and in_json['version'] == 1.0:
+ converted = {
+ 'version': 2.0,
+ 'target': target,
+ 'algorithms': [{algo: config} for algo, config in in_json.items()]
+ }
+ else:
+ converted = in_json
+
+ # Convert between vc4 <-> pisp targets. This is a best effort thing.
+ if converted['target'] != target:
+ converted = convert_target(converted, target)
+ converted['target'] = target
+
+ grid_size = grid_size_vc4[0] if target == 'vc4' else grid_size_pisp[0]
+ return pretty_print(converted, custom_elems={'table': grid_size, 'luminance_lut': grid_size})
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
- 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0.\n')
+ 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n')
parser.add_argument('input', type=str, help='Input tuning file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform.',
+ choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
default=None)
@@ -40,7 +114,7 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = convert_v2(in_json)
+ out_json = convert_v2(in_json, args.target)
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py
index cd89f177..96f1b5e6 100755
--- a/utils/raspberrypi/ctt/ctt.py
+++ b/utils/raspberrypi/ctt/ctt.py
@@ -4,11 +4,12 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt.py - camera tuning tool
+# camera tuning tool
import os
import sys
from ctt_image_load import *
+from ctt_cac import *
from ctt_ccm import *
from ctt_awb import *
from ctt_alsc import *
@@ -22,9 +23,10 @@ import re
"""
This file houses the camera object, which is used to perform the calibrations.
-The camera object houses all the calibration images as attributes in two lists:
+The camera object houses all the calibration images as attributes in three lists:
- imgs (macbeth charts)
- imgs_alsc (alsc correction images)
+ - imgs_cac (cac correction images)
Various calibrations are methods of the camera object, and the output is stored
in a dictionary called self.json.
Once all the caibration has been completed, the Camera.json is written into a
@@ -67,139 +69,26 @@ Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
- def __init__(self, jfile):
+ def __init__(self, jfile, json):
self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
if self.path == '/':
self.path = ''
self.imgs = []
self.imgs_alsc = []
+ self.imgs_cac = []
self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
self.log_separator = '\n'+'-'*70+'\n'
self.jf = jfile
"""
initial json dict populated by uncalibrated values
"""
- self.json = {
- "rpi.black_level": {
- "black_level": 4096
- },
- "rpi.dpc": {
- },
- "rpi.lux": {
- "reference_shutter_speed": 10000,
- "reference_gain": 1,
- "reference_aperture": 1.0
- },
- "rpi.noise": {
- },
- "rpi.geq": {
- },
- "rpi.sdn": {
- },
- "rpi.awb": {
- "priors": [
- {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
- {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
- {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
- ],
- "modes": {
- "auto": {"lo": 2500, "hi": 8000},
- "incandescent": {"lo": 2500, "hi": 3000},
- "tungsten": {"lo": 3000, "hi": 3500},
- "fluorescent": {"lo": 4000, "hi": 4700},
- "indoor": {"lo": 3000, "hi": 5000},
- "daylight": {"lo": 5500, "hi": 6500},
- "cloudy": {"lo": 7000, "hi": 8600}
- },
- "bayes": 1
- },
- "rpi.agc": {
- "metering_modes": {
- "centre-weighted": {
- "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
- },
- "spot": {
- "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- },
- "matrix": {
- "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
- }
- },
- "exposure_modes": {
- "normal": {
- "shutter": [100, 10000, 30000, 60000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- },
- "short": {
- "shutter": [100, 5000, 10000, 20000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- }
- },
- "constraint_modes": {
- "normal": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
- ],
- "highlight": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
- {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
- ]
- },
- "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
- },
- "rpi.alsc": {
- 'omega': 1.3,
- 'n_iter': 100,
- 'luminance_strength': 0.7,
- },
- "rpi.contrast": {
- "ce_enable": 1,
- "gamma_curve": [
- 0, 0,
- 1024, 5040,
- 2048, 9338,
- 3072, 12356,
- 4096, 15312,
- 5120, 18051,
- 6144, 20790,
- 7168, 23193,
- 8192, 25744,
- 9216, 27942,
- 10240, 30035,
- 11264, 32005,
- 12288, 33975,
- 13312, 35815,
- 14336, 37600,
- 15360, 39168,
- 16384, 40642,
- 18432, 43379,
- 20480, 45749,
- 22528, 47753,
- 24576, 49621,
- 26624, 51253,
- 28672, 52698,
- 30720, 53796,
- 32768, 54876,
- 36864, 57012,
- 40960, 58656,
- 45056, 59954,
- 49152, 61183,
- 53248, 62355,
- 57344, 63419,
- 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm": {
- },
- "rpi.sharpen": {
- }
- }
+ self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
- def ccm_cal(self, do_alsc_colour):
+ def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
@@ -245,7 +134,7 @@ class Camera:
Do CCM calibration
"""
try:
- ccms = ccm(self, cal_cr_list, cal_cb_list)
+ ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
@@ -259,11 +148,67 @@ class Camera:
print('Finished CCM calibration')
"""
+ Perform chromatic aberration correction using multiple dots images.
+ """
+ def cac_cal(self, do_alsc_colour):
+ if 'rpi.cac' in self.disable:
+ return 1
+ print('\nStarting CAC calibration')
+ self.log_new_sec('CAC')
+ """
+ check if cac images have been taken
+ """
+ if len(self.imgs_cac) == 0:
+ print('\nError:\nNo cac calibration images found')
+ self.log += '\nERROR: No CAC calibration images found!'
+ self.log += '\nCAC calibration aborted!'
+ return 1
+ """
+ if image is greyscale then CAC makes no sense
+ """
+ if self.grey:
+ print('\nERROR: Can\'t do CAC on greyscale image!')
+ self.log += '\nERROR: Cannot perform CAC calibration '
+ self.log += 'on greyscale image!\nCAC aborted!'
+ del self.json['rpi.cac']
+ return 0
+ a = time.time()
+ """
+ Check if camera is greyscale or color. If not greyscale, then perform cac
+ """
+ if do_alsc_colour:
+ """
+ Here we have a color sensor. Perform cac
+ """
+ try:
+ cacs = cac(self)
+ except ArithmeticError:
+ print('ERROR: Matrix is singular!\nTake new pictures and try again...')
+ self.log += '\nERROR: Singular matrix encountered during fit!'
+ self.log += '\nCAC aborted!'
+ return 1
+ else:
+ """
+ case where config options suggest greyscale camera. No point in doing CAC
+ """
+ cal_cr_list, cal_cb_list = None, None
+ self.log += '\nWARNING: No ALSC tables found.\nCAC calibration '
+ self.log += 'performed without ALSC correction...'
+
+ """
+ Write output to json
+ """
+ self.json['rpi.cac']['cac'] = cacs
+ self.log += '\nCAC calibration written to json file'
+ print('Finished CAC calibration')
+
+
+ """
Auto white balance calibration produces a colour curve for
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
- def awb_cal(self, greyworld, do_alsc_colour):
+ def awb_cal(self, greyworld, do_alsc_colour, grid_size):
if 'rpi.awb' in self.disable:
return 1
print('\nStarting AWB calibration')
@@ -306,7 +251,7 @@ class Camera:
call calibration function
"""
plot = "rpi.awb" in self.plot
- awb_out = awb(self, cal_cr_list, cal_cb_list, plot)
+ awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
ct_curve, transverse_neg, transverse_pos = awb_out
"""
write output to json
@@ -324,7 +269,7 @@ class Camera:
colour channel seperately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
- def alsc_cal(self, luminance_strength, do_alsc_colour):
+ def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0):
if 'rpi.alsc' in self.disable:
return 1
print('\nStarting ALSC calibration')
@@ -347,10 +292,10 @@ class Camera:
call calibration function
"""
plot = "rpi.alsc" in self.plot
- alsc_out = alsc_all(self, do_alsc_colour, plot)
+ alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
- write ouput to json and finish if not do_alsc_colour
+ write output to json and finish if not do_alsc_colour
"""
if not do_alsc_colour:
self.json['rpi.alsc']['luminance_lut'] = luminance_lut
@@ -393,7 +338,7 @@ class Camera:
"""
obtain worst-case scenario residual sigmas
"""
- sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list)
+ sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
"""
write output to json
"""
@@ -509,19 +454,20 @@ class Camera:
"""
writes the json dictionary to the raw json file then make pretty
"""
- def write_json(self):
+ def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
- "version": 2.0,
- 'target': 'bcm2835',
+ "version": version,
+ 'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
- f.write(pretty_print(out_json))
+ f.write(pretty_print(out_json,
+ custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
@@ -627,6 +573,16 @@ class Camera:
self.log += '\nWARNING: Error reading colour temperature'
self.log += '\nImage discarded!'
print('DISCARDED')
+ elif 'cac' in filename:
+ Img = load_image(self, address, mac=False)
+ self.log += '\nIdentified as a CAC image'
+ Img.name = filename
+ self.log += '\nColour temperature: {} K'.format(col)
+ self.imgs_cac.append(Img)
+ if blacklevel != -1:
+ Img.blacklevel_16 = blacklevel
+ print(img_suc_msg)
+ continue
else:
self.log += '\nIdentified as macbeth chart image'
"""
@@ -672,6 +628,7 @@ class Camera:
self.log += '\n\nImages found:'
self.log += '\nMacbeth : {}'.format(len(self.imgs))
self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
+ self.log += '\nCAC: {} '.format(len(self.imgs_cac))
self.log += '\n\nCamera metadata'
"""
check usable images found
@@ -680,22 +637,21 @@ class Camera:
print('\nERROR: No usable macbeth chart images found')
self.log += '\nERROR: No usable macbeth chart images found'
return 0
- elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0:
+ elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
print('\nERROR: No usable images found')
self.log += '\nERROR: No usable images found'
return 0
"""
Double check that every image has come from the same camera...
"""
- all_imgs = self.imgs + self.imgs_alsc
+ all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
camNames = list(set([Img.camName for Img in all_imgs]))
patterns = list(set([Img.pattern for Img in all_imgs]))
sigbitss = list(set([Img.sigbits for Img in all_imgs]))
blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
- if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \
- len(blacklevels) == 1 and len(sizes) == 1:
+ if 1:
self.grey = (patterns[0] == 128)
self.blacklevel_16 = blacklevels[0]
self.log += '\nName: {}'.format(camNames[0])
@@ -712,7 +668,7 @@ class Camera:
return 0
-def run_ctt(json_output, directory, config, log_output, alsc_only=False):
+def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
"""
check input files are jsons
"""
@@ -748,12 +704,14 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
greyworld = get_config(awb_d, "greyworld", 0, 'bool')
alsc_d = get_config(configs, "alsc", {}, 'dict')
do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
- luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num')
+ luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
+ lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num')
blacklevel = get_config(configs, "blacklevel", -1, 'num')
macbeth_d = get_config(configs, "macbeth", {}, 'dict')
mac_small = get_config(macbeth_d, "small", 0, 'bool')
mac_show = get_config(macbeth_d, "show", 0, 'bool')
mac_config = (mac_small, mac_show)
+ print("Read lsc_max_gain", lsc_max_gain)
if blacklevel < -1 or blacklevel >= 2**16:
print('\nInvalid blacklevel, defaulted to 64')
@@ -772,7 +730,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
initialise tuning tool and load images
"""
try:
- Cam = Camera(json_output)
+ Cam = Camera(json_output, json=json_template)
Cam.log_user_input(json_output, directory, config, log_output)
if alsc_only:
disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
@@ -794,14 +752,17 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
Cam.json_remove(disable)
print('\nSTARTING CALIBRATIONS')
- Cam.alsc_cal(luminance_strength, do_alsc_colour)
+ Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain)
Cam.geq_cal()
Cam.lux_cal()
Cam.noise_cal()
- Cam.awb_cal(greyworld, do_alsc_colour)
- Cam.ccm_cal(do_alsc_colour)
+ if "rpi.cac" in json_template:
+ Cam.cac_cal(do_alsc_colour)
+ Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
+ Cam.ccm_cal(do_alsc_colour, grid_size)
+
print('\nFINISHED CALIBRATIONS')
- Cam.write_json()
+ Cam.write_json(target=target, grid_size=grid_size)
Cam.write_log(log_output)
print('\nCalibrations written to: '+json_output)
if log_output is None:
@@ -811,20 +772,19 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
else:
Cam.write_log(log_output)
-
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
-
+ PiSP Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -833,5 +793,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)
diff --git a/utils/raspberrypi/ctt/ctt_alsc.py b/utils/raspberrypi/ctt/ctt_alsc.py
index e51d6931..1d94dfa5 100644
--- a/utils/raspberrypi/ctt/ctt_alsc.py
+++ b/utils/raspberrypi/ctt/ctt_alsc.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_alsc.py - camera tuning tool for ALSC (auto lens shading correction)
+# camera tuning tool for ALSC (auto lens shading correction)
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,8 +13,9 @@ from mpl_toolkits.mplot3d import Axes3D
"""
preform alsc calibration on a set of images
"""
-def alsc_all(Cam, do_alsc_colour, plot):
+def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0):
imgs_alsc = Cam.imgs_alsc
+ grid_w, grid_h = grid_size
"""
create list of colour temperatures and associated calibration tables
"""
@@ -23,7 +24,7 @@ def alsc_all(Cam, do_alsc_colour, plot):
list_cb = []
list_cg = []
for Img in imgs_alsc:
- col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
+ col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
@@ -68,11 +69,12 @@ def alsc_all(Cam, do_alsc_colour, plot):
t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
t_r = np.round(t_r, 3)
t_b = np.round(t_b, 3)
- r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
- b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
- r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
+ r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
+ b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
+ middle_pos = (grid_h // 2 - 1) * grid_w + grid_w - 1
+ r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
r_cen = round(r_cen/4, 3)
- b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
+ b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
b_cen = round(b_cen/4, 3)
Cam.log += '\nRed table corners: {}'.format(r_corners)
Cam.log += '\nRed table centre: {}'.format(r_cen)
@@ -116,8 +118,9 @@ def alsc_all(Cam, do_alsc_colour, plot):
"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
-def alsc(Cam, Img, do_alsc_colour, plot=False):
+def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0):
Cam.log += '\nProcessing image: ' + Img.name
+ grid_w, grid_h = grid_size
"""
get channel in correct order
"""
@@ -128,31 +131,34 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
where w is a multiple of 32.
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
average the green channels into one
"""
av_ch_g = np.mean((channels[1:3]), axis=0)
if do_alsc_colour:
"""
- obtain 16x12 grid of intensities for each channel and subtract black level
+ obtain grid_w x grid_h grid of intensities for each channel and subtract black level
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
- b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
+ b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
"""
calculate ratios as 32 bit in order to be supported by medianBlur function
"""
- cr = np.reshape(g/r, (12, 16)).astype('float32')
- cb = np.reshape(g/b, (12, 16)).astype('float32')
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
+ cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
"""
median blur to remove peaks and save as float 64
"""
cr = cv2.medianBlur(cr, 3).astype('float64')
+ cr = cr/np.min(cr) # gain tables are easier for humans to read if the minimum is 1.0
cb = cv2.medianBlur(cb, 3).astype('float64')
+ cb = cb/np.min(cb)
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
"""
debugging code showing 2D surface plot of vignetting. Quite useful for
@@ -164,7 +170,7 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
note Y is plotted as -Y so plot has same axes as image
"""
- X, Y = np.meshgrid(range(16), range(12))
+ X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
hb = hf.add_subplot(312, projection='3d')
@@ -176,21 +182,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
# print(Img.str)
plt.show()
- return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)
+ return Img.col, cr.flatten(), cb.flatten(), cg, (w, h, dx, dy)
else:
"""
only perform calculations for luminance shading
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
if plot:
hf = plt.figure(figssize=(8, 8))
ha = hf.add_subplot(1, 1, 1, projection='3d')
- X, Y = np.meashgrid(range(16), range(12))
+ X, Y = np.meashgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg').format(Img.str)
plt.show()
@@ -199,21 +206,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
-Compresses channel down to a 16x12 grid
+Compresses channel down to a grid of the requested size
"""
-def get_16x12_grid(chan, dx, dy):
+def get_grid(chan, dx, dy, grid_size):
+ grid_w, grid_h = grid_size
grid = []
"""
since left and bottom border will not necessarily have rectangles of
dimension dx x dy, the 32nd iteration has to be handled separately.
"""
- for i in range(11):
- for j in range(15):
+ for i in range(grid_h - 1):
+ for j in range(grid_w - 1):
grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:]))
- for j in range(15):
- grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[11*dy:, 15*dx:]))
+ grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:]))
+ for j in range(grid_w - 1):
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)]))
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))
"""
return as np.array, ready for further manipulation
"""
@@ -223,7 +231,7 @@ def get_16x12_grid(chan, dx, dy):
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
-def get_sigma(Cam, cal_cr_list, cal_cb_list):
+def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
Cam.log += '\nCalculating sigmas'
"""
provided colour alsc tables were generated for two different colour
@@ -241,8 +249,8 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
sigma_rs = []
sigma_bs = []
for i in range(len(cts)-1):
- sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table']))
- sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table']))
+ sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
+ sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])
@@ -263,12 +271,13 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
"""
calculate sigma from two adjacent gain tables
"""
-def calc_sigma(g1, g2):
+def calc_sigma(g1, g2, grid_size):
+ grid_w, grid_h = grid_size
"""
reshape into 16x12 matrix
"""
- g1 = np.reshape(g1, (12, 16))
- g2 = np.reshape(g2, (12, 16))
+ g1 = np.reshape(g1, (grid_h, grid_w))
+ g2 = np.reshape(g2, (grid_h, grid_w))
"""
apply gains to gain table
"""
@@ -280,8 +289,8 @@ def calc_sigma(g1, g2):
neighbours, then append to list
"""
diffs = []
- for i in range(10):
- for j in range(14):
+ for i in range(grid_h - 2):
+ for j in range(grid_w - 2):
"""
note indexing is incremented by 1 since all patches on borders are
not counted
diff --git a/utils/raspberrypi/ctt/ctt_awb.py b/utils/raspberrypi/ctt/ctt_awb.py
index bf45e54d..4af1fe41 100644
--- a/utils/raspberrypi/ctt/ctt_awb.py
+++ b/utils/raspberrypi/ctt/ctt_awb.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_awb.py - camera tuning tool for AWB
+# camera tuning tool for AWB
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,7 +13,7 @@ from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
-def awb(Cam, cal_cr_list, cal_cb_list, plot):
+def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
@@ -43,7 +43,7 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
- r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
@@ -293,12 +293,13 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
"""
obtain greyscale patches and perform alsc colour correction
"""
-def get_alsc_patches(Img, colour_cals, grey=True):
+def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
+ grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
@@ -345,12 +346,12 @@ def get_alsc_patches(Img, colour_cals, grey=True):
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
- col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
new file mode 100644
index 00000000..5a4c5101
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_cac.py
@@ -0,0 +1,228 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
+
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import cm
+
+from ctt_dots_locator import find_dots_locations
+
+
+# This is the wrapper file that creates a JSON entry for you to append
+# to your camera tuning file.
+# It calculates the chromatic aberration at different points throughout
+# the image and uses that to produce a martix that can then be used
+# in the camera tuning files to correct this aberration.
+
+
+def pprint_array(array):
+ # Function to print the array in a tidier format
+ array = array
+ output = ""
+ for i in range(len(array)):
+ for j in range(len(array[0])):
+ output += str(round(array[i, j], 2)) + ", "
+ # Add the necessary indentation to the array
+ output += "\n "
+ # Cut off the end of the array (nicely formats it)
+ return output[:-22]
+
+
+def plot_shifts(red_shifts, blue_shifts):
+ # If users want, they can pass a command line option to show the shifts on a graph
+ # Can be useful to check that the functions are all working, and that the sample
+ # images are doing the right thing
+ Xs = np.array(red_shifts)[:, 0]
+ Ys = np.array(red_shifts)[:, 1]
+ Zs = np.array(red_shifts)[:, 2]
+ Zs2 = np.array(red_shifts)[:, 3]
+ Zs3 = np.array(blue_shifts)[:, 2]
+ Zs4 = np.array(blue_shifts)[:, 3]
+
+ fig, axs = plt.subplots(2, 2)
+ ax = fig.add_subplot(2, 2, 1, projection='3d')
+ ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red X Shift')
+ ax = fig.add_subplot(2, 2, 2, projection='3d')
+ ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red Y Shift')
+ ax = fig.add_subplot(2, 2, 3, projection='3d')
+ ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue X Shift')
+ ax = fig.add_subplot(2, 2, 4, projection='3d')
+ ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue Y Shift')
+ fig.tight_layout()
+ plt.show()
+
+
+def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
+ # Convert the shifts to a numpy array for easier handling and initialise other variables
+ red_shifts = np.array(red_shift)
+ blue_shifts = np.array(blue_shift)
+ # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
+ xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+ xrsgrid = []
+ xbsgrid = []
+ yrsgrid = []
+ ybsgrid = []
+ xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+ # Format the grids - numpy doesn't work for this, it wants a
+ # nice uniformly spaced grid, which we don't know if we have yet, hence the rather mundane setup
+ for x in range(output_grid_size - 1):
+ xrsgrid.append([])
+ yrsgrid.append([])
+ xbsgrid.append([])
+ ybsgrid.append([])
+ for y in range(output_grid_size - 1):
+ xrsgrid[x].append([])
+ yrsgrid[x].append([])
+ xbsgrid[x].append([])
+ ybsgrid[x].append([])
+
+ image_size = (image_dimensions[0], image_dimensions[1])
+ gridxsize = image_size[0] / (output_grid_size - 1)
+ gridysize = image_size[1] / (output_grid_size - 1)
+
+    # Iterate through each dot and its shift values, and put these into the correct grid location
+ for red_shift in red_shifts:
+ xgridloc = int(red_shift[0] / gridxsize)
+ ygridloc = int(red_shift[1] / gridysize)
+ xrsgrid[xgridloc][ygridloc].append(red_shift[2])
+ yrsgrid[xgridloc][ygridloc].append(red_shift[3])
+
+ for blue_shift in blue_shifts:
+ xgridloc = int(blue_shift[0] / gridxsize)
+ ygridloc = int(blue_shift[1] / gridysize)
+ xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
+ ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
+
+ # Now calculate the average pixel shift for each square in the grid
+ for x in range(output_grid_size - 1):
+ for y in range(output_grid_size - 1):
+ xrgrid[x, y] = np.mean(xrsgrid[x][y])
+ yrgrid[x, y] = np.mean(yrsgrid[x][y])
+ xbgrid[x, y] = np.mean(xbsgrid[x][y])
+ ybgrid[x, y] = np.mean(ybsgrid[x][y])
+
+ # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
+ input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
+ output_grids = np.zeros((4, output_grid_size, output_grid_size))
+
+ # Interpolate the centre of the grid
+ output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
+
+ # Edge cases:
+ output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
+ output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
+ output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
+ output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
+
+ # Corner Cases:
+ output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
+ output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
+ output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
+ output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
+
+ # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
+ # This is due to the PiSP (standard) dimensions being flipped in comparison to
+ # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+ # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
+
+ output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
+ return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
+
+
+def analyse_dot(dot, dot_location=[0, 0]):
+ # Scan through the dot, calculate the centroid of each colour channel by doing:
+ # pixel channel brightness * distance from top left corner
+ # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
+ red_channel = np.array(dot)[:, :, 0]
+ y_num_pixels = len(red_channel[0])
+ x_num_pixels = len(red_channel)
+ yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
+ xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
+ red_sum = np.sum(red_channel)
+
+ green_channel = np.array(dot)[:, :, 1]
+ ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
+ xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
+ green_sum = np.sum(green_channel)
+
+ blue_channel = np.array(dot)[:, :, 2]
+ yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
+ xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
+ blue_sum = np.sum(blue_channel)
+
+ # We return this structure. It contains 2 arrays that contain:
+ # the locations of the dot center, along with the channel shifts in the x and y direction:
+ # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
+
+ return [[int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2), xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum], [dot_location[0] + int(len(dot) / 2), dot_location[1] + int(len(dot[0]) / 2), xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]]
+
+
+def cac(Cam):
+ filelist = Cam.imgs_cac
+
+ Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+    # Multiple files are recommended to average out the lens aberration through rotations
+ for file in filelist:
+ Cam.log += '\nCAC processing file'
+ print("\n Processing file")
+ # Read the raw RGB values
+ rgb = file.rgb
+        image_size = [file.h, file.w]  # Image size: height, width
+ # Create a colour copy of the RGB values to use later in the calibration
+ imout = Image.new(mode="RGB", size=image_size)
+ rgb_image = np.array(imout)
+ # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+ rgb.reshape((image_size[0], image_size[1], 3))
+ rgb_image = rgb
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ Cam.log += '\nFinding dots'
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ Cam.log += '\nDots found: {}'.format(str(len(dots)))
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ Cam.log += '\nCreating output grid'
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+ Cam.log += '\nCAC correction complete!'
+
+ # Give the JSON dict back to the main ctt program
+ return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
diff --git a/utils/raspberrypi/ctt/ctt_ccm.py b/utils/raspberrypi/ctt/ctt_ccm.py
index a09bfd09..07c943a8 100644
--- a/utils/raspberrypi/ctt/ctt_ccm.py
+++ b/utils/raspberrypi/ctt/ctt_ccm.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ccm.py - camera tuning tool for CCM (colour correction matrix)
+# camera tuning tool for CCM (colour correction matrix)
from ctt_image_load import *
from ctt_awb import get_alsc_patches
@@ -56,7 +56,7 @@ FInds colour correction matrices for list of images
"""
-def ccm(Cam, cal_cr_list, cal_cb_list):
+def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
global matrix_selection_types, typenum
imgs = Cam.imgs
"""
@@ -133,9 +133,7 @@ def ccm(Cam, cal_cr_list, cal_cb_list):
Note: if alsc is disabled then colour_cals will be set to None and no
the function will simply return the macbeth patches
"""
- r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
- # 256 values for each patch of sRGB values
-
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
diff --git a/utils/raspberrypi/ctt/ctt_config_example.json b/utils/raspberrypi/ctt/ctt_config_example.json
index c7f90761..1105862c 100644
--- a/utils/raspberrypi/ctt/ctt_config_example.json
+++ b/utils/raspberrypi/ctt/ctt_config_example.json
@@ -3,7 +3,8 @@
"plot": [],
"alsc": {
"do_alsc_colour": 1,
- "luminance_strength": 0.5
+ "luminance_strength": 0.8,
+ "max_gain": 8.0
},
"awb": {
"greyworld": 0
@@ -13,4 +14,4 @@
"small": 0,
"show": 0
}
-} \ No newline at end of file
+}
diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py
new file mode 100644
index 00000000..4945c04b
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_dots_locator.py
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# find_dots.py - Used by CAC algorithm to convert image to set of dots
+
+'''
+This file takes the black and white version of the image, along with
+the color version. It then locates the black dots on the image by
+thresholding dark pixels.
+In a rather fun way, the algorithm bounces around the thresholded area in a random path
+We then use the maximum and minimum of these paths to determine the dot shape and size
+This info is then used to return colored dots and locations back to the main file
+'''
+
+import numpy as np
+import random
+from PIL import Image, ImageEnhance, ImageFilter
+
+
+def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10, logfile=open("log.txt", "a+")):
+ # Initialise some starting variables
+ pixels = Image.fromarray(rgb_image)
+ pixels = pixels.convert("L")
+ enhancer = ImageEnhance.Contrast(pixels)
+ im_output = enhancer.enhance(1.4)
+ # We smooth it slightly to make it easier for the dot recognition program to locate the dots
+ im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
+ bw_image = np.array(im_output)
+
+ location = [0, 0]
+ dots = []
+ dots_location = []
+ # the program takes away the edges - we don't want a dot that is half a circle, the
+ # centroids would all be wrong
+ for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
+ for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
+ location = [x, y]
+ scrap_dot = False # A variable used to make sure that this is a valid dot
+ if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
+ heading = "south" # Define a starting direction to move in
+ coords = []
+ for i in range(search_path_length): # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
+ # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
+ if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
+ if heading == "south":
+ if bw_image[location[0] + 1, location[1]] < color_threshold:
+ # Here, notice it does not go south, but actually goes southeast
+ # This is crucial in ensuring that we make our way around the majority of the dot
+ location[0] = location[0] + 1
+ location[1] = location[1] + 1
+ heading = "south"
+ else:
+ # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+
+ if heading == "east":
+ if bw_image[location[0], location[1] + 1] < color_threshold:
+ location[1] = location[1] + 1
+ heading = "east"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "west":
+ if bw_image[location[0], location[1] - 1] < color_threshold:
+ location[1] = location[1] - 1
+ heading = "west"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "north":
+ if bw_image[location[0] - 1, location[1]] < color_threshold:
+ location[0] = location[0] - 1
+ heading = "north"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+ # Log where our particle travels across the dot
+ coords.append([location[0], location[1]])
+ else:
+ scrap_dot = True # We just don't have enough space around the dot, discard this one, and move on
+ if not scrap_dot:
+                    # get the size of the bounding box surrounding the dot
+ x_coords = np.array(coords)[:, 0]
+ y_coords = np.array(coords)[:, 1]
+ hsquaresize = max(list(x_coords)) - min(list(x_coords))
+ vsquaresize = max(list(y_coords)) - min(list(y_coords))
+ # Create the bounding coordinates of the rectangle surrounding the dot
+ # Program uses the dotsize + half of the dotsize to ensure we get all that color fringing
+ extra_space_factor = 0.45
+ top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
+ btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
+ top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
+ btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
+ # Overwrite the area of the dot to ensure we don't use it again
+ bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
+ # Add the color version of the dot to the list to send off, along with some coordinates.
+ dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
+ dots_location.append([top_left_x, top_left_y])
+ else:
+ # Dot was too close to the image border to be useable
+ pass
+ return dots, dots_location
diff --git a/utils/raspberrypi/ctt/ctt_geq.py b/utils/raspberrypi/ctt/ctt_geq.py
index c45addcd..5a91ebb4 100644
--- a/utils/raspberrypi/ctt/ctt_geq.py
+++ b/utils/raspberrypi/ctt/ctt_geq.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_geq.py - camera tuning tool for GEQ (green equalisation)
+# camera tuning tool for GEQ (green equalisation)
from ctt_tools import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py
index 310c5e88..531de328 100644
--- a/utils/raspberrypi/ctt/ctt_image_load.py
+++ b/utils/raspberrypi/ctt/ctt_image_load.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
-# ctt_image_load.py - camera tuning tool image loading
+# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
@@ -350,6 +350,7 @@ def dng_load_image(Cam, im_str):
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]
+ Img.rgb = raw_im.postprocess()
except Exception:
print("\nERROR: failed to load DNG file", im_str)
diff --git a/utils/raspberrypi/ctt/ctt_lux.py b/utils/raspberrypi/ctt/ctt_lux.py
index 70855e1b..46be1512 100644
--- a/utils/raspberrypi/ctt/ctt_lux.py
+++ b/utils/raspberrypi/ctt/ctt_lux.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_lux.py - camera tuning tool for lux level
+# camera tuning tool for lux level
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_macbeth_locator.py b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
index 178aeed0..f22dbf31 100644
--- a/utils/raspberrypi/ctt/ctt_macbeth_locator.py
+++ b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_macbeth_locator.py - camera tuning tool Macbeth chart locator
+# camera tuning tool Macbeth chart locator
from ctt_ransac import *
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_noise.py b/utils/raspberrypi/ctt/ctt_noise.py
index 3270bf34..0b18d83f 100644
--- a/utils/raspberrypi/ctt/ctt_noise.py
+++ b/utils/raspberrypi/ctt/ctt_noise.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_noise.py - camera tuning tool noise calibration
+# camera tuning tool noise calibration
from ctt_image_load import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py
new file mode 100755
index 00000000..a59b053c
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_pisp.py
@@ -0,0 +1,805 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# ctt_pisp.py - camera tuning tool data for PiSP platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.dpc": {
+ "strength": 1
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.denoise":
+ {
+ "normal":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 0.8,
+ "threshold": 0.05
+ }
+ },
+ "hdr":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ },
+ "night":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ }
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 7700},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8000}
+ },
+ "bayes": 1
+ },
+ "rpi.agc":
+ {
+ "channels":
+ [
+ {
+ "comment": "Channel 0 is normal AGC",
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 60000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ },
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 1 is the HDR short channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 2 is the HDR long channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ ],
+ "highlight": [
+ ],
+ "shadows": [
+ ]
+ },
+ "channel_constraints":
+ [
+ {
+ "bound": "UPPER",
+ "channel": 4,
+ "factor": 8
+ },
+ {
+ "bound": "LOWER",
+ "channel": 4,
+ "factor": 2
+ }
+ ],
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 3 is the night mode channel",
+ "base_ev": 0.33,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 66666, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 4.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.17
+ ]
+ }
+ ]
+ },
+ "rpi.alsc": {
+        "omega": 1.3,
+        "n_iter": 100,
+        "luminance_strength": 0.8
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.cac": {
+ },
+ "rpi.sharpen": {
+ "threshold": 0.25,
+ "limit": 1.0,
+ "strength": 1.0
+ },
+ "rpi.hdr":
+ {
+ "Off":
+ {
+ "cadence": [ 0 ]
+ },
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map": { "short": 1, "long": 2 }
+ },
+ "SingleExposure":
+ {
+ "cadence": [1],
+ "channel_map": { "short": 1 },
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "MultiExposure":
+ {
+ "cadence": [1, 2],
+ "channel_map": { "short": 1, "long": 2 },
+ "stitch_enable": 1,
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "Night":
+ {
+ "cadence": [ 3 ],
+ "channel_map": { "night": 3 },
+ "tonemap_enable": 1,
+ "tonemap":
+ [
+ 0, 0,
+ 5000, 20000,
+ 10000, 30000,
+ 20000, 47000,
+ 30000, 55000,
+ 65535, 65535
+ ]
+ }
+ }
+}
+
+grid_size = (32, 32)
diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
index 3e3b8475..a4cae62d 100755
--- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py
+++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
@@ -19,13 +19,19 @@ class Encoder(json.JSONEncoder):
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
+ 'weights': 15,
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
+ 'lut_rx': 9,
+ 'lut_bx': 9,
+ 'lut_by': 9,
+ 'lut_ry': 9,
'gamma_curve': 2,
'y_target': 2,
- 'prior': 2
+ 'prior': 2,
+ 'tonemap': 2
}
def encode(self, o, node_key=None):
@@ -87,7 +93,7 @@ class Encoder(json.JSONEncoder):
return self.encode(o)
-def pretty_print(in_json: dict) -> str:
+def pretty_print(in_json: dict, custom_elems={}) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
@@ -95,12 +101,15 @@ def pretty_print(in_json: dict) -> str:
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
- return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
+ encoder = Encoder(indent=4, sort_keys=False)
+ encoder.custom_elems |= custom_elems
+    return encoder.encode(in_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
'Prettify a version 2.0 camera tuning config JSON file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('input', type=str, help='Input tuning file.')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
@@ -110,7 +119,12 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = pretty_print(in_json)
+ if args.target == 'pisp':
+ from ctt_pisp import grid_size
+ elif args.target == 'vc4':
+ from ctt_vc4 import grid_size
+
+ out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
diff --git a/utils/raspberrypi/ctt/ctt_ransac.py b/utils/raspberrypi/ctt/ctt_ransac.py
index 9ed7d93c..01bba302 100644
--- a/utils/raspberrypi/ctt/ctt_ransac.py
+++ b/utils/raspberrypi/ctt/ctt_ransac.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ransac.py - camera tuning tool RANSAC selector for Macbeth chart locator
+# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
diff --git a/utils/raspberrypi/ctt/ctt_tools.py b/utils/raspberrypi/ctt/ctt_tools.py
index 79195289..50b01ecf 100644
--- a/utils/raspberrypi/ctt/ctt_tools.py
+++ b/utils/raspberrypi/ctt/ctt_tools.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_tools.py - camera tuning tool miscellaneous
+# camera tuning tool miscellaneous
import time
import re
@@ -65,11 +65,12 @@ def parse_input():
directory = get_config(args_dict, '-i', None, 'string')
config = get_config(args_dict, '-c', None, 'string')
log_path = get_config(args_dict, '-l', None, 'string')
+ target = get_config(args_dict, '-t', "vc4", 'string')
if directory is None:
raise ArgError('\n\nERROR! No input directory given.')
if json_output is None:
raise ArgError('\n\nERROR! No output json given.')
- return json_output, directory, config, log_path
+ return json_output, directory, config, log_path, target
"""
diff --git a/utils/raspberrypi/ctt/ctt_vc4.py b/utils/raspberrypi/ctt/ctt_vc4.py
new file mode 100755
index 00000000..7154e110
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_vc4.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool data for VC4 platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.dpc": {
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.sdn": {
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 8000},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8600}
+ },
+ "bayes": 1
+ },
+ "rpi.agc": {
+ "metering_modes": {
+ "centre-weighted": {
+ "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
+ },
+ "spot": {
+ "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+ "matrix": {
+ "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ }
+ },
+ "exposure_modes": {
+ "normal": {
+ "shutter": [100, 10000, 30000, 60000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ },
+ "short": {
+ "shutter": [100, 5000, 10000, 20000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ }
+ },
+ "constraint_modes": {
+ "normal": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
+ ],
+ "highlight": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
+ {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
+ ]
+ },
+ "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
+ },
+ "rpi.alsc": {
+        "omega": 1.3,
+        "n_iter": 100,
+        "luminance_strength": 0.7
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.sharpen": {
+ }
+}
+
+grid_size = (16, 12)
diff --git a/utils/rkisp1/rkisp1-capture.sh b/utils/rkisp1/rkisp1-capture.sh
index c5f859f2..d767e31d 100755
--- a/utils/rkisp1/rkisp1-capture.sh
+++ b/utils/rkisp1/rkisp1-capture.sh
@@ -4,8 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# rkisp-capture.sh - Capture processed frames from cameras based on the
-# Rockchip ISP1
+# Capture processed frames from cameras based on the Rockchip ISP1
#
# The scripts makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
diff --git a/utils/tracepoints/analyze-ipa-trace.py b/utils/tracepoints/analyze-ipa-trace.py
index 50fbbf42..92e8a235 100755
--- a/utils/tracepoints/analyze-ipa-trace.py
+++ b/utils/tracepoints/analyze-ipa-trace.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# analyze-ipa-trace.py - Example of how to extract information from libcamera lttng traces
+# Example of how to extract information from libcamera lttng traces
import argparse
import bt2
diff --git a/utils/tracepoints/gen-tp-header.py b/utils/tracepoints/gen-tp-header.py
index a454615e..83606c32 100755
--- a/utils/tracepoints/gen-tp-header.py
+++ b/utils/tracepoints/gen-tp-header.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# gen-tp-header.py - Generate header file to contain lttng tracepoints
+# Generate header file to contain lttng tracepoints
import datetime
import jinja2
diff --git a/utils/tuning/libtuning/average.py b/utils/tuning/libtuning/average.py
index e28770d7..c41075a1 100644
--- a/utils/tuning/libtuning/average.py
+++ b/utils/tuning/libtuning/average.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# average.py - Wrapper for numpy averaging functions to enable duck-typing
+# Wrapper for numpy averaging functions to enable duck-typing
import numpy as np
diff --git a/utils/tuning/libtuning/generators/generator.py b/utils/tuning/libtuning/generators/generator.py
index 7c8c9b99..77a8ba4a 100644
--- a/utils/tuning/libtuning/generators/generator.py
+++ b/utils/tuning/libtuning/generators/generator.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# generator.py - Base class for a generator to convert dict to tuning file
+# Base class for a generator to convert dict to tuning file
from pathlib import Path
diff --git a/utils/tuning/libtuning/generators/raspberrypi_output.py b/utils/tuning/libtuning/generators/raspberrypi_output.py
index 813491cd..47b49059 100644
--- a/utils/tuning/libtuning/generators/raspberrypi_output.py
+++ b/utils/tuning/libtuning/generators/raspberrypi_output.py
@@ -2,7 +2,7 @@
#
# Copyright 2022 Raspberry Pi Ltd
#
-# raspberrypi_output.py - Generate tuning file in Raspberry Pi's json format
+# Generate tuning file in Raspberry Pi's json format
#
# (Copied from ctt_pretty_print_json.py)
diff --git a/utils/tuning/libtuning/generators/yaml_output.py b/utils/tuning/libtuning/generators/yaml_output.py
index effb4fb3..8f22d386 100644
--- a/utils/tuning/libtuning/generators/yaml_output.py
+++ b/utils/tuning/libtuning/generators/yaml_output.py
@@ -2,7 +2,7 @@
#
# Copyright 2022 Paul Elder <paul.elder@ideasonboard.com>
#
-# yaml_output.py - Generate tuning file in YAML format
+# Generate tuning file in YAML format
from .generator import Generator
diff --git a/utils/tuning/libtuning/gradient.py b/utils/tuning/libtuning/gradient.py
index 5106f821..b643f502 100644
--- a/utils/tuning/libtuning/gradient.py
+++ b/utils/tuning/libtuning/gradient.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# gradient.py - Gradients that can be used to distribute or map numbers
+# Gradients that can be used to distribute or map numbers
import libtuning as lt
diff --git a/utils/tuning/libtuning/image.py b/utils/tuning/libtuning/image.py
index aa9d20b5..e2181b11 100644
--- a/utils/tuning/libtuning/image.py
+++ b/utils/tuning/libtuning/image.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# image.py - Container for an image and associated metadata
+# Container for an image and associated metadata
import binascii
import numpy as np
diff --git a/utils/tuning/libtuning/libtuning.py b/utils/tuning/libtuning/libtuning.py
index d84c148f..5e22288d 100644
--- a/utils/tuning/libtuning/libtuning.py
+++ b/utils/tuning/libtuning/libtuning.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# libtuning.py - An infrastructure for camera tuning tools
+# An infrastructure for camera tuning tools
import argparse
diff --git a/utils/tuning/libtuning/macbeth.py b/utils/tuning/libtuning/macbeth.py
index 5faddf66..e1182464 100644
--- a/utils/tuning/libtuning/macbeth.py
+++ b/utils/tuning/libtuning/macbeth.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# macbeth.py - Locate and extract Macbeth charts from images
+# Locate and extract Macbeth charts from images
# (Copied from: ctt_macbeth_locator.py)
# \todo Add debugging
diff --git a/utils/tuning/libtuning/modules/agc/__init__.py b/utils/tuning/libtuning/modules/agc/__init__.py
new file mode 100644
index 00000000..4db9ca37
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.agc.agc import AGC
+from libtuning.modules.agc.rkisp1 import AGCRkISP1
diff --git a/utils/tuning/libtuning/modules/agc/agc.py b/utils/tuning/libtuning/modules/agc/agc.py
new file mode 100644
index 00000000..9c8899ba
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/agc.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from ..module import Module
+
+import libtuning as lt
+
+
+class AGC(Module):
+ type = 'agc'
+ hr_name = 'AGC (Base)'
+ out_name = 'GenericAGC'
+
+ # \todo Add sector shapes and stuff just like lsc
+ def __init__(self, *,
+ debug: list):
+ super().__init__()
+
+ self.debug = debug
diff --git a/utils/tuning/libtuning/modules/agc/rkisp1.py b/utils/tuning/libtuning/modules/agc/rkisp1.py
new file mode 100644
index 00000000..19a5555b
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/rkisp1.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+#
+# AGC module for tuning rkisp1
+
+from .agc import AGC
+
+import libtuning as lt
+
+
+class AGCRkISP1(AGC):
+ hr_name = 'AGC (RkISP1)'
+ out_name = 'Agc'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't actually need anything from the config file
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def _generate_metering_modes(self) -> dict:
+ centre_weighted = [
+ 0, 0, 0, 0, 0,
+ 0, 6, 8, 6, 0,
+ 0, 8, 16, 8, 0,
+ 0, 6, 8, 6, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+ spot = [
+ 0, 0, 0, 0, 0,
+ 0, 2, 4, 2, 0,
+ 0, 4, 16, 4, 0,
+ 0, 2, 4, 2, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+ matrix = [1 for i in range(0, 25)]
+
+ return {
+ 'MeteringCentreWeighted': centre_weighted,
+ 'MeteringSpot': spot,
+ 'MeteringMatrix': matrix
+ }
+
+ def _generate_exposure_modes(self) -> dict:
+ normal = {'shutter': [100, 10000, 30000, 60000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+ short = {'shutter': [100, 5000, 10000, 20000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+
+ return {'ExposureNormal': normal, 'ExposureShort': short}
+
+ def _generate_constraint_modes(self) -> dict:
+ normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}}
+ highlight = {
+ 'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5},
+ 'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8}
+ }
+
+ return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
+
+    def _generate_y_target(self) -> float:
+ return 0.16
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ output['AeMeteringMode'] = self._generate_metering_modes()
+ output['AeExposureMode'] = self._generate_exposure_modes()
+ output['AeConstraintMode'] = self._generate_constraint_modes()
+ output['relativeLuminanceTarget'] = self._generate_y_target()
+
+ # \todo Debug functionality
+
+ return output
diff --git a/utils/tuning/libtuning/modules/lsc/raspberrypi.py b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
index 58f5000d..f19c7163 100644
--- a/utils/tuning/libtuning/modules/lsc/raspberrypi.py
+++ b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
@@ -3,7 +3,7 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi.py - ALSC module for tuning Raspberry Pi
+# ALSC module for tuning Raspberry Pi
from .lsc import LSC
diff --git a/utils/tuning/libtuning/modules/lsc/rkisp1.py b/utils/tuning/libtuning/modules/lsc/rkisp1.py
index 5701ae0a..20406e43 100644
--- a/utils/tuning/libtuning/modules/lsc/rkisp1.py
+++ b/utils/tuning/libtuning/modules/lsc/rkisp1.py
@@ -3,7 +3,7 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# rkisp1.py - LSC module for tuning rkisp1
+# LSC module for tuning rkisp1
from .lsc import LSC
diff --git a/utils/tuning/libtuning/modules/module.py b/utils/tuning/libtuning/modules/module.py
index 12e2fc7c..de624384 100644
--- a/utils/tuning/libtuning/modules/module.py
+++ b/utils/tuning/libtuning/modules/module.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# module.py - Base class for algorithm-specific tuning modules
+# Base class for algorithm-specific tuning modules
# @var type Type of the module. Defined in the base module.
diff --git a/utils/tuning/libtuning/parsers/parser.py b/utils/tuning/libtuning/parsers/parser.py
index a17d8d71..0c3944c7 100644
--- a/utils/tuning/libtuning/parsers/parser.py
+++ b/utils/tuning/libtuning/parsers/parser.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# parser.py - Base class for a parser for a specific format of config file
+# Base class for a parser for a specific format of config file
class Parser(object):
def __init__(self):
diff --git a/utils/tuning/libtuning/parsers/raspberrypi_parser.py b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
index d26586ba..f1da4592 100644
--- a/utils/tuning/libtuning/parsers/raspberrypi_parser.py
+++ b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi_parser.py - Parser for Raspberry Pi config file format
+# Parser for Raspberry Pi config file format
from .parser import Parser
diff --git a/utils/tuning/libtuning/parsers/yaml_parser.py b/utils/tuning/libtuning/parsers/yaml_parser.py
index 5c1673a5..244db24d 100644
--- a/utils/tuning/libtuning/parsers/yaml_parser.py
+++ b/utils/tuning/libtuning/parsers/yaml_parser.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# yaml_parser.py - Parser for YAML format config file
+# Parser for YAML format config file
from .parser import Parser
diff --git a/utils/tuning/libtuning/smoothing.py b/utils/tuning/libtuning/smoothing.py
index b8a5a242..de4d920c 100644
--- a/utils/tuning/libtuning/smoothing.py
+++ b/utils/tuning/libtuning/smoothing.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# smoothing.py - Wrapper for cv2 smoothing functions to enable duck-typing
+# Wrapper for cv2 smoothing functions to enable duck-typing
import cv2
diff --git a/utils/tuning/libtuning/utils.py b/utils/tuning/libtuning/utils.py
index b60f2c9b..1e8128ea 100644
--- a/utils/tuning/libtuning/utils.py
+++ b/utils/tuning/libtuning/utils.py
@@ -3,7 +3,7 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# utils.py - Utilities for libtuning
+# Utilities for libtuning
import decimal
import math
diff --git a/utils/tuning/raspberrypi/alsc.py b/utils/tuning/raspberrypi/alsc.py
index 024eb5a3..ba8fc9e1 100644
--- a/utils/tuning/raspberrypi/alsc.py
+++ b/utils/tuning/raspberrypi/alsc.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# alsc.py - ALSC module instance for Raspberry Pi tuning scripts
+# ALSC module instance for Raspberry Pi tuning scripts
import libtuning as lt
from libtuning.modules.lsc import ALSCRaspberryPi
diff --git a/utils/tuning/raspberrypi_alsc_only.py b/utils/tuning/raspberrypi_alsc_only.py
index af04e6a8..777d8007 100755
--- a/utils/tuning/raspberrypi_alsc_only.py
+++ b/utils/tuning/raspberrypi_alsc_only.py
@@ -3,7 +3,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi_alsc_only.py - Tuning script for raspberrypi, ALSC only
+# Tuning script for raspberrypi, ALSC only
import sys
diff --git a/utils/tuning/rkisp1.py b/utils/tuning/rkisp1.py
index 1cea6ddb..d0ce15d5 100755
--- a/utils/tuning/rkisp1.py
+++ b/utils/tuning/rkisp1.py
@@ -3,7 +3,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# rkisp1.py - Tuning script for rkisp1
+# Tuning script for rkisp1
import sys
@@ -11,6 +11,7 @@ import libtuning as lt
from libtuning.parsers import YamlParser
from libtuning.generators import YamlOutput
from libtuning.modules.lsc import LSCRkISP1
+from libtuning.modules.agc import AGCRkISP1
tuner = lt.Tuner('RkISP1')
tuner.add(LSCRkISP1(
@@ -32,9 +33,10 @@ tuner.add(LSCRkISP1(
# values. This can also be a custom function.
smoothing_function=lt.smoothing.MedianBlur(3),
))
+tuner.add(AGCRkISP1(debug=[lt.Debug.Plot]))
tuner.set_input_parser(YamlParser())
tuner.set_output_formatter(YamlOutput())
-tuner.set_output_order([LSCRkISP1])
+tuner.set_output_order([AGCRkISP1, LSCRkISP1])
if __name__ == '__main__':
sys.exit(tuner.run(sys.argv))
diff --git a/utils/update-kernel-headers.sh b/utils/update-kernel-headers.sh
index 590986d2..8405be0a 100755
--- a/utils/update-kernel-headers.sh
+++ b/utils/update-kernel-headers.sh
@@ -52,6 +52,7 @@ headers="
linux/media-bus-format.h
linux/media.h
linux/rkisp1-config.h
+ linux/udmabuf.h
linux/v4l2-common.h
linux/v4l2-controls.h
linux/v4l2-mediabus.h