Diffstat (limited to 'utils')
-rwxr-xr-x | utils/abi-compat.sh | 13
-rwxr-xr-x | utils/checkstyle.py | 244
-rw-r--r-- | utils/codegen/controls.py | 112
-rwxr-xr-x | utils/codegen/gen-controls.py | 109
-rwxr-xr-x | utils/codegen/gen-formats.py (renamed from utils/gen-formats.py) | 2
-rwxr-xr-x | utils/codegen/gen-header.sh (renamed from utils/gen-header.sh) | 9
-rwxr-xr-x | utils/codegen/gen-ipa-pub-key.py (renamed from utils/gen-ipa-pub-key.py) | 2
-rwxr-xr-x | utils/codegen/gen-tp-header.py (renamed from utils/tracepoints/gen-tp-header.py) | 6
-rwxr-xr-x | utils/codegen/ipc/extract-docs.py (renamed from utils/ipc/extract-docs.py) | 4
-rwxr-xr-x | utils/codegen/ipc/generate.py (renamed from utils/ipc/generate.py) | 5
-rw-r--r-- | utils/codegen/ipc/generators/__init__.py (renamed from utils/ipc/generators/__init__.py) | 0
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl) | 7
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl) | 2
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl (renamed from utils/ipc/generators/libcamera_templates/definition_functions.tmpl) | 0
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/meson.build (renamed from utils/ipc/generators/libcamera_templates/meson.build) | 0
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl) | 16
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl) | 6
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl) | 2
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl) | 2
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl) | 2
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl (renamed from utils/ipc/generators/libcamera_templates/proxy_functions.tmpl) | 0
-rw-r--r-- | utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl (renamed from utils/ipc/generators/libcamera_templates/serializer.tmpl) | 0
-rw-r--r-- | utils/codegen/ipc/generators/meson.build (renamed from utils/ipc/generators/meson.build) | 0
-rw-r--r-- | utils/codegen/ipc/generators/mojom_libcamera_generator.py (renamed from utils/ipc/generators/mojom_libcamera_generator.py) | 4
-rw-r--r-- | utils/codegen/ipc/meson.build (renamed from utils/ipc/meson.build) | 3
-rw-r--r-- | utils/codegen/ipc/mojo/README (renamed from utils/ipc/mojo/README) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/LICENSE (renamed from utils/ipc/mojo/public/LICENSE) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/.style.yapf (renamed from utils/ipc/mojo/public/tools/.style.yapf) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/BUILD.gn (renamed from utils/ipc/mojo/public/tools/BUILD.gn) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn (renamed from utils/ipc/mojo/public/tools/bindings/BUILD.gn) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/README.md (renamed from utils/ipc/mojo/public/tools/bindings/README.md) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/__init__.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py (renamed from utils/ipc/mojo/public/tools/bindings/concatenate-files.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py (renamed from utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py (renamed from utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py (renamed from utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py (renamed from utils/ipc/mojo/public/tools/bindings/minify_with_terser.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni (renamed from utils/ipc/mojo/public/tools/bindings/mojom.gni) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py (renamed from utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py (renamed from utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn (renamed from utils/ipc/mojo/public/tools/mojom/BUILD.gn) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/README.md (renamed from utils/ipc/mojo/public/tools/mojom/README.md) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py (renamed from utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/const_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/enum_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/feature_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn (renamed from utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/__init__.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/error.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/union_unittest.py) | 0
-rw-r--r-- | utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py) | 0
-rwxr-xr-x | utils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py (renamed from utils/ipc/mojo/public/tools/run_all_python_unittests.py) | 0
-rwxr-xr-x | utils/codegen/ipc/parser.py (renamed from utils/ipc/parser.py) | 5
-rw-r--r-- | utils/codegen/ipc/tools/README (renamed from utils/ipc/tools/README) | 0
-rw-r--r-- | utils/codegen/ipc/tools/diagnosis/crbug_1001171.py (renamed from utils/ipc/tools/diagnosis/crbug_1001171.py) | 0
-rw-r--r-- | utils/codegen/meson.build | 18
-rwxr-xr-x | utils/gen-controls.py | 370
-rwxr-xr-x | utils/gen-ipa-priv-key.sh | 2
-rwxr-xr-x | utils/gen-version.sh | 2
-rwxr-xr-x | utils/hooks/pre-push | 11
-rwxr-xr-x | utils/ipu3/ipu3-capture.sh | 2
-rw-r--r-- | utils/ipu3/ipu3-pack.c | 4
-rwxr-xr-x | utils/ipu3/ipu3-process.sh | 2
-rw-r--r-- | utils/ipu3/ipu3-unpack.c | 3
-rw-r--r-- | utils/meson.build | 10
-rwxr-xr-x | utils/raspberrypi/ctt/alsc_only.py | 20
-rw-r--r-- | utils/raspberrypi/ctt/cac_only.py | 142
-rw-r--r-- | utils/raspberrypi/ctt/colors.py | 2
-rwxr-xr-x | utils/raspberrypi/ctt/convert_tuning.py | 98
-rwxr-xr-x | utils/raspberrypi/ctt/ctt.py | 257
-rw-r--r-- | utils/raspberrypi/ctt/ctt_alsc.py | 83
-rw-r--r-- | utils/raspberrypi/ctt/ctt_awb.py | 13
-rw-r--r-- | utils/raspberrypi/ctt/ctt_cac.py | 228
-rw-r--r-- | utils/raspberrypi/ctt/ctt_ccm.py | 8
-rw-r--r-- | utils/raspberrypi/ctt/ctt_config_example.json | 5
-rw-r--r-- | utils/raspberrypi/ctt/ctt_dots_locator.py | 118
-rw-r--r-- | utils/raspberrypi/ctt/ctt_geq.py | 2
-rw-r--r-- | utils/raspberrypi/ctt/ctt_image_load.py | 3
-rw-r--r-- | utils/raspberrypi/ctt/ctt_lux.py | 2
-rw-r--r-- | utils/raspberrypi/ctt/ctt_macbeth_locator.py | 2
-rw-r--r-- | utils/raspberrypi/ctt/ctt_noise.py | 2
-rwxr-xr-x | utils/raspberrypi/ctt/ctt_pisp.py | 805
-rwxr-xr-x | utils/raspberrypi/ctt/ctt_pretty_print_json.py | 22
-rw-r--r-- | utils/raspberrypi/ctt/ctt_ransac.py | 2
-rw-r--r-- | utils/raspberrypi/ctt/ctt_tools.py | 5
-rwxr-xr-x | utils/raspberrypi/ctt/ctt_vc4.py | 126
-rwxr-xr-x | utils/rkisp1/rkisp1-capture.sh | 3
-rwxr-xr-x | utils/tracepoints/analyze-ipa-trace.py | 2
-rw-r--r-- | utils/tracepoints/meson.build | 5
-rw-r--r-- | utils/tuning/README.rst | 23
-rw-r--r-- | utils/tuning/config-example.yaml | 12
-rw-r--r-- | utils/tuning/libtuning/average.py | 2
-rw-r--r-- | utils/tuning/libtuning/ctt_awb.py | 378
-rw-r--r-- | utils/tuning/libtuning/ctt_ccm.py | 408
-rw-r--r-- | utils/tuning/libtuning/ctt_colors.py | 30
-rw-r--r-- | utils/tuning/libtuning/ctt_ransac.py | 71
-rw-r--r-- | utils/tuning/libtuning/generators/generator.py | 2
-rw-r--r-- | utils/tuning/libtuning/generators/raspberrypi_output.py | 2
-rw-r--r-- | utils/tuning/libtuning/generators/yaml_output.py | 10
-rw-r--r-- | utils/tuning/libtuning/gradient.py | 2
-rw-r--r-- | utils/tuning/libtuning/image.py | 12
-rw-r--r-- | utils/tuning/libtuning/libtuning.py | 30
-rw-r--r-- | utils/tuning/libtuning/macbeth.py | 67
-rw-r--r-- | utils/tuning/libtuning/macbeth_ref.pgm | 2
-rw-r--r-- | utils/tuning/libtuning/modules/agc/__init__.py | 6
-rw-r--r-- | utils/tuning/libtuning/modules/agc/agc.py | 21
-rw-r--r-- | utils/tuning/libtuning/modules/agc/rkisp1.py | 79
-rw-r--r-- | utils/tuning/libtuning/modules/ccm/__init__.py | 6
-rw-r--r-- | utils/tuning/libtuning/modules/ccm/ccm.py | 41
-rw-r--r-- | utils/tuning/libtuning/modules/ccm/rkisp1.py | 28
-rw-r--r-- | utils/tuning/libtuning/modules/lsc/lsc.py | 5
-rw-r--r-- | utils/tuning/libtuning/modules/lsc/raspberrypi.py | 14
-rw-r--r-- | utils/tuning/libtuning/modules/lsc/rkisp1.py | 22
-rw-r--r-- | utils/tuning/libtuning/modules/module.py | 2
-rw-r--r-- | utils/tuning/libtuning/modules/static.py | 24
-rw-r--r-- | utils/tuning/libtuning/parsers/parser.py | 2
-rw-r--r-- | utils/tuning/libtuning/parsers/raspberrypi_parser.py | 2
-rw-r--r-- | utils/tuning/libtuning/parsers/yaml_parser.py | 11
-rw-r--r-- | utils/tuning/libtuning/smoothing.py | 2
-rw-r--r-- | utils/tuning/libtuning/utils.py | 99
-rw-r--r-- | utils/tuning/raspberrypi/alsc.py | 2
-rwxr-xr-x | utils/tuning/raspberrypi_alsc_only.py | 2
-rw-r--r-- | utils/tuning/requirements.txt | 9
-rwxr-xr-x | utils/tuning/rkisp1.py | 57
-rwxr-xr-x | utils/update-kernel-headers.sh | 3
160 files changed, 3550 insertions, 872 deletions
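One notable change in the checkstyle.py diff below is that commit trailers are no longer parsed by hand; the full commit message is now fed through `git interpret-trailers --parse`. The following minimal sketch mirrors the new `Commit._parse_commit()` logic; the sample message passed at the end is illustrative and not taken from the patch.

```python
import subprocess


def parse_trailers(title, body_lines):
    """Return the trailer lines of a commit message.

    Mirrors the new Commit._parse_commit(): the title and body are fed
    together to git-interpret-trailers, which otherwise fails to find
    trailers when the body contains trailers only.
    """
    message = title + '\n\n' + '\n'.join(body_lines)
    ret = subprocess.run(['git', 'interpret-trailers', '--parse'],
                         input=message.encode('utf-8'),
                         stdout=subprocess.PIPE)
    return ret.stdout.decode('utf-8').splitlines()


# Illustrative message, not taken from the patch
print(parse_trailers('utils: An illustrative commit title',
                     ['Some description.',
                      '',
                      'Signed-off-by: Jane Doe <jane@example.com>']))
```

Feeding the title and body together matters because, as the patch comment notes, git-interpret-trailers fails to find trailers when the body contains trailers only. The parsed trailers then feed the new check that a `Signed-off-by` trailer matches the commit author.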
diff --git a/utils/abi-compat.sh b/utils/abi-compat.sh index c936ac05..31f61e32 100755 --- a/utils/abi-compat.sh +++ b/utils/abi-compat.sh @@ -156,15 +156,16 @@ create_abi_dump() { # Generate a minimal libcamera build. "lib" and "prefix" are # defined explicitly to avoid system default ambiguities. meson setup "$build" "$worktree" \ - -Dlibdir=lib \ - -Dprefix=/usr/local/ \ - -Ddocumentation=disabled \ -Dcam=disabled \ - -Dqcam=disabled \ + -Ddocumentation=disabled \ -Dgstreamer=disabled \ -Dlc-compliance=disabled \ - -Dtracing=disabled \ - -Dpipelines= + -Dlibdir=lib \ + -Dpipelines= \ + -Dprefix=/usr/local/ \ + -Dpycamera=disabled \ + -Dqcam=disabled \ + -Dtracing=disabled ninja -C "$build" DESTDIR="$install" ninja -C "$build" install diff --git a/utils/checkstyle.py b/utils/checkstyle.py index 84f44a42..ab89c0a1 100755 --- a/utils/checkstyle.py +++ b/utils/checkstyle.py @@ -4,7 +4,7 @@ # # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> # -# checkstyle.py - A patch style checker script based on clang-format +# A patch style checker script based on clang-format # # TODO: # @@ -211,36 +211,66 @@ class CommitFile: class Commit: def __init__(self, commit): - self.commit = commit + self._commit = commit + self._author = None self._trailers = [] self._parse() - def _parse_trailers(self, lines): - for index in range(1, len(lines)): - line = lines[index] - if not line: - break + def _parse_commit(self): + # Get and parse the commit message. + ret = subprocess.run(['git', 'show', '--format=%H%n%an <%ae>%n%s%n%b', + '--no-patch', self.commit], + stdout=subprocess.PIPE).stdout.decode('utf-8') + lines = ret.splitlines() - self._trailers.append(line) + self._commit = lines[0] + self._author = lines[1] + self._title = lines[2] + self._body = lines[3:] - return index + # Parse the trailers. Feed git-interpret-trailers with a full commit + # message that includes both the title and the body, as it otherwise + # fails to find trailers when the body contains trailers only. + message = self._title + '\n\n' + '\n'.join(self._body) + trailers = subprocess.run(['git', 'interpret-trailers', '--parse'], + input=message.encode('utf-8'), + stdout=subprocess.PIPE).stdout.decode('utf-8') + + self._trailers = trailers.splitlines() def _parse(self): - # Get the commit title and list of files. - ret = subprocess.run(['git', 'show', '--format=%s%n%(trailers:only,unfold)', '--name-status', + self._parse_commit() + + # Get the list of files. Use an empty format specifier to suppress the + # commit message completely. 
+ ret = subprocess.run(['git', 'show', '--format=', '--name-status', self.commit], stdout=subprocess.PIPE).stdout.decode('utf-8') - lines = ret.splitlines() - - self._title = lines[0] + self._files = [CommitFile(f) for f in ret.splitlines()] - index = self._parse_trailers(lines) - self._files = [CommitFile(f) for f in lines[index:] if f] + def __repr__(self): + return '\n'.join([ + f'commit {self.commit}', + f'Author: {self.author}', + f'', + f' {self.title}', + '', + '\n'.join([line and f' {line}' or '' for line in self._body]), + 'Trailers:', + ] + self.trailers) def files(self, filter='AMR'): return [f.filename for f in self._files if f.status in filter] @property + def author(self): + return self._author + + @property + def commit(self): + return self._commit + + @property def title(self): return self._title @@ -278,20 +308,14 @@ class StagedChanges(Commit): class Amendment(Commit): def __init__(self): - Commit.__init__(self, '') + Commit.__init__(self, 'HEAD') def _parse(self): - # Create a title using HEAD commit and parse the trailers. - ret = subprocess.run(['git', 'show', '--format=%H %s%n%(trailers:only,unfold)', - '--no-patch'], - stdout=subprocess.PIPE).stdout.decode('utf-8') - lines = ret.splitlines() - - self._title = 'Amendment of ' + lines[0].strip() + self._parse_commit() - self._parse_trailers(lines) + self._title = f'Amendment of "{self.title}"' - # Extract the list of modified files + # Extract the list of modified files. ret = subprocess.run(['git', 'diff', '--staged', '--name-status', 'HEAD~'], stdout=subprocess.PIPE).stdout.decode('utf-8') self._files = [CommitFile(f) for f in ret.splitlines()] @@ -331,11 +355,16 @@ class CommitChecker(metaclass=ClassRegistry): # Class methods # @classmethod - def checkers(cls, names): + def checkers(cls, commit, names): for checker in cls.subclasses: if names and checker.__name__ not in names: continue - yield checker + if checker.supports(commit): + yield checker + + @classmethod + def supports(cls, commit): + return type(commit) in cls.commit_types class CommitIssue(object): @@ -344,6 +373,8 @@ class CommitIssue(object): class HeaderAddChecker(CommitChecker): + commit_types = (Commit, StagedChanges, Amendment) + @classmethod def check(cls, commit, top_level): issues = [] @@ -388,6 +419,8 @@ class HeaderAddChecker(CommitChecker): class TitleChecker(CommitChecker): + commit_types = (Commit,) + prefix_regex = re.compile(r'^([a-zA-Z0-9_.-]+: )+') release_regex = re.compile(r'libcamera v[0-9]+\.[0-9]+\.[0-9]+') @@ -395,11 +428,6 @@ class TitleChecker(CommitChecker): def check(cls, commit, top_level): title = commit.title - # Skip the check when validating staged changes (as done through a - # pre-commit hook) as there is no title to check in that case. - if isinstance(commit, StagedChanges): - return [] - # Ignore release commits, they don't need a prefix. 
if TitleChecker.release_regex.fullmatch(title): return [] @@ -455,6 +483,8 @@ class TitleChecker(CommitChecker): class TrailersChecker(CommitChecker): + commit_types = (Commit,) + commit_regex = re.compile(r'[0-9a-f]{12}[0-9a-f]* \(".*"\)') coverity_regex = re.compile(r'Coverity CID=.*') @@ -493,6 +523,8 @@ class TrailersChecker(CommitChecker): def check(cls, commit, top_level): issues = [] + sob_found = False + for trailer in commit.trailers: match = TrailersChecker.trailer_regex.fullmatch(trailer) if not match: @@ -515,6 +547,13 @@ class TrailersChecker(CommitChecker): issues.append(CommitIssue(f"Malformed value '{value}' for commit trailer '{key}'")) continue + if key == 'Signed-off-by': + if value == commit.author: + sob_found = True + + if not sob_found: + issues.append(CommitIssue(f"No 'Signed-off-by' trailer matching author '{commit.author}', see Documentation/contributing.rst")) + return issues @@ -556,20 +595,49 @@ class StyleChecker(metaclass=ClassRegistry): class StyleIssue(object): - def __init__(self, line_number, line, msg): + def __init__(self, line_number, position, line, msg): self.line_number = line_number + self.position = position self.line = line self.msg = msg +class HexValueChecker(StyleChecker): + patterns = ('*.c', '*.cpp', '*.h') + + regex = re.compile(r'\b0[xX][0-9a-fA-F]+\b') + + def __init__(self, content): + super().__init__() + self.__content = content + + def check(self, line_numbers): + issues = [] + + for line_number in line_numbers: + line = self.__content[line_number - 1] + match = HexValueChecker.regex.search(line) + if not match: + continue + + value = match.group(0) + if value == value.lower(): + continue + + issues.append(StyleIssue(line_number, match.span(0), line, + f'Use lowercase hex constant {value.lower()}')) + + return issues + + class IncludeChecker(StyleChecker): patterns = ('*.cpp', '*.h') - headers = ('assert', 'ctype', 'errno', 'fenv', 'float', 'inttypes', - 'limits', 'locale', 'setjmp', 'signal', 'stdarg', 'stddef', - 'stdint', 'stdio', 'stdlib', 'string', 'time', 'uchar', 'wchar', - 'wctype') - include_regex = re.compile('^#include <c([a-z]*)>') + headers = ('cassert', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', + 'climits', 'clocale', 'csetjmp', 'csignal', 'cstdarg', 'cstddef', + 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cuchar', + 'cwchar', 'cwctype', 'math.h') + include_regex = re.compile(r'^#include <([a-z.]*)>') def __init__(self, content): super().__init__() @@ -588,8 +656,15 @@ class IncludeChecker(StyleChecker): if header not in IncludeChecker.headers: continue - issues.append(StyleIssue(line_number, line, - 'C compatibility header <%s.h> is preferred' % header)) + if header.endswith('.h'): + header_type = 'C++' + header = 'c' + header[:-2] + else: + header_type = 'C compatibility' + header = header[1:] + '.h' + + issues.append(StyleIssue(line_number, match.span(1), line, + f'{header_type} header <{header}> is preferred')) return issues @@ -606,10 +681,12 @@ class LogCategoryChecker(StyleChecker): issues = [] for line_number in line_numbers: line = self.__content[line_number-1] - if not LogCategoryChecker.log_regex.search(line): + match = LogCategoryChecker.log_regex.search(line) + if not match: continue - issues.append(StyleIssue(line_number, line, 'LOG() should use categories')) + issues.append(StyleIssue(line_number, match.span(1), line, + 'LOG() should use categories')) return issues @@ -625,47 +702,16 @@ class MesonChecker(StyleChecker): issues = [] for line_number in line_numbers: line = 
self.__content[line_number-1] - if line.find('\t') != -1: - issues.append(StyleIssue(line_number, line, 'meson.build should use spaces for indentation')) - return issues - - -class Pep8Checker(StyleChecker): - patterns = ('*.py',) - results_regex = re.compile('stdin:([0-9]+):([0-9]+)(.*)') - - def __init__(self, content): - super().__init__() - self.__content = content - - def check(self, line_numbers): - issues = [] - data = ''.join(self.__content).encode('utf-8') - - try: - ret = subprocess.run(['pycodestyle', '--ignore=E501', '-'], - input=data, stdout=subprocess.PIPE) - except FileNotFoundError: - issues.append(StyleIssue(0, None, 'Please install pycodestyle to validate python additions')) - return issues - - results = ret.stdout.decode('utf-8').splitlines() - for item in results: - search = re.search(Pep8Checker.results_regex, item) - line_number = int(search.group(1)) - position = int(search.group(2)) - msg = search.group(3) - - if line_number in line_numbers: - line = self.__content[line_number - 1] - issues.append(StyleIssue(line_number, line, msg)) - + pos = line.find('\t') + if pos != -1: + issues.append(StyleIssue(line_number, [pos, pos], line, + 'meson.build should use spaces for indentation')) return issues class ShellChecker(StyleChecker): patterns = ('*.sh',) - results_line_regex = re.compile('In - line ([0-9]+):') + results_line_regex = re.compile(r'In - line ([0-9]+):') def __init__(self, content): super().__init__() @@ -679,7 +725,7 @@ class ShellChecker(StyleChecker): ret = subprocess.run(['shellcheck', '-Cnever', '-'], input=data, stdout=subprocess.PIPE) except FileNotFoundError: - issues.append(StyleIssue(0, None, 'Please install shellcheck to validate shell script additions')) + issues.append(StyleIssue(0, None, None, 'Please install shellcheck to validate shell script additions')) return issues results = ret.stdout.decode('utf-8').splitlines() @@ -692,11 +738,8 @@ class ShellChecker(StyleChecker): line = results[nr + 1] msg = results[nr + 2] - # Determined, but not yet used - position = msg.find('^') + 1 - if line_number in line_numbers: - issues.append(StyleIssue(line_number, line, msg)) + issues.append(StyleIssue(line_number, None, line, msg)) return issues @@ -753,7 +796,8 @@ class CLangFormatter(Formatter): class DoxygenFormatter(Formatter): patterns = ('*.c', '*.cpp') - return_regex = re.compile(' +\\* +\\\\return +[a-z]') + oneliner_regex = re.compile(r'^ +\* +\\(brief|param|return)\b.*\.$') + return_regex = re.compile(r' +\* +\\return +[a-z]') @classmethod def format(cls, filename, data): @@ -768,6 +812,7 @@ class DoxygenFormatter(Formatter): lines.append(line) continue + line = cls.oneliner_regex.sub(lambda m: m.group(0)[:-1], line) line = cls.return_regex.sub(lambda m: m.group(0)[:-1] + m.group(0)[-1].upper(), line) if line.find('*/') != -1: @@ -813,7 +858,7 @@ class DPointerFormatter(Formatter): class IncludeOrderFormatter(Formatter): patterns = ('*.cpp', '*.h') - include_regex = re.compile('^#include (["<])([^">]*)([">])') + include_regex = re.compile(r'^#include (["<])([^">]*)([">])') @classmethod def format(cls, filename, data): @@ -865,6 +910,21 @@ class IncludeOrderFormatter(Formatter): return '\n'.join(lines) +class Pep8Formatter(Formatter): + patterns = ('*.py',) + + @classmethod + def format(cls, filename, data): + try: + ret = subprocess.run(['autopep8', '--ignore=E501', '-'], + input=data.encode('utf-8'), stdout=subprocess.PIPE) + except FileNotFoundError: + issues.append(StyleIssue(0, None, None, 'Please install autopep8 to format python 
additions')) + return issues + + return ret.stdout.decode('utf-8') + + class StripTrailingSpaceFormatter(Formatter): patterns = ('*.c', '*.cpp', '*.h', '*.py', 'meson.build') @@ -936,6 +996,16 @@ def check_file(top_level, commit, filename, checkers): print('%s+%s%s' % (Colours.fg(Colours.Yellow), issue.line.rstrip(), Colours.reset())) + if issue.position is not None: + # Align the position marker by using the original line with + # all characters except for tabs replaced with spaces. This + # ensures proper alignment regardless of how the code is + # indented. + start = issue.position[0] + prefix = ''.join([c if c == '\t' else ' ' for c in issue.line[:start]]) + length = issue.position[1] - start - 1 + print(' ' + prefix + '^' + '~' * length) + return len(formatted_diff) + len(issues) @@ -949,7 +1019,7 @@ def check_style(top_level, commit, checkers): issues = 0 # Apply the commit checkers first. - for checker in CommitChecker.checkers(checkers): + for checker in CommitChecker.checkers(commit, checkers): for issue in checker.check(commit, top_level): print('%s%s%s' % (Colours.fg(Colours.Yellow), issue.msg, Colours.reset())) issues += 1 diff --git a/utils/codegen/controls.py b/utils/codegen/controls.py new file mode 100644 index 00000000..7bafee59 --- /dev/null +++ b/utils/codegen/controls.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2019, Google Inc. +# +# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +# +# Helper classes to handle source code generation for libcamera controls + + +class ControlEnum(object): + def __init__(self, data): + self.__data = data + + @property + def description(self): + """The enum description""" + return self.__data.get('description') + + @property + def name(self): + """The enum name""" + return self.__data.get('name') + + @property + def value(self): + """The enum value""" + return self.__data.get('value') + + +class Control(object): + def __init__(self, name, data, vendor): + self.__name = name + self.__data = data + self.__enum_values = None + self.__size = None + self.__vendor = vendor + + enum_values = data.get('enum') + if enum_values is not None: + self.__enum_values = [ControlEnum(enum) for enum in enum_values] + + size = self.__data.get('size') + if size is not None: + if len(size) == 0: + raise RuntimeError(f'Control `{self.__name}` size must have at least one dimension') + + # Compute the total number of elements in the array. If any of the + # array dimension is a string, the array is variable-sized. 
+ num_elems = 1 + for dim in size: + if type(dim) is str: + num_elems = 0 + break + + dim = int(dim) + if dim <= 0: + raise RuntimeError(f'Control `{self.__name}` size must have positive values only') + + num_elems *= dim + + self.__size = num_elems + + @property + def description(self): + """The control description""" + return self.__data.get('description') + + @property + def enum_values(self): + """The enum values, if the control is an enumeration""" + if self.__enum_values is None: + return + for enum in self.__enum_values: + yield enum + + @property + def enum_values_count(self): + """The number of enum values, if the control is an enumeration""" + if self.__enum_values is None: + return 0 + return len(self.__enum_values) + + @property + def is_enum(self): + """Is the control an enumeration""" + return self.__enum_values is not None + + @property + def vendor(self): + """The vendor string, or None""" + return self.__vendor + + @property + def name(self): + """The control name (CamelCase)""" + return self.__name + + @property + def type(self): + typ = self.__data.get('type') + size = self.__data.get('size') + + if typ == 'string': + return 'std::string' + + if self.__size is None: + return typ + + if self.__size: + return f"Span<const {typ}, {self.__size}>" + else: + return f"Span<const {typ}>" diff --git a/utils/codegen/gen-controls.py b/utils/codegen/gen-controls.py new file mode 100755 index 00000000..3034e9a5 --- /dev/null +++ b/utils/codegen/gen-controls.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2019, Google Inc. +# +# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +# +# Generate control definitions from YAML + +import argparse +import jinja2 +import os +import sys +import yaml + +from controls import Control + + +def snake_case(s): + return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_') + + +def format_description(description): + description = description.strip('\n').split('\n') + for i in range(1, len(description)): + line = description[i] + description[i] = (line and ' * ' or ' *') + line + return '\n'.join(description) + + +def extend_control(ctrl, id, ranges): + ctrl.id = ranges[ctrl.vendor] + id + 1 + + if ctrl.vendor != 'libcamera': + ctrl.namespace = f'{ctrl.vendor}::' + else: + ctrl.namespace = '' + + return ctrl + + +def main(argv): + + # Parse command line arguments + parser = argparse.ArgumentParser() + parser.add_argument('--mode', '-m', type=str, required=True, choices=['controls', 'properties'], + help='Mode of operation') + parser.add_argument('--output', '-o', metavar='file', type=str, + help='Output file name. 
Defaults to standard output if not specified.') + parser.add_argument('--ranges', '-r', type=str, required=True, + help='Control id range reservation file.') + parser.add_argument('--template', '-t', dest='template', type=str, required=True, + help='Template file name.') + parser.add_argument('input', type=str, nargs='+', + help='Input file name.') + + args = parser.parse_args(argv[1:]) + + ranges = {} + with open(args.ranges, 'rb') as f: + data = open(args.ranges, 'rb').read() + ranges = yaml.safe_load(data)['ranges'] + + controls = {} + for input in args.input: + data = yaml.safe_load(open(input, 'rb').read()) + + vendor = data['vendor'] + if vendor not in ranges.keys(): + raise RuntimeError(f'Control id range is not defined for vendor {vendor}') + + ctrls = controls.setdefault(vendor, []) + + for i, ctrl in enumerate(data['controls']): + ctrl = Control(*ctrl.popitem(), vendor) + ctrls.append(extend_control(ctrl, i, ranges)) + + # Sort the vendors by range numerical value + controls = [[vendor, ctrls] for vendor, ctrls in controls.items()] + controls.sort(key=lambda item: ranges[item[0]]) + + filename = { + 'controls': 'control_ids', + 'properties': 'property_ids', + }[args.mode] + + data = { + 'filename': filename, + 'mode': args.mode, + 'controls': controls, + } + + env = jinja2.Environment() + env.filters['format_description'] = format_description + env.filters['snake_case'] = snake_case + template = env.from_string(open(args.template, 'r', encoding='utf-8').read()) + string = template.render(data) + + if args.output: + output = open(args.output, 'w', encoding='utf-8') + output.write(string) + output.close() + else: + sys.stdout.write(string) + + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/utils/gen-formats.py b/utils/codegen/gen-formats.py index da79a8bb..0c0932a5 100755 --- a/utils/gen-formats.py +++ b/utils/codegen/gen-formats.py @@ -4,7 +4,7 @@ # # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> # -# gen-formats.py - Generate formats definitions from YAML +# Generate formats definitions from YAML import argparse import re diff --git a/utils/gen-header.sh b/utils/codegen/gen-header.sh index 8b66c5dd..c78f0859 100755 --- a/utils/gen-header.sh +++ b/utils/codegen/gen-header.sh @@ -1,7 +1,7 @@ #!/bin/sh -src_dir="$1" -dst_file="$2" +dst_file="$1" +shift cat <<EOF > "$dst_file" /* SPDX-License-Identifier: LGPL-2.1-or-later */ @@ -9,16 +9,15 @@ cat <<EOF > "$dst_file" /* * Copyright (C) 2018-2019, Google Inc. 
* - * libcamera.h - libcamera public API + * libcamera public API */ #pragma once EOF -headers=$(for header in "$src_dir"/*.h "$src_dir"/*.h.in ; do +headers=$(for header in "$@" ; do header=$(basename "$header") - header="${header%.in}" echo "$header" done | sort) diff --git a/utils/gen-ipa-pub-key.py b/utils/codegen/gen-ipa-pub-key.py index a4a1f7b7..dc3e7d5f 100755 --- a/utils/gen-ipa-pub-key.py +++ b/utils/codegen/gen-ipa-pub-key.py @@ -4,7 +4,7 @@ # # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> # -# ipa-gen-key.py - Generate the IPA module signing public key +# Generate the IPA module signing public key import string import subprocess diff --git a/utils/tracepoints/gen-tp-header.py b/utils/codegen/gen-tp-header.py index a454615e..6769c7ce 100755 --- a/utils/tracepoints/gen-tp-header.py +++ b/utils/codegen/gen-tp-header.py @@ -4,9 +4,8 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# gen-tp-header.py - Generate header file to contain lttng tracepoints +# Generate header file to contain lttng tracepoints -import datetime import jinja2 import pathlib import os @@ -20,7 +19,6 @@ def main(argv): output = argv[2] template = argv[3] - year = datetime.datetime.now().year path = pathlib.Path(output).absolute().relative_to(argv[1]) source = '' @@ -28,7 +26,7 @@ def main(argv): source += open(fname, 'r', encoding='utf-8').read() + '\n\n' template = jinja2.Template(open(template, 'r', encoding='utf-8').read()) - string = template.render(year=year, path=path, source=source) + string = template.render(path=path, source=source) f = open(output, 'w', encoding='utf-8').write(string) diff --git a/utils/ipc/extract-docs.py b/utils/codegen/ipc/extract-docs.py index c2050c99..61f44cae 100755 --- a/utils/ipc/extract-docs.py +++ b/utils/codegen/ipc/extract-docs.py @@ -4,7 +4,7 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# extract-docs.py - Extract doxygen documentation from mojom files +# Extract doxygen documentation from mojom files import argparse import re @@ -38,7 +38,7 @@ def main(argv): /* * Copyright (C) 2021, Google Inc. * - * {pipeline}_ipa_interface.cpp - Docs file for generated {pipeline}.mojom + * Docs file for generated {pipeline}.mojom * * This file is auto-generated. Do not edit. */ diff --git a/utils/ipc/generate.py b/utils/codegen/ipc/generate.py index 71bdee3b..dfbe659b 100755 --- a/utils/ipc/generate.py +++ b/utils/codegen/ipc/generate.py @@ -4,14 +4,11 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# generate.py - Run mojo code generator for generating libcamera IPC files +# Run mojo code generator for generating libcamera IPC files import os import sys -# TODO set sys.pycache_prefix for >= python3.8 -sys.dont_write_bytecode = True - sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/bindings') import mojo.public.tools.bindings.mojom_bindings_generator as generator diff --git a/utils/ipc/generators/__init__.py b/utils/codegen/ipc/generators/__init__.py index e69de29b..e69de29b 100644 --- a/utils/ipc/generators/__init__.py +++ b/utils/codegen/ipc/generators/__init__.py diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl index c60b99b8..3942e570 100644 --- a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl @@ -7,7 +7,7 @@ /* * Copyright (C) 2020, Google Inc. 
* - * core_ipa_interface.h - libcamera core definitions for Image Processing Algorithms + * libcamera core definitions for Image Processing Algorithms * * This file is auto-generated. Do not edit. */ @@ -15,8 +15,13 @@ #pragma once {% if has_map %}#include <map>{% endif %} +{% if has_string %}#include <string>{% endif %} {% if has_array %}#include <vector>{% endif %} +#include <libcamera/controls.h> +#include <libcamera/framebuffer.h> +#include <libcamera/geometry.h> + #include <libcamera/ipa/ipa_interface.h> namespace libcamera { diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl index 5738a1aa..036518f6 100644 --- a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl @@ -8,7 +8,7 @@ /* * Copyright (C) 2020, Google Inc. * - * core_ipa_serializer.h - Data serializer for core libcamera definitions for IPA + * Data serializer for core libcamera definitions for IPA * * This file is auto-generated. Do not edit. */ diff --git a/utils/ipc/generators/libcamera_templates/definition_functions.tmpl b/utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl index 8b8509f3..8b8509f3 100644 --- a/utils/ipc/generators/libcamera_templates/definition_functions.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl diff --git a/utils/ipc/generators/libcamera_templates/meson.build b/utils/codegen/ipc/generators/libcamera_templates/meson.build index 70664eab..70664eab 100644 --- a/utils/ipc/generators/libcamera_templates/meson.build +++ b/utils/codegen/ipc/generators/libcamera_templates/meson.build diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl index 160601f7..5d70ea6a 100644 --- a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl @@ -7,19 +7,27 @@ /* * Copyright (C) 2020, Google Inc. * - * {{module_name}}_ipa_interface.h - Image Processing Algorithm interface for {{module_name}} + * Image Processing Algorithm interface for {{module_name}} * * This file is auto-generated. Do not edit. */ #pragma once -#include <libcamera/ipa/core_ipa_interface.h> -#include <libcamera/ipa/ipa_interface.h> - {% if has_map %}#include <map>{% endif %} +{% if has_string %}#include <string>{% endif %} {% if has_array %}#include <vector>{% endif %} +#include <libcamera/base/flags.h> +#include <libcamera/base/signal.h> + +#include <libcamera/controls.h> +#include <libcamera/framebuffer.h> +#include <libcamera/geometry.h> + +#include <libcamera/ipa/core_ipa_interface.h> +#include <libcamera/ipa/ipa_interface.h> + namespace libcamera { {%- if has_namespace %} {% for ns in namespace %} diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl index 238cf4a5..ce3cc5ab 100644 --- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl @@ -8,7 +8,7 @@ /* * Copyright (C) 2020, Google Inc. * - * {{module_name}}_ipa_proxy.cpp - Image Processing Algorithm proxy for {{module_name}} + * Image Processing Algorithm proxy for {{module_name}} * * This file is auto-generated. Do not edit. 
*/ @@ -175,9 +175,9 @@ void {{proxy_name}}::recvMessage(const IPCMessage &data) ); {% elif method|is_async %} ASSERT(state_ == ProxyRunning); - proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued, + proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued {%- for param in method|method_param_names -%} - {{param}}{{- ", " if not loop.last}} + , {{param}} {%- endfor -%} ); {%- endif %} diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl index 6e823598..e213b18a 100644 --- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl @@ -8,7 +8,7 @@ /* * Copyright (C) 2020, Google Inc. * - * {{module_name}}_ipa_proxy.h - Image Processing Algorithm proxy for {{module_name}} + * Image Processing Algorithm proxy for {{module_name}} * * This file is auto-generated. Do not edit. */ diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl index b65dc4cf..1f990d3f 100644 --- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl @@ -8,7 +8,7 @@ /* * Copyright (C) 2020, Google Inc. * - * {{module_name}}_ipa_proxy_worker.cpp - Image Processing Algorithm proxy worker for {{module_name}} + * Image Processing Algorithm proxy worker for {{module_name}} * * This file is auto-generated. Do not edit. */ diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl index 8b709705..cd5a65a9 100644 --- a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl @@ -8,7 +8,7 @@ /* * Copyright (C) 2020, Google Inc. * - * {{module_name}}_ipa_serializer.h - Image Processing Algorithm data serializer for {{module_name}} + * Image Processing Algorithm data serializer for {{module_name}} * * This file is auto-generated. Do not edit. */ diff --git a/utils/ipc/generators/libcamera_templates/proxy_functions.tmpl b/utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl index b5797b14..b5797b14 100644 --- a/utils/ipc/generators/libcamera_templates/proxy_functions.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl diff --git a/utils/ipc/generators/libcamera_templates/serializer.tmpl b/utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl index 323e1293..323e1293 100644 --- a/utils/ipc/generators/libcamera_templates/serializer.tmpl +++ b/utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl diff --git a/utils/ipc/generators/meson.build b/utils/codegen/ipc/generators/meson.build index 504f1a46..504f1a46 100644 --- a/utils/ipc/generators/meson.build +++ b/utils/codegen/ipc/generators/meson.build diff --git a/utils/ipc/generators/mojom_libcamera_generator.py b/utils/codegen/ipc/generators/mojom_libcamera_generator.py index 99d905de..d9c620a0 100644 --- a/utils/ipc/generators/mojom_libcamera_generator.py +++ b/utils/codegen/ipc/generators/mojom_libcamera_generator.py @@ -4,7 +4,7 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# mojom_libcamera_generator.py - Generates libcamera files from a mojom.Module. 
+# Generates libcamera files from a mojom.Module. import argparse import datetime @@ -467,6 +467,7 @@ class Generator(generator.Generator): 'enums': self.module.enums, 'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0, 'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0, + 'has_string': len([x for x in self.module.kinds.keys() if x[0] == 's']) > 0, 'has_namespace': self.module.mojom_namespace != '', 'interface_event': GetEventInterface(self.module.interfaces), 'interface_main': GetMainInterface(self.module.interfaces), @@ -486,6 +487,7 @@ class Generator(generator.Generator): 'enums_gen_header': [x for x in self.module.enums if x.attributes is None or 'skipHeader' not in x.attributes], 'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0, 'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0, + 'has_string': len([x for x in self.module.kinds.keys() if x[0] == 's']) > 0, 'structs_gen_header': [x for x in self.module.structs if x.attributes is None or 'skipHeader' not in x.attributes], 'structs_gen_serializer': [x for x in self.module.structs if x.attributes is None or 'skipSerdes' not in x.attributes], } diff --git a/utils/ipc/meson.build b/utils/codegen/ipc/meson.build index 973a5417..f77bf324 100644 --- a/utils/ipc/meson.build +++ b/utils/codegen/ipc/meson.build @@ -13,6 +13,7 @@ mojom_docs_extractor = find_program('./extract-docs.py') mojom_templates = custom_target('mojom_templates', input : mojom_template_files, output : 'libcamera_templates.zip', - command : [mojom_generator, '-o', '@OUTDIR@', 'precompile']) + command : [mojom_generator, '-o', '@OUTDIR@', 'precompile'], + env : py_build_env) mojom_templates_dir = meson.current_build_dir() diff --git a/utils/ipc/mojo/README b/utils/codegen/ipc/mojo/README index 961cabd2..961cabd2 100644 --- a/utils/ipc/mojo/README +++ b/utils/codegen/ipc/mojo/README diff --git a/utils/ipc/mojo/public/LICENSE b/utils/codegen/ipc/mojo/public/LICENSE index 513e8a6a..513e8a6a 100644 --- a/utils/ipc/mojo/public/LICENSE +++ b/utils/codegen/ipc/mojo/public/LICENSE diff --git a/utils/ipc/mojo/public/tools/.style.yapf b/utils/codegen/ipc/mojo/public/tools/.style.yapf index b4ebbe24..b4ebbe24 100644 --- a/utils/ipc/mojo/public/tools/.style.yapf +++ b/utils/codegen/ipc/mojo/public/tools/.style.yapf diff --git a/utils/ipc/mojo/public/tools/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/BUILD.gn index 5328a34a..5328a34a 100644 --- a/utils/ipc/mojo/public/tools/BUILD.gn +++ b/utils/codegen/ipc/mojo/public/tools/BUILD.gn diff --git a/utils/ipc/mojo/public/tools/bindings/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn index eeca73ea..eeca73ea 100644 --- a/utils/ipc/mojo/public/tools/bindings/BUILD.gn +++ b/utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn diff --git a/utils/ipc/mojo/public/tools/bindings/README.md b/utils/codegen/ipc/mojo/public/tools/bindings/README.md index b27b2d01..b27b2d01 100644 --- a/utils/ipc/mojo/public/tools/bindings/README.md +++ b/utils/codegen/ipc/mojo/public/tools/bindings/README.md diff --git a/utils/ipc/mojo/public/tools/bindings/checks/__init__.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py index e69de29b..e69de29b 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/__init__.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py 
b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py index e6e4f2c9..e6e4f2c9 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py index f1a50a4a..f1a50a4a 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py index 702d41c3..702d41c3 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py index 07f51a64..07f51a64 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py index e96152fd..e96152fd 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py index d570e26c..d570e26c 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py index a6cd71e2..a6cd71e2 100644 --- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py diff --git a/utils/ipc/mojo/public/tools/bindings/concatenate-files.py b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py index 4dd26d4a..4dd26d4a 100755 --- a/utils/ipc/mojo/public/tools/bindings/concatenate-files.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py diff --git a/utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py index 7d56c9f9..7d56c9f9 100755 --- a/utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py diff --git a/utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py b/utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py index c6daff03..c6daff03 100644 --- a/utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py +++ 
b/utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py diff --git a/utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py b/utils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py index 4a53e2bf..4a53e2bf 100755 --- a/utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py diff --git a/utils/ipc/mojo/public/tools/bindings/minify_with_terser.py b/utils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py index cefee7a4..cefee7a4 100755 --- a/utils/ipc/mojo/public/tools/bindings/minify_with_terser.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py diff --git a/utils/ipc/mojo/public/tools/bindings/mojom.gni b/utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni index 3f6e54e0..3f6e54e0 100644 --- a/utils/ipc/mojo/public/tools/bindings/mojom.gni +++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni diff --git a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py index 8c641c2a..8c641c2a 100755 --- a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py diff --git a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py index 761922b6..761922b6 100644 --- a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py diff --git a/utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py b/utils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py index 6bb7a209..6bb7a209 100755 --- a/utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py +++ b/utils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py diff --git a/utils/ipc/mojo/public/tools/mojom/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn index eafb95a1..eafb95a1 100644 --- a/utils/ipc/mojo/public/tools/mojom/BUILD.gn +++ b/utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn diff --git a/utils/ipc/mojo/public/tools/mojom/README.md b/utils/codegen/ipc/mojo/public/tools/mojom/README.md index e5d17ab0..e5d17ab0 100644 --- a/utils/ipc/mojo/public/tools/mojom/README.md +++ b/utils/codegen/ipc/mojo/public/tools/mojom/README.md diff --git a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py index 35cd1cfd..35cd1cfd 100755 --- a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py diff --git a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py index 06769c95..06769c95 100755 --- a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/const_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py index e8ed36a7..e8ed36a7 100644 --- a/utils/ipc/mojo/public/tools/mojom/const_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py diff --git 
a/utils/ipc/mojo/public/tools/mojom/enum_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py index 9269cde5..9269cde5 100644 --- a/utils/ipc/mojo/public/tools/mojom/enum_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/feature_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py index 5f014e1c..5f014e1c 100644 --- a/utils/ipc/mojo/public/tools/mojom/feature_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn index a0edf0eb..a0edf0eb 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py index e69de29b..e69de29b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/__init__.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/error.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py index dd53b835..dd53b835 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/error.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py index 124f12c1..124f12c1 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py index c93d2289..c93d2289 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py index e69de29b..e69de29b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py index 1efe2022..1efe2022 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py index 96fe3a2d..96fe3a2d 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py index 7143e07c..7143e07c 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py index ca71059d..ca71059d 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py +++ 
b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py index 2a4e852c..2a4e852c 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py index 61240426..61240426 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py index 7d8e4e01..7d8e4e01 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py index 807e2a4f..807e2a4f 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py index 83bb297f..83bb297f 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py index b4fea924..b4fea924 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py index e69de29b..e69de29b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py index aae9cdb6..aae9cdb6 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py index b289f7b1..b289f7b1 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py index 9687edbf..9687edbf 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py index 
cca1764b..cca1764b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py index 00136a8b..00136a8b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py index bc9f8354..bc9f8354 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py index 1dffd98b..1dffd98b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py index 0a26307b..0a26307b 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py index 9693090e..9693090e 100755 --- a/utils/ipc/mojo/public/tools/mojom/mojom_parser.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py index f0ee6966..f0ee6966 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py index 353a2b6e..353a2b6e 100644 --- a/utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py index d10d69c6..d10d69c6 100644 --- a/utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/union_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py index 6b2525e5..6b2525e5 100644 --- a/utils/ipc/mojo/public/tools/mojom/union_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py diff --git a/utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py index 45e45ec5..45e45ec5 100644 --- a/utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py +++ b/utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py diff --git a/utils/ipc/mojo/public/tools/run_all_python_unittests.py b/utils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py index 98bce18c..98bce18c 100755 --- a/utils/ipc/mojo/public/tools/run_all_python_unittests.py +++ 
b/utils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py diff --git a/utils/ipc/parser.py b/utils/codegen/ipc/parser.py index 231a3266..8e70322d 100755 --- a/utils/ipc/parser.py +++ b/utils/codegen/ipc/parser.py @@ -4,14 +4,11 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# parser.py - Run mojo parser with python3 +# Run mojo parser with python3 import os import sys -# TODO set sys.pycache_prefix for >= python3.8 -sys.dont_write_bytecode = True - # Make sure that mojom_parser.py can import mojom sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/mojom') diff --git a/utils/ipc/tools/README b/utils/codegen/ipc/tools/README index 961cabd2..961cabd2 100644 --- a/utils/ipc/tools/README +++ b/utils/codegen/ipc/tools/README diff --git a/utils/ipc/tools/diagnosis/crbug_1001171.py b/utils/codegen/ipc/tools/diagnosis/crbug_1001171.py index 40900d10..40900d10 100644 --- a/utils/ipc/tools/diagnosis/crbug_1001171.py +++ b/utils/codegen/ipc/tools/diagnosis/crbug_1001171.py diff --git a/utils/codegen/meson.build b/utils/codegen/meson.build new file mode 100644 index 00000000..adf33bba --- /dev/null +++ b/utils/codegen/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: CC0-1.0 + +## Code generation + +py_build_env = environment() +# \todo Investigate usage of PYTHONPYCACHEPREFIX for Python >= 3.8 +py_build_env.set('PYTHONDONTWRITEBYTECODE', '1') +py_build_env.prepend('PYTHONPATH', meson.current_source_dir()) + +py_modules += ['jinja2', 'yaml'] + +gen_controls = files('gen-controls.py') +gen_formats = files('gen-formats.py') +gen_header = files('gen-header.sh') +gen_ipa_pub_key = files('gen-ipa-pub-key.py') +gen_tracepoints = files('gen-tp-header.py') + +subdir('ipc') diff --git a/utils/gen-controls.py b/utils/gen-controls.py deleted file mode 100755 index 6cd5e362..00000000 --- a/utils/gen-controls.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python3 -# SPDX-License-Identifier: GPL-2.0-or-later -# Copyright (C) 2019, Google Inc. -# -# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> -# -# gen-controls.py - Generate control definitions from YAML - -import argparse -from functools import reduce -import operator -import string -import sys -import yaml -import os - - -class ControlEnum(object): - def __init__(self, data): - self.__data = data - - @property - def description(self): - """The enum description""" - return self.__data.get('description') - - @property - def name(self): - """The enum name""" - return self.__data.get('name') - - @property - def value(self): - """The enum value""" - return self.__data.get('value') - - -class Control(object): - def __init__(self, name, data, vendor): - self.__name = name - self.__data = data - self.__enum_values = None - self.__size = None - self.__vendor = vendor - - enum_values = data.get('enum') - if enum_values is not None: - self.__enum_values = [ControlEnum(enum) for enum in enum_values] - - size = self.__data.get('size') - if size is not None: - if len(size) == 0: - raise RuntimeError(f'Control `{self.__name}` size must have at least one dimension') - - # Compute the total number of elements in the array. If any of the - # array dimension is a string, the array is variable-sized. 
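The parser.py hunk above can drop `sys.dont_write_bytecode` because the new `utils/codegen/meson.build` exports the equivalent settings for every generator it runs. A minimal Python sketch of that approach (the `run_codegen` helper and its paths are invented for illustration, not part of the patch):

```python
import os
import subprocess

def run_codegen(script, *args, source_dir='utils/codegen'):
    """Run a code-generation script with the build environment that
    utils/codegen/meson.build now sets up for all generators."""
    env = dict(os.environ)
    env['PYTHONDONTWRITEBYTECODE'] = '1'  # keep .pyc files out of the source tree
    env['PYTHONPATH'] = source_dir + os.pathsep + env.get('PYTHONPATH', '')
    return subprocess.run(['python3', script, *args], env=env, check=True)
```

Centralising this in the build environment means individual tools such as parser.py no longer need interpreter tweaks of their own.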
- num_elems = 1 - for dim in size: - if type(dim) is str: - num_elems = 0 - break - - dim = int(dim) - if dim <= 0: - raise RuntimeError(f'Control `{self.__name}` size must have positive values only') - - num_elems *= dim - - self.__size = num_elems - - @property - def description(self): - """The control description""" - return self.__data.get('description') - - @property - def enum_values(self): - """The enum values, if the control is an enumeration""" - if self.__enum_values is None: - return - for enum in self.__enum_values: - yield enum - - @property - def is_enum(self): - """Is the control an enumeration""" - return self.__enum_values is not None - - @property - def vendor(self): - """The vendor string, or None""" - return self.__vendor - - @property - def name(self): - """The control name (CamelCase)""" - return self.__name - - @property - def type(self): - typ = self.__data.get('type') - size = self.__data.get('size') - - if typ == 'string': - return 'std::string' - - if self.__size is None: - return typ - - if self.__size: - return f"Span<const {typ}, {self.__size}>" - else: - return f"Span<const {typ}>" - - -def snake_case(s): - return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_') - - -def format_description(description): - description = description.strip('\n').split('\n') - description[0] = '\\brief ' + description[0] - return '\n'.join([(line and ' * ' or ' *') + line for line in description]) - - -def generate_cpp(controls): - enum_doc_start_template = string.Template('''/** - * \\enum ${name}Enum - * \\brief Supported ${name} values''') - enum_doc_value_template = string.Template(''' * \\var ${value} -${description}''') - doc_template = string.Template('''/** - * \\var ${name} -${description} - */''') - def_template = string.Template('extern const Control<${type}> ${name}(${id_name}, "${name}");') - enum_values_doc = string.Template('''/** - * \\var ${name}Values - * \\brief List of all $name supported values - */''') - enum_values_start = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values = {''') - enum_values_values = string.Template('''\tstatic_cast<int32_t>(${name}),''') - - ctrls_doc = {} - ctrls_def = {} - ctrls_map = [] - - for ctrl in controls: - id_name = snake_case(ctrl.name).upper() - - vendor = ctrl.vendor - if vendor not in ctrls_doc: - ctrls_doc[vendor] = [] - ctrls_def[vendor] = [] - - info = { - 'name': ctrl.name, - 'type': ctrl.type, - 'description': format_description(ctrl.description), - 'id_name': id_name, - } - - target_doc = ctrls_doc[vendor] - target_def = ctrls_def[vendor] - - if ctrl.is_enum: - enum_doc = [] - enum_doc.append(enum_doc_start_template.substitute(info)) - - num_entries = 0 - for enum in ctrl.enum_values: - value_info = { - 'name': ctrl.name, - 'value': enum.name, - 'description': format_description(enum.description), - } - enum_doc.append(enum_doc_value_template.substitute(value_info)) - num_entries += 1 - - enum_doc = '\n *\n'.join(enum_doc) - enum_doc += '\n */' - target_doc.append(enum_doc) - - values_info = { - 'name': info['name'], - 'size': num_entries, - } - target_doc.append(enum_values_doc.substitute(values_info)) - target_def.append(enum_values_start.substitute(values_info)) - for enum in ctrl.enum_values: - value_info = { - 'name': enum.name - } - target_def.append(enum_values_values.substitute(value_info)) - target_def.append("};") - - target_doc.append(doc_template.substitute(info)) - target_def.append(def_template.substitute(info)) - - vendor_ns = vendor + '::' if vendor 
!= "libcamera" else '' - ctrls_map.append('\t{ ' + vendor_ns + id_name + ', &' + vendor_ns + ctrl.name + ' },') - - vendor_ctrl_doc_sub = [] - vendor_ctrl_template = string.Template(''' -/** - * \\brief Namespace for ${vendor} controls - */ -namespace ${vendor} { - -${vendor_controls_str} - -} /* namespace ${vendor} */''') - - for vendor in [v for v in ctrls_doc.keys() if v not in ['libcamera']]: - vendor_ctrl_doc_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n\n'.join(ctrls_doc[vendor])})) - - vendor_ctrl_def_sub = [] - for vendor in [v for v in ctrls_def.keys() if v not in ['libcamera']]: - vendor_ctrl_def_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n'.join(ctrls_def[vendor])})) - - return { - 'controls_doc': '\n\n'.join(ctrls_doc['libcamera']), - 'controls_def': '\n'.join(ctrls_def['libcamera']), - 'controls_map': '\n'.join(ctrls_map), - 'vendor_controls_doc': '\n'.join(vendor_ctrl_doc_sub), - 'vendor_controls_def': '\n'.join(vendor_ctrl_def_sub), - } - - -def generate_h(controls, mode, ranges): - enum_template_start = string.Template('''enum ${name}Enum {''') - enum_value_template = string.Template('''\t${name} = ${value},''') - enum_values_template = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values;''') - template = string.Template('''extern const Control<${type}> ${name};''') - - ctrls = {} - ids = {} - id_value = {} - - for ctrl in controls: - id_name = snake_case(ctrl.name).upper() - - vendor = ctrl.vendor - if vendor not in ctrls: - if vendor not in ranges.keys(): - raise RuntimeError(f'Control id range is not defined for vendor {vendor}') - id_value[vendor] = ranges[vendor] + 1 - ids[vendor] = [] - ctrls[vendor] = [] - - target_ids = ids[vendor] - target_ids.append('\t' + id_name + ' = ' + str(id_value[vendor]) + ',') - - info = { - 'name': ctrl.name, - 'type': ctrl.type, - } - - target_ctrls = ctrls[vendor] - - if ctrl.is_enum: - target_ctrls.append(enum_template_start.substitute(info)) - - num_entries = 0 - for enum in ctrl.enum_values: - value_info = { - 'name': enum.name, - 'value': enum.value, - } - target_ctrls.append(enum_value_template.substitute(value_info)) - num_entries += 1 - target_ctrls.append("};") - - values_info = { - 'name': info['name'], - 'size': num_entries, - } - target_ctrls.append(enum_values_template.substitute(values_info)) - - target_ctrls.append(template.substitute(info)) - id_value[vendor] += 1 - - vendor_template = string.Template(''' -namespace ${vendor} { - -#define LIBCAMERA_HAS_${vendor_def}_VENDOR_${mode} - -enum { -${vendor_enums} -}; - -${vendor_controls} - -} /* namespace ${vendor} */ -''') - - vendor_sub = [] - for vendor in [v for v in ctrls.keys() if v != 'libcamera']: - vendor_sub.append(vendor_template.substitute({'mode': mode.upper(), - 'vendor': vendor, - 'vendor_def': vendor.upper(), - 'vendor_enums': '\n'.join(ids[vendor]), - 'vendor_controls': '\n'.join(ctrls[vendor])})) - - return { - 'ids': '\n'.join(ids['libcamera']), - 'controls': '\n'.join(ctrls['libcamera']), - 'vendor_controls': '\n'.join(vendor_sub) - } - - -def fill_template(template, data): - - template = open(template, 'rb').read() - template = template.decode('utf-8') - template = string.Template(template) - return template.substitute(data) - - -def main(argv): - - # Parse command line arguments - parser = argparse.ArgumentParser() - parser.add_argument('--mode', '-m', type=str, required=True, choices=['controls', 'properties'], - help='Mode of 
operation') - parser.add_argument('--output', '-o', metavar='file', type=str, - help='Output file name. Defaults to standard output if not specified.') - parser.add_argument('--ranges', '-r', type=str, required=True, - help='Control id range reservation file.') - parser.add_argument('--template', '-t', dest='template', type=str, required=True, - help='Template file name.') - parser.add_argument('input', type=str, nargs='+', - help='Input file name.') - - args = parser.parse_args(argv[1:]) - - ranges = {} - with open(args.ranges, 'rb') as f: - data = open(args.ranges, 'rb').read() - ranges = yaml.safe_load(data)['ranges'] - - controls = [] - for input in args.input: - with open(input, 'rb') as f: - data = f.read() - vendor = yaml.safe_load(data)['vendor'] - ctrls = yaml.safe_load(data)['controls'] - controls = controls + [Control(*ctrl.popitem(), vendor) for ctrl in ctrls] - - if args.template.endswith('.cpp.in'): - data = generate_cpp(controls) - elif args.template.endswith('.h.in'): - data = generate_h(controls, args.mode, ranges) - else: - raise RuntimeError('Unknown template type') - - data = fill_template(args.template, data) - - if args.output: - output = open(args.output, 'wb') - output.write(data.encode('utf-8')) - output.close() - else: - sys.stdout.write(data) - - return 0 - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/utils/gen-ipa-priv-key.sh b/utils/gen-ipa-priv-key.sh index 919751f2..2ca7b883 100755 --- a/utils/gen-ipa-priv-key.sh +++ b/utils/gen-ipa-priv-key.sh @@ -4,7 +4,7 @@ # # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> # -# gen-ipa-priv-key.sh - Generate an RSA private key to sign IPA modules +# Generate an RSA private key to sign IPA modules key="$1" diff --git a/utils/gen-version.sh b/utils/gen-version.sh index e1f7ca7b..1b818e9e 100755 --- a/utils/gen-version.sh +++ b/utils/gen-version.sh @@ -42,7 +42,7 @@ if [ -z "$build_dir" ] || (echo "$build_dir" | grep -q "$src_dir") then git update-index --refresh > /dev/null 2>&1 fi -git diff-index --quiet HEAD || version="$version-dirty ($(date --iso-8601=seconds))" +git diff-index --quiet HEAD || version="$version-dirty ($(date +%Y-%m-%dT%H:%M:%S%Z))" # If a project version is provided, use it to replace the version number. if [ -n "$project_version" ] diff --git a/utils/hooks/pre-push b/utils/hooks/pre-push index 9918b286..68dcbd0c 100755 --- a/utils/hooks/pre-push +++ b/utils/hooks/pre-push @@ -68,7 +68,7 @@ do fi # 2. The commit message shall have Signed-off-by lines - # corresponding the committer and the author. + # corresponding the committer, author, and all co-developers. committer=$(echo "$msg" | grep '^committer ' | head -1 | \ cut -d ' ' -f 2- | rev | cut -d ' ' -f 3- | rev) if ! echo -E "$msg" | grep -F -q "Signed-off-by: ${committer}" @@ -85,6 +85,15 @@ do errors=$((errors+1)) fi + while read -r codev + do + if ! echo -E "$msg" | grep -F -q "Signed-off-by: ${codev}" + then + echo >&2 "Missing co-developer '${codev}' Signed-off-by in commit $commit" + errors=$((errors+1)) + fi + done < <(echo "$msg" | grep '^Co-developed-by: ' | cut -d ' ' -f 2-) + # 3. A Reviewed-by or Acked-by is required. if ! 
echo -E "$msg" | grep -q '^\(Reviewed\|Acked\)-by: '
 	then
diff --git a/utils/ipu3/ipu3-capture.sh b/utils/ipu3/ipu3-capture.sh
index 9294d025..004a92b0 100755
--- a/utils/ipu3/ipu3-capture.sh
+++ b/utils/ipu3/ipu3-capture.sh
@@ -4,7 +4,7 @@
 #
 # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 #
-# ipu3-capture.sh - Capture raw frames from cameras based on the Intel IPU3
+# Capture raw frames from cameras based on the Intel IPU3
 #
 # The scripts makes use of the following tools, which are expected to be
 # executable from the system-wide path or from the local directory:
diff --git a/utils/ipu3/ipu3-pack.c b/utils/ipu3/ipu3-pack.c
index decbfc6c..23d2db8b 100644
--- a/utils/ipu3/ipu3-pack.c
+++ b/utils/ipu3/ipu3-pack.c
@@ -8,6 +8,7 @@
 #include <errno.h>
 #include <fcntl.h>
+#include <libgen.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
@@ -15,9 +16,8 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-static void usage(const char *argv0)
+static void usage(char *argv0)
 {
-	printf("Usage: %s input-file output-file\n", basename(argv0));
 	printf("Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats\n");
 	printf("If the output-file '-', output data will be written to standard output\n");
diff --git a/utils/ipu3/ipu3-process.sh b/utils/ipu3/ipu3-process.sh
index bb4abbe8..25bc849f 100755
--- a/utils/ipu3/ipu3-process.sh
+++ b/utils/ipu3/ipu3-process.sh
@@ -4,7 +4,7 @@
 #
 # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 #
-# ipu3-process.sh - Process raw frames with the Intel IPU3
+# Process raw frames with the Intel IPU3
 #
 # The scripts makes use of the following tools, which are expected to be
 # found in $PATH:
diff --git a/utils/ipu3/ipu3-unpack.c b/utils/ipu3/ipu3-unpack.c
index c96fafed..6ee8c45a 100644
--- a/utils/ipu3/ipu3-unpack.c
+++ b/utils/ipu3/ipu3-unpack.c
@@ -8,6 +8,7 @@
 #include <errno.h>
 #include <fcntl.h>
+#include <libgen.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
@@ -15,7 +16,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-static void usage(const char *argv0)
+static void usage(char *argv0)
 {
 	printf("Usage: %s input-file output-file\n", basename(argv0));
 	printf("Unpack the IPU3 raw Bayer format to 16-bit Bayer\n");
diff --git a/utils/meson.build b/utils/meson.build
index 8e28ada7..95d657ac 100644
--- a/utils/meson.build
+++ b/utils/meson.build
@@ -1,15 +1,7 @@
 # SPDX-License-Identifier: CC0-1.0
 
-subdir('ipc')
+subdir('codegen')
 subdir('ipu3')
-subdir('tracepoints')
-
-## Code generation
-py_modules += ['yaml']
-gen_controls = files('gen-controls.py')
-gen_formats = files('gen-formats.py')
-gen_header = files('gen-header.sh')
 
 ## Module signing
 gen_ipa_priv_key = files('gen-ipa-priv-key.sh')
-gen_ipa_pub_key = files('gen-ipa-pub-key.py')
diff --git a/utils/raspberrypi/ctt/alsc_only.py b/utils/raspberrypi/ctt/alsc_only.py
index 7cd0ac01..a521c4ad 100755
--- a/utils/raspberrypi/ctt/alsc_only.py
+++ b/utils/raspberrypi/ctt/alsc_only.py
@@ -2,12 +2,14 @@
 #
 # SPDX-License-Identifier: BSD-2-Clause
 #
-# Copyright (C) 2022, Raspberry Pi (Trading) Limited
+# Copyright (C) 2022, Raspberry Pi Ltd
 #
-# alsc_only.py - alsc tuning tool
+# alsc tuning tool
 
-from ctt import *
+import sys
+from ctt import *
+from ctt_tools import parse_input
 
 if __name__ == '__main__':
     """
@@ -15,13 +17,14 @@ if __name__ == '__main__':
     """
     if len(sys.argv) == 1:
         print("""
-    Pisp Camera Tuning Tool version 1.0
+    PiSP Lens Shading Camera Tuning Tool version 1.0
 
    Required Arguments:
    '-i' : Calibration image directory.
    '-o' : Name of output json file.
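The pre-push hook hunk earlier in this patch extends the Signed-off-by check to every Co-developed-by trailer. A rough Python equivalent of that shell loop, shown only to illustrate the rule (the helper is invented; the hook itself stays in shell):

```python
import re

def missing_codev_signoffs(msg):
    """Return Co-developed-by entries that lack a matching Signed-off-by line."""
    codevs = re.findall(r'^Co-developed-by: (.+)$', msg, re.MULTILINE)
    signoffs = set(re.findall(r'^Signed-off-by: (.+)$', msg, re.MULTILINE))
    return [person for person in codevs if person not in signoffs]
```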
Optional Arguments: + '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4' '-c' : Config file for the CTT. If not passed, default parameters used. '-l' : Name of output log file. If not passed, 'ctt_log.txt' used. """) @@ -30,5 +33,10 @@ if __name__ == '__main__': """ parse input arguments """ - json_output, directory, config, log_output = parse_input() - run_ctt(json_output, directory, config, log_output, alsc_only=True) + json_output, directory, config, log_output, target = parse_input() + if target == 'pisp': + from ctt_pisp import json_template, grid_size + elif target == 'vc4': + from ctt_vc4 import json_template, grid_size + + run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True) diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py new file mode 100644 index 00000000..1c0a8193 --- /dev/null +++ b/utils/raspberrypi/ctt/cac_only.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2023, Raspberry Pi (Trading) Ltd. +# +# cac_only.py - cac tuning tool + + +# This file allows you to tune only the chromatic aberration correction +# Specify any number of files in the command line args, and it shall iterate through +# and generate an averaged cac table from all the input images, which you can then +# input into your tuning file. + +# Takes .dng files produced by the camera modules of the dots grid and calculates the chromatic abberation of each dot. +# Then takes each dot, and works out where it was in the image, and uses that to output a tables of the shifts +# across the whole image. + +from PIL import Image +import numpy as np +import rawpy +import sys +import getopt + +from ctt_cac import * + + +def cac(filelist, output_filepath, plot_results=False): + np.set_printoptions(precision=3) + np.set_printoptions(suppress=True) + + # Create arrays to hold all the dots data and their colour offsets + red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]] + blue_shift = [] + # Iterate through the files + # Multiple files is reccomended to average out the lens aberration through rotations + for file in filelist: + print("\n Processing file " + str(file)) + # Read the raw RGB values from the .dng file + with rawpy.imread(file) as raw: + rgb = raw.postprocess() + sizes = (raw.sizes) + + image_size = [sizes[2], sizes[3]] # Image size, X, Y + # Create a colour copy of the RGB values to use later in the calibration + imout = Image.new(mode="RGB", size=image_size) + rgb_image = np.array(imout) + # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily + rgb.reshape((image_size[0], image_size[1], 3)) + rgb_image = rgb + + # Pass the RGB image through to the dots locating program + # Returns an array of the dots (colour rectangles around the dots), and an array of their locations + print("Finding dots") + dots, dots_locations = find_dots_locations(rgb_image) + + # Now, analyse each dot. 
Work out the centroid of each colour channel, and use that to work out + # by how far the chromatic aberration has shifted each channel + print('Dots found: ' + str(len(dots))) + + for dot, dot_location in zip(dots, dots_locations): + if len(dot) > 0: + if (dot_location[0] > 0) and (dot_location[1] > 0): + ret = analyse_dot(dot, dot_location) + red_shift.append(ret[0]) + blue_shift.append(ret[1]) + + # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix + # for the CAC block to handle and then store these as a .json file to be added to the camera + # tuning file + print("\nCreating output grid") + rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size) + + print("CAC correction complete!") + + # The json format that we then paste into the tuning file (manually) + sample = ''' + { + "rpi.cac" : + { + "strength": 1.0, + "lut_rx" : [ + rx_vals + ], + "lut_ry" : [ + ry_vals + ], + "lut_bx" : [ + bx_vals + ], + "lut_by" : [ + by_vals + ] + } + } + ''' + + # Below, may look incorrect, however, the PiSP (standard) dimensions are flipped in comparison to + # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts, + # and the PiSP block asks for the values it should shift (hence the * -1, to convert from colour shift to a pixel shift) + sample = sample.replace("rx_vals", pprint_array(ry * -1)) + sample = sample.replace("ry_vals", pprint_array(rx * -1)) + sample = sample.replace("bx_vals", pprint_array(by * -1)) + sample = sample.replace("by_vals", pprint_array(bx * -1)) + print("Successfully converted to JSON") + f = open(str(output_filepath), "w+") + f.write(sample) + f.close() + print("Successfully written to json file") + ''' + If you wish to see a plot of the colour channel shifts, add the -p or --plots option + Can be a quick way of validating if the data/dots you've got are good, or if you need to + change some parameters/take some better images + ''' + if plot_results: + plot_shifts(red_shift, blue_shift) + + +if __name__ == "__main__": + argv = sys.argv + # Detect the input and output file paths + arg_output = "output.json" + arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0]) + opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"]) + + output_location = 0 + input_location = 0 + filelist = [] + plot_results = False + for i in range(len(argv)): + if ("-h") in argv[i]: + print(arg_help) # print the help message + sys.exit(2) + if "-o" in argv[i]: + output_location = i + if ".dng" in argv[i]: + filelist.append(argv[i]) + if "-p" in argv[i]: + plot_results = True + + arg_output = argv[output_location + 1] + cac(filelist, arg_output, plot_results) diff --git a/utils/raspberrypi/ctt/colors.py b/utils/raspberrypi/ctt/colors.py index 1ab986d6..cb4d236b 100644 --- a/utils/raspberrypi/ctt/colors.py +++ b/utils/raspberrypi/ctt/colors.py @@ -1,4 +1,4 @@ -# colors.py - Program to convert from RGB to LAB color space +# Program to convert from RGB to LAB color space def RGB_to_LAB(RGB): # where RGB is a 1x3 array. 
e.g RGB = [100, 255, 230] num = 0 XYZ = [0, 0, 0] diff --git a/utils/raspberrypi/ctt/convert_tuning.py b/utils/raspberrypi/ctt/convert_tuning.py index f4504d45..83cf69d4 100755 --- a/utils/raspberrypi/ctt/convert_tuning.py +++ b/utils/raspberrypi/ctt/convert_tuning.py @@ -8,30 +8,104 @@ import argparse import json +import numpy as np import sys from ctt_pretty_print_json import pretty_print +from ctt_pisp import grid_size as grid_size_pisp +from ctt_pisp import json_template as json_template_pisp +from ctt_vc4 import grid_size as grid_size_vc4 +from ctt_vc4 import json_template as json_template_vc4 -def convert_v2(in_json: dict) -> str: +def interp_2d(in_ls, src_w, src_h, dst_w, dst_h): - if 'version' in in_json.keys() and in_json['version'] != 1.0: - print(f'The JSON config reports version {in_json["version"]} that is incompatible with this tool.') - sys.exit(-1) + out_ls = np.zeros((dst_h, dst_w)) + for i in range(src_h): + out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w), + np.linspace(0, dst_w - 1, src_w), + in_ls[i]) + for i in range(dst_w): + out_ls[:,i] = np.interp(np.linspace(0, dst_h - 1, dst_h), + np.linspace(0, dst_h - 1, src_h), + out_ls[:src_h, i]) + return out_ls - converted = { - 'version': 2.0, - 'target': 'bcm2835', - 'algorithms': [{algo: config} for algo, config in in_json.items()] - } - return pretty_print(converted) +def convert_target(in_json: dict, target: str): + + src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4 + dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp + json_template = json_template_vc4 if target == 'vc4' else json_template_pisp + + # ALSC grid sizes + alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc'] + for colour in ['calibrations_Cr', 'calibrations_Cb']: + if colour not in alsc: + continue + for temperature in alsc[colour]: + in_ls = np.reshape(temperature['table'], (src_h, src_w)) + out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h) + temperature['table'] = np.round(out_ls.flatten(), 3).tolist() + + if 'luminance_lut' in alsc: + in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w)) + out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h) + alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist() + + # Denoise blocks + for i, algo in enumerate(in_json['algorithms']): + if list(algo.keys())[0] == 'rpi.sdn': + in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']} + break + + # AGC mode weights + agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc'] + if 'channels' in agc: + for i, channel in enumerate(agc['channels']): + target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes'] + for mode, v in channel['metering_modes'].items(): + v['weights'] = target_agc_metering[mode]['weights'] + else: + for mode, v in agc["metering_modes"].items(): + target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes'] + v['weights'] = target_agc_metering[mode]['weights'] + + # HDR + if target == 'pisp': + for i, algo in enumerate(in_json['algorithms']): + if list(algo.keys())[0] == 'rpi.hdr': + in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']} + + return in_json + + +def convert_v2(in_json: dict, target: str) -> str: + + if 'version' in in_json.keys() and in_json['version'] == 1.0: + converted = { + 'version': 2.0, + 'target': target, + 'algorithms': [{algo: config} for algo, config in in_json.items()] + } + else: + converted = 
in_json + + # Convert between vc4 <-> pisp targets. This is a best effort thing. + if converted['target'] != target: + converted = convert_target(converted, target) + converted['target'] = target + + grid_size = grid_size_vc4[0] if target == 'vc4' else grid_size_pisp[0] + return pretty_print(converted, custom_elems={'table': grid_size, 'luminance_lut': grid_size}) if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description= - 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0.\n') + 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n') parser.add_argument('input', type=str, help='Input tuning file.') + parser.add_argument('-t', '--target', type=str, help='Target platform.', + choices=['pisp', 'vc4'], default='vc4') parser.add_argument('output', type=str, nargs='?', help='Output converted tuning file. If not provided, the input file will be updated in-place.', default=None) @@ -40,7 +114,7 @@ if __name__ == "__main__": with open(args.input, 'r') as f: in_json = json.load(f) - out_json = convert_v2(in_json) + out_json = convert_v2(in_json, args.target) with open(args.output if args.output is not None else args.input, 'w') as f: f.write(out_json) diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py index cd89f177..96f1b5e6 100755 --- a/utils/raspberrypi/ctt/ctt.py +++ b/utils/raspberrypi/ctt/ctt.py @@ -4,11 +4,12 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt.py - camera tuning tool +# camera tuning tool import os import sys from ctt_image_load import * +from ctt_cac import * from ctt_ccm import * from ctt_awb import * from ctt_alsc import * @@ -22,9 +23,10 @@ import re """ This file houses the camera object, which is used to perform the calibrations. -The camera object houses all the calibration images as attributes in two lists: +The camera object houses all the calibration images as attributes in three lists: - imgs (macbeth charts) - imgs_alsc (alsc correction images) + - imgs_cac (cac correction images) Various calibrations are methods of the camera object, and the output is stored in a dictionary called self.json. Once all the caibration has been completed, the Camera.json is written into a @@ -67,139 +69,26 @@ Camera object that is the backbone of the tuning tool. Input is the desired path of the output json. 
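The `interp_2d()` helper added to convert_tuning.py above resamples ALSC tables between the vc4 (16x12) and PiSP (32x32) grids by interpolating rows first and columns second. A compact numpy sketch of the same separable interpolation (function name and defaults are illustrative, not the patch's code):

```python
import numpy as np

def resample_table(flat_table, src=(16, 12), dst=(32, 32)):
    """Resample a flattened src_w x src_h gain table to dst_w x dst_h."""
    (src_w, src_h), (dst_w, dst_h) = src, dst
    grid = np.reshape(flat_table, (src_h, src_w))
    rows = np.zeros((src_h, dst_w))
    for i in range(src_h):  # stretch every row to the destination width
        rows[i] = np.interp(np.linspace(0, src_w - 1, dst_w),
                            np.arange(src_w), grid[i])
    out = np.zeros((dst_h, dst_w))
    for j in range(dst_w):  # then stretch every column to the destination height
        out[:, j] = np.interp(np.linspace(0, src_h - 1, dst_h),
                              np.arange(src_h), rows[:, j])
    return np.round(out.flatten(), 3).tolist()
```

As the patch itself notes, the vc4 <-> pisp conversion is a best-effort translation rather than an exact one.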
""" class Camera: - def __init__(self, jfile): + def __init__(self, jfile, json): self.path = os.path.dirname(os.path.expanduser(__file__)) + '/' if self.path == '/': self.path = '' self.imgs = [] self.imgs_alsc = [] + self.imgs_cac = [] self.log = 'Log created : ' + time.asctime(time.localtime(time.time())) self.log_separator = '\n'+'-'*70+'\n' self.jf = jfile """ initial json dict populated by uncalibrated values """ - self.json = { - "rpi.black_level": { - "black_level": 4096 - }, - "rpi.dpc": { - }, - "rpi.lux": { - "reference_shutter_speed": 10000, - "reference_gain": 1, - "reference_aperture": 1.0 - }, - "rpi.noise": { - }, - "rpi.geq": { - }, - "rpi.sdn": { - }, - "rpi.awb": { - "priors": [ - {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]}, - {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]}, - {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]} - ], - "modes": { - "auto": {"lo": 2500, "hi": 8000}, - "incandescent": {"lo": 2500, "hi": 3000}, - "tungsten": {"lo": 3000, "hi": 3500}, - "fluorescent": {"lo": 4000, "hi": 4700}, - "indoor": {"lo": 3000, "hi": 5000}, - "daylight": {"lo": 5500, "hi": 6500}, - "cloudy": {"lo": 7000, "hi": 8600} - }, - "bayes": 1 - }, - "rpi.agc": { - "metering_modes": { - "centre-weighted": { - "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0] - }, - "spot": { - "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - }, - "matrix": { - "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - } - }, - "exposure_modes": { - "normal": { - "shutter": [100, 10000, 30000, 60000, 120000], - "gain": [1.0, 2.0, 4.0, 6.0, 6.0] - }, - "short": { - "shutter": [100, 5000, 10000, 20000, 120000], - "gain": [1.0, 2.0, 4.0, 6.0, 6.0] - } - }, - "constraint_modes": { - "normal": [ - {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]} - ], - "highlight": [ - {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}, - {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]} - ] - }, - "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17] - }, - "rpi.alsc": { - 'omega': 1.3, - 'n_iter': 100, - 'luminance_strength': 0.7, - }, - "rpi.contrast": { - "ce_enable": 1, - "gamma_curve": [ - 0, 0, - 1024, 5040, - 2048, 9338, - 3072, 12356, - 4096, 15312, - 5120, 18051, - 6144, 20790, - 7168, 23193, - 8192, 25744, - 9216, 27942, - 10240, 30035, - 11264, 32005, - 12288, 33975, - 13312, 35815, - 14336, 37600, - 15360, 39168, - 16384, 40642, - 18432, 43379, - 20480, 45749, - 22528, 47753, - 24576, 49621, - 26624, 51253, - 28672, 52698, - 30720, 53796, - 32768, 54876, - 36864, 57012, - 40960, 58656, - 45056, 59954, - 49152, 61183, - 53248, 62355, - 57344, 63419, - 61440, 64476, - 65535, 65535 - ] - }, - "rpi.ccm": { - }, - "rpi.sharpen": { - } - } + self.json = json """ Perform colour correction calibrations by comparing macbeth patch colours to standard macbeth chart colours. """ - def ccm_cal(self, do_alsc_colour): + def ccm_cal(self, do_alsc_colour, grid_size): if 'rpi.ccm' in self.disable: return 1 print('\nStarting CCM calibration') @@ -245,7 +134,7 @@ class Camera: Do CCM calibration """ try: - ccms = ccm(self, cal_cr_list, cal_cb_list) + ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size) except ArithmeticError: print('ERROR: Matrix is singular!\nTake new pictures and try again...') self.log += '\nERROR: Singular matrix encountered during fit!' 
@@ -259,11 +148,67 @@ class Camera: print('Finished CCM calibration') """ + Perform chromatic abberation correction using multiple dots images. + """ + def cac_cal(self, do_alsc_colour): + if 'rpi.cac' in self.disable: + return 1 + print('\nStarting CAC calibration') + self.log_new_sec('CAC') + """ + check if cac images have been taken + """ + if len(self.imgs_cac) == 0: + print('\nError:\nNo cac calibration images found') + self.log += '\nERROR: No CAC calibration images found!' + self.log += '\nCAC calibration aborted!' + return 1 + """ + if image is greyscale then CAC makes no sense + """ + if self.grey: + print('\nERROR: Can\'t do CAC on greyscale image!') + self.log += '\nERROR: Cannot perform CAC calibration ' + self.log += 'on greyscale image!\nCAC aborted!' + del self.json['rpi.cac'] + return 0 + a = time.time() + """ + Check if camera is greyscale or color. If not greyscale, then perform cac + """ + if do_alsc_colour: + """ + Here we have a color sensor. Perform cac + """ + try: + cacs = cac(self) + except ArithmeticError: + print('ERROR: Matrix is singular!\nTake new pictures and try again...') + self.log += '\nERROR: Singular matrix encountered during fit!' + self.log += '\nCAC aborted!' + return 1 + else: + """ + case where config options suggest greyscale camera. No point in doing CAC + """ + cal_cr_list, cal_cb_list = None, None + self.log += '\nWARNING: No ALSC tables found.\nCAC calibration ' + self.log += 'performed without ALSC correction...' + + """ + Write output to json + """ + self.json['rpi.cac']['cac'] = cacs + self.log += '\nCAC calibration written to json file' + print('Finished CAC calibration') + + + """ Auto white balance calibration produces a colour curve for various colour temperatures, as well as providing a maximum 'wiggle room' distance from this curve (transverse_neg/pos). """ - def awb_cal(self, greyworld, do_alsc_colour): + def awb_cal(self, greyworld, do_alsc_colour, grid_size): if 'rpi.awb' in self.disable: return 1 print('\nStarting AWB calibration') @@ -306,7 +251,7 @@ class Camera: call calibration function """ plot = "rpi.awb" in self.plot - awb_out = awb(self, cal_cr_list, cal_cb_list, plot) + awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size) ct_curve, transverse_neg, transverse_pos = awb_out """ write output to json @@ -324,7 +269,7 @@ class Camera: colour channel seperately, and then partially corrects for vignetting. The extent of the correction depends on the 'luminance_strength' parameter. 
""" - def alsc_cal(self, luminance_strength, do_alsc_colour): + def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0): if 'rpi.alsc' in self.disable: return 1 print('\nStarting ALSC calibration') @@ -347,10 +292,10 @@ class Camera: call calibration function """ plot = "rpi.alsc" in self.plot - alsc_out = alsc_all(self, do_alsc_colour, plot) + alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain) cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out """ - write ouput to json and finish if not do_alsc_colour + write output to json and finish if not do_alsc_colour """ if not do_alsc_colour: self.json['rpi.alsc']['luminance_lut'] = luminance_lut @@ -393,7 +338,7 @@ class Camera: """ obtain worst-case scenario residual sigmas """ - sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list) + sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size) """ write output to json """ @@ -509,19 +454,20 @@ class Camera: """ writes the json dictionary to the raw json file then make pretty """ - def write_json(self): + def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)): """ Write json dictionary to file using our version 2 format """ out_json = { - "version": 2.0, - 'target': 'bcm2835', + "version": version, + 'target': target if target != 'vc4' else 'bcm2835', "algorithms": [{name: data} for name, data in self.json.items()], } with open(self.jf, 'w') as f: - f.write(pretty_print(out_json)) + f.write(pretty_print(out_json, + custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})) """ add a new section to the log file @@ -627,6 +573,16 @@ class Camera: self.log += '\nWARNING: Error reading colour temperature' self.log += '\nImage discarded!' print('DISCARDED') + elif 'cac' in filename: + Img = load_image(self, address, mac=False) + self.log += '\nIdentified as an CAC image' + Img.name = filename + self.log += '\nColour temperature: {} K'.format(col) + self.imgs_cac.append(Img) + if blacklevel != -1: + Img.blacklevel_16 = blacklevel + print(img_suc_msg) + continue else: self.log += '\nIdentified as macbeth chart image' """ @@ -672,6 +628,7 @@ class Camera: self.log += '\n\nImages found:' self.log += '\nMacbeth : {}'.format(len(self.imgs)) self.log += '\nALSC : {} '.format(len(self.imgs_alsc)) + self.log += '\nCAC: {} '.format(len(self.imgs_cac)) self.log += '\n\nCamera metadata' """ check usable images found @@ -680,22 +637,21 @@ class Camera: print('\nERROR: No usable macbeth chart images found') self.log += '\nERROR: No usable macbeth chart images found' return 0 - elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0: + elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0: print('\nERROR: No usable images found') self.log += '\nERROR: No usable images found' return 0 """ Double check that every image has come from the same camera... 
""" - all_imgs = self.imgs + self.imgs_alsc + all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac camNames = list(set([Img.camName for Img in all_imgs])) patterns = list(set([Img.pattern for Img in all_imgs])) sigbitss = list(set([Img.sigbits for Img in all_imgs])) blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs])) sizes = list(set([(Img.w, Img.h) for Img in all_imgs])) - if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \ - len(blacklevels) == 1 and len(sizes) == 1: + if 1: self.grey = (patterns[0] == 128) self.blacklevel_16 = blacklevels[0] self.log += '\nName: {}'.format(camNames[0]) @@ -712,7 +668,7 @@ class Camera: return 0 -def run_ctt(json_output, directory, config, log_output, alsc_only=False): +def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False): """ check input files are jsons """ @@ -748,12 +704,14 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False): greyworld = get_config(awb_d, "greyworld", 0, 'bool') alsc_d = get_config(configs, "alsc", {}, 'dict') do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool') - luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num') + luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num') + lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num') blacklevel = get_config(configs, "blacklevel", -1, 'num') macbeth_d = get_config(configs, "macbeth", {}, 'dict') mac_small = get_config(macbeth_d, "small", 0, 'bool') mac_show = get_config(macbeth_d, "show", 0, 'bool') mac_config = (mac_small, mac_show) + print("Read lsc_max_gain", lsc_max_gain) if blacklevel < -1 or blacklevel >= 2**16: print('\nInvalid blacklevel, defaulted to 64') @@ -772,7 +730,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False): initialise tuning tool and load images """ try: - Cam = Camera(json_output) + Cam = Camera(json_output, json=json_template) Cam.log_user_input(json_output, directory, config, log_output) if alsc_only: disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"}) @@ -794,14 +752,17 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False): Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16 Cam.json_remove(disable) print('\nSTARTING CALIBRATIONS') - Cam.alsc_cal(luminance_strength, do_alsc_colour) + Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain) Cam.geq_cal() Cam.lux_cal() Cam.noise_cal() - Cam.awb_cal(greyworld, do_alsc_colour) - Cam.ccm_cal(do_alsc_colour) + if "rpi.cac" in json_template: + Cam.cac_cal(do_alsc_colour) + Cam.awb_cal(greyworld, do_alsc_colour, grid_size) + Cam.ccm_cal(do_alsc_colour, grid_size) + print('\nFINISHED CALIBRATIONS') - Cam.write_json() + Cam.write_json(target=target, grid_size=grid_size) Cam.write_log(log_output) print('\nCalibrations written to: '+json_output) if log_output is None: @@ -811,20 +772,19 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False): else: Cam.write_log(log_output) - if __name__ == '__main__': """ initialise calibration """ if len(sys.argv) == 1: print(""" - Pisp Camera Tuning Tool version 1.0 - + PiSP Tuning Tool version 1.0 Required Arguments: '-i' : Calibration image directory. '-o' : Name of output json file. Optional Arguments: + '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4' '-c' : Config file for the CTT. If not passed, default parameters used. '-l' : Name of output log file. If not passed, 'ctt_log.txt' used. 
""") @@ -833,5 +793,10 @@ if __name__ == '__main__': """ parse input arguments """ - json_output, directory, config, log_output = parse_input() - run_ctt(json_output, directory, config, log_output) + json_output, directory, config, log_output, target = parse_input() + if target == 'pisp': + from ctt_pisp import json_template, grid_size + elif target == 'vc4': + from ctt_vc4 import json_template, grid_size + + run_ctt(json_output, directory, config, log_output, json_template, grid_size, target) diff --git a/utils/raspberrypi/ctt/ctt_alsc.py b/utils/raspberrypi/ctt/ctt_alsc.py index e51d6931..1d94dfa5 100644 --- a/utils/raspberrypi/ctt/ctt_alsc.py +++ b/utils/raspberrypi/ctt/ctt_alsc.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_alsc.py - camera tuning tool for ALSC (auto lens shading correction) +# camera tuning tool for ALSC (auto lens shading correction) from ctt_image_load import * import matplotlib.pyplot as plt @@ -13,8 +13,9 @@ from mpl_toolkits.mplot3d import Axes3D """ preform alsc calibration on a set of images """ -def alsc_all(Cam, do_alsc_colour, plot): +def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0): imgs_alsc = Cam.imgs_alsc + grid_w, grid_h = grid_size """ create list of colour temperatures and associated calibration tables """ @@ -23,7 +24,7 @@ def alsc_all(Cam, do_alsc_colour, plot): list_cb = [] list_cg = [] for Img in imgs_alsc: - col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot) + col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain) list_col.append(col) list_cr.append(cr) list_cb.append(cb) @@ -68,11 +69,12 @@ def alsc_all(Cam, do_alsc_colour, plot): t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b) t_r = np.round(t_r, 3) t_b = np.round(t_b, 3) - r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16]) - b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16]) - r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8] + r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w]) + b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w]) + middle_pos = (grid_h // 2 - 1) * grid_w + grid_w - 1 + r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1] r_cen = round(r_cen/4, 3) - b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8] + b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1] b_cen = round(b_cen/4, 3) Cam.log += '\nRed table corners: {}'.format(r_corners) Cam.log += '\nRed table centre: {}'.format(r_cen) @@ -116,8 +118,9 @@ def alsc_all(Cam, do_alsc_colour, plot): """ calculate g/r and g/b for 32x32 points arranged in a grid for a single image """ -def alsc(Cam, Img, do_alsc_colour, plot=False): +def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0): Cam.log += '\nProcessing image: ' + Img.name + grid_w, grid_h = grid_size """ get channel in correct order """ @@ -128,31 +131,34 @@ def alsc(Cam, Img, do_alsc_colour, plot=False): where w is a multiple of 32. 
""" w, h = Img.w/2, Img.h/2 - dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12)) + dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h)) """ average the green channels into one """ av_ch_g = np.mean((channels[1:3]), axis=0) if do_alsc_colour: """ - obtain 16x12 grid of intensities for each channel and subtract black level + obtain grid_w x grid_h grid of intensities for each channel and subtract black level """ - g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16 - r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16 - b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16 + g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16 + r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16 + b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16 """ calculate ratios as 32 bit in order to be supported by medianBlur function """ - cr = np.reshape(g/r, (12, 16)).astype('float32') - cb = np.reshape(g/b, (12, 16)).astype('float32') - cg = np.reshape(1/g, (12, 16)).astype('float32') + cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32') + cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32') + cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32') """ median blur to remove peaks and save as float 64 """ cr = cv2.medianBlur(cr, 3).astype('float64') + cr = cr/np.min(cr) # gain tables are easier for humans to read if the minimum is 1.0 cb = cv2.medianBlur(cb, 3).astype('float64') + cb = cb/np.min(cb) cg = cv2.medianBlur(cg, 3).astype('float64') cg = cg/np.min(cg) + cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain """ debugging code showing 2D surface plot of vignetting. Quite useful for @@ -164,7 +170,7 @@ def alsc(Cam, Img, do_alsc_colour, plot=False): """ note Y is plotted as -Y so plot has same axes as image """ - X, Y = np.meshgrid(range(16), range(12)) + X, Y = np.meshgrid(range(grid_w), range(grid_h)) ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0) ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str)) hb = hf.add_subplot(312, projection='3d') @@ -176,21 +182,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False): # print(Img.str) plt.show() - return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy) + return Img.col, cr.flatten(), cb.flatten(), cg, (w, h, dx, dy) else: """ only perform calculations for luminance shading """ - g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16 - cg = np.reshape(1/g, (12, 16)).astype('float32') + g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16 + cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32') cg = cv2.medianBlur(cg, 3).astype('float64') cg = cg/np.min(cg) + cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain if plot: hf = plt.figure(figssize=(8, 8)) ha = hf.add_subplot(1, 1, 1, projection='3d') - X, Y = np.meashgrid(range(16), range(12)) + X, Y = np.meashgrid(range(grid_w), range(grid_h)) ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0) ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg').format(Img.str) plt.show() @@ -199,21 +206,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False): """ -Compresses channel down to a 16x12 grid +Compresses channel down to a grid of the requested size """ -def get_16x12_grid(chan, dx, dy): +def get_grid(chan, dx, dy, grid_size): + grid_w, grid_h = grid_size grid = [] """ since left and bottom border will not necessarily have rectangles of dimension dx x dy, the 32nd iteration has to be handled separately. 
""" - for i in range(11): - for j in range(15): + for i in range(grid_h - 1): + for j in range(grid_w - 1): grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)])) - grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:])) - for j in range(15): - grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)])) - grid.append(np.mean(chan[11*dy:, 15*dx:])) + grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:])) + for j in range(grid_w - 1): + grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)])) + grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:])) """ return as np.array, ready for further manipulation """ @@ -223,7 +231,7 @@ def get_16x12_grid(chan, dx, dy): """ obtains sigmas for red and blue, effectively a measure of the 'error' """ -def get_sigma(Cam, cal_cr_list, cal_cb_list): +def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size): Cam.log += '\nCalculating sigmas' """ provided colour alsc tables were generated for two different colour @@ -241,8 +249,8 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list): sigma_rs = [] sigma_bs = [] for i in range(len(cts)-1): - sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'])) - sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'])) + sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size)) + sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size)) Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1]) Cam.log += '\nSigma red: {}'.format(sigma_rs[-1]) Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1]) @@ -263,12 +271,13 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list): """ calculate sigma from two adjacent gain tables """ -def calc_sigma(g1, g2): +def calc_sigma(g1, g2, grid_size): + grid_w, grid_h = grid_size """ reshape into 16x12 matrix """ - g1 = np.reshape(g1, (12, 16)) - g2 = np.reshape(g2, (12, 16)) + g1 = np.reshape(g1, (grid_h, grid_w)) + g2 = np.reshape(g2, (grid_h, grid_w)) """ apply gains to gain table """ @@ -280,8 +289,8 @@ def calc_sigma(g1, g2): neighbours, then append to list """ diffs = [] - for i in range(10): - for j in range(14): + for i in range(grid_h - 2): + for j in range(grid_w - 2): """ note indexing is incremented by 1 since all patches on borders are not counted diff --git a/utils/raspberrypi/ctt/ctt_awb.py b/utils/raspberrypi/ctt/ctt_awb.py index bf45e54d..4af1fe41 100644 --- a/utils/raspberrypi/ctt/ctt_awb.py +++ b/utils/raspberrypi/ctt/ctt_awb.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_awb.py - camera tuning tool for AWB +# camera tuning tool for AWB from ctt_image_load import * import matplotlib.pyplot as plt @@ -13,7 +13,7 @@ from scipy.optimize import fmin """ obtain piecewise linear approximation for colour curve """ -def awb(Cam, cal_cr_list, cal_cb_list, plot): +def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size): imgs = Cam.imgs """ condense alsc calibration tables into one dictionary @@ -43,7 +43,7 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot): Note: if alsc is disabled then colour_cals will be set to None and the function will just return the greyscale patches """ - r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals) + r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size) """ calculate ratio of r, b to g """ @@ -293,12 +293,13 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot): """ obtain greyscale patches and perform alsc colour correction """ -def get_alsc_patches(Img, colour_cals, grey=True): 
+def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)): """ get patch centre coordinates, image colour and the actual patches for each channel, remembering to subtract blacklevel If grey then only greyscale patches considered """ + grid_w, grid_h = grid_size if grey: cen_coords = Img.cen_coords[3::4] col = Img.col @@ -345,12 +346,12 @@ def get_alsc_patches(Img, colour_cals, grey=True): bef_tabs = np.array(colour_cals[bef]) aft_tabs = np.array(colour_cals[aft]) col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db) - col_tabs = np.reshape(col_tabs, (2, 12, 16)) + col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w)) """ calculate dx, dy used to calculate alsc table """ w, h = Img.w/2, Img.h/2 - dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12)) + dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h)) """ make list of pairs of gains for each patch by selecting the correct value in alsc colour calibration table diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py new file mode 100644 index 00000000..5a4c5101 --- /dev/null +++ b/utils/raspberrypi/ctt/ctt_cac.py @@ -0,0 +1,228 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2023, Raspberry Pi Ltd +# +# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool + +from PIL import Image +import numpy as np +import matplotlib.pyplot as plt +from matplotlib import cm + +from ctt_dots_locator import find_dots_locations + + +# This is the wrapper file that creates a JSON entry for you to append +# to your camera tuning file. +# It calculates the chromatic aberration at different points throughout +# the image and uses that to produce a martix that can then be used +# in the camera tuning files to correct this aberration. + + +def pprint_array(array): + # Function to print the array in a tidier format + array = array + output = "" + for i in range(len(array)): + for j in range(len(array[0])): + output += str(round(array[i, j], 2)) + ", " + # Add the necessary indentation to the array + output += "\n " + # Cut off the end of the array (nicely formats it) + return output[:-22] + + +def plot_shifts(red_shifts, blue_shifts): + # If users want, they can pass a command line option to show the shifts on a graph + # Can be useful to check that the functions are all working, and that the sample + # images are doing the right thing + Xs = np.array(red_shifts)[:, 0] + Ys = np.array(red_shifts)[:, 1] + Zs = np.array(red_shifts)[:, 2] + Zs2 = np.array(red_shifts)[:, 3] + Zs3 = np.array(blue_shifts)[:, 2] + Zs4 = np.array(blue_shifts)[:, 3] + + fig, axs = plt.subplots(2, 2) + ax = fig.add_subplot(2, 2, 1, projection='3d') + ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0) + ax.set_title('Red X Shift') + ax = fig.add_subplot(2, 2, 2, projection='3d') + ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0) + ax.set_title('Red Y Shift') + ax = fig.add_subplot(2, 2, 3, projection='3d') + ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0) + ax.set_title('Blue X Shift') + ax = fig.add_subplot(2, 2, 4, projection='3d') + ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0) + ax.set_title('Blue Y Shift') + fig.tight_layout() + plt.show() + + +def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9): + # Convert the shifts to a numpy array for easier handling and initialise other variables + red_shifts = np.array(red_shift) + blue_shifts = np.array(blue_shift) + # create a grid that's smaller than the output grid, which we then interpolate from to get the output values + xrgrid = np.zeros((output_grid_size 
- 1, output_grid_size - 1)) + xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1)) + yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1)) + ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1)) + + xrsgrid = [] + xbsgrid = [] + yrsgrid = [] + ybsgrid = [] + xg = np.zeros((output_grid_size - 1, output_grid_size - 1)) + yg = np.zeros((output_grid_size - 1, output_grid_size - 1)) + + # Format the grids - numpy doesn't work for this, it wants a + # nice uniformly spaced grid, which we don't know if we have yet, hence the rather mundane setup + for x in range(output_grid_size - 1): + xrsgrid.append([]) + yrsgrid.append([]) + xbsgrid.append([]) + ybsgrid.append([]) + for y in range(output_grid_size - 1): + xrsgrid[x].append([]) + yrsgrid[x].append([]) + xbsgrid[x].append([]) + ybsgrid[x].append([]) + + image_size = (image_dimensions[0], image_dimensions[1]) + gridxsize = image_size[0] / (output_grid_size - 1) + gridysize = image_size[1] / (output_grid_size - 1) + + # Iterate through each dot, and it's shift values and put these into the correct grid location + for red_shift in red_shifts: + xgridloc = int(red_shift[0] / gridxsize) + ygridloc = int(red_shift[1] / gridysize) + xrsgrid[xgridloc][ygridloc].append(red_shift[2]) + yrsgrid[xgridloc][ygridloc].append(red_shift[3]) + + for blue_shift in blue_shifts: + xgridloc = int(blue_shift[0] / gridxsize) + ygridloc = int(blue_shift[1] / gridysize) + xbsgrid[xgridloc][ygridloc].append(blue_shift[2]) + ybsgrid[xgridloc][ygridloc].append(blue_shift[3]) + + # Now calculate the average pixel shift for each square in the grid + for x in range(output_grid_size - 1): + for y in range(output_grid_size - 1): + xrgrid[x, y] = np.mean(xrsgrid[x][y]) + yrgrid[x, y] = np.mean(yrsgrid[x][y]) + xbgrid[x, y] = np.mean(xbsgrid[x][y]) + ybgrid[x, y] = np.mean(ybsgrid[x][y]) + + # Next, we start to interpolate the central points of the grid that gets passed to the tuning file + input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid]) + output_grids = np.zeros((4, output_grid_size, output_grid_size)) + + # Interpolate the centre of the grid + output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4 + + # Edge cases: + output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1] + output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2] + output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1] + output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1] + + # Corner Cases: + output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1] + output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2] + output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1] + output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2] + + # Below, we swap the x and the y coordinates, and 
also multiply by a factor of -1 + # This is due to the PiSP (standard) dimensions being flipped in comparison to + # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts, + # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift) + + output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1 + return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb + + +def analyse_dot(dot, dot_location=[0, 0]): + # Scan through the dot, calculate the centroid of each colour channel by doing: + # pixel channel brightness * distance from top left corner + # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel + red_channel = np.array(dot)[:, :, 0] + y_num_pixels = len(red_channel[0]) + x_num_pixels = len(red_channel) + yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels))) + xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel)) + red_sum = np.sum(red_channel) + + green_channel = np.array(dot)[:, :, 1] + ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels))) + xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel)) + green_sum = np.sum(green_channel) + + blue_channel = np.array(dot)[:, :, 2] + yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels))) + xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel)) + blue_sum = np.sum(blue_channel) + + # We return this structure. It contains 2 arrays that contain: + # the locations of the dot center, along with the channel shifts in the x and y direction: + # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ] + + return [[int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2), xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum], [dot_location[0] + int(len(dot) / 2), dot_location[1] + int(len(dot[0]) / 2), xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]] + + +def cac(Cam): + filelist = Cam.imgs_cac + + Cam.log += '\nCAC analysing files: {}'.format(str(filelist)) + np.set_printoptions(precision=3) + np.set_printoptions(suppress=True) + + # Create arrays to hold all the dots data and their colour offsets + red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]] + blue_shift = [] + # Iterate through the files + # Multiple files is reccomended to average out the lens aberration through rotations + for file in filelist: + Cam.log += '\nCAC processing file' + print("\n Processing file") + # Read the raw RGB values + rgb = file.rgb + image_size = [file.h, file.w] # Image size, X, Y + # Create a colour copy of the RGB values to use later in the calibration + imout = Image.new(mode="RGB", size=image_size) + rgb_image = np.array(imout) + # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily + rgb.reshape((image_size[0], image_size[1], 3)) + rgb_image = rgb + + # Pass the RGB image through to the dots locating program + # Returns an array of the dots (colour rectangles around the dots), and an array of their locations + print("Finding dots") + Cam.log += '\nFinding dots' + dots, dots_locations = find_dots_locations(rgb_image) + + # Now, analyse each dot. 
Work out the centroid of each colour channel, and use that to work out + # by how far the chromatic aberration has shifted each channel + Cam.log += '\nDots found: {}'.format(str(len(dots))) + print('Dots found: ' + str(len(dots))) + + for dot, dot_location in zip(dots, dots_locations): + if len(dot) > 0: + if (dot_location[0] > 0) and (dot_location[1] > 0): + ret = analyse_dot(dot, dot_location) + red_shift.append(ret[0]) + blue_shift.append(ret[1]) + + # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix + # for the CAC block to handle and then store these as a .json file to be added to the camera + # tuning file + print("\nCreating output grid") + Cam.log += '\nCreating output grid' + rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size) + + print("CAC correction complete!") + Cam.log += '\nCAC correction complete!' + + # Give the JSON dict back to the main ctt program + return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))} diff --git a/utils/raspberrypi/ctt/ctt_ccm.py b/utils/raspberrypi/ctt/ctt_ccm.py index a09bfd09..07c943a8 100644 --- a/utils/raspberrypi/ctt/ctt_ccm.py +++ b/utils/raspberrypi/ctt/ctt_ccm.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_ccm.py - camera tuning tool for CCM (colour correction matrix) +# camera tuning tool for CCM (colour correction matrix) from ctt_image_load import * from ctt_awb import get_alsc_patches @@ -56,7 +56,7 @@ FInds colour correction matrices for list of images """ -def ccm(Cam, cal_cr_list, cal_cb_list): +def ccm(Cam, cal_cr_list, cal_cb_list, grid_size): global matrix_selection_types, typenum imgs = Cam.imgs """ @@ -133,9 +133,7 @@ def ccm(Cam, cal_cr_list, cal_cb_list): Note: if alsc is disabled then colour_cals will be set to None and no the function will simply return the macbeth patches """ - r, b, g = get_alsc_patches(Img, colour_cals, grey=False) - # 256 values for each patch of sRGB values - + r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size) """ do awb Note: awb is done by measuring the macbeth chart in the image, rather diff --git a/utils/raspberrypi/ctt/ctt_config_example.json b/utils/raspberrypi/ctt/ctt_config_example.json index c7f90761..1105862c 100644 --- a/utils/raspberrypi/ctt/ctt_config_example.json +++ b/utils/raspberrypi/ctt/ctt_config_example.json @@ -3,7 +3,8 @@ "plot": [], "alsc": { "do_alsc_colour": 1, - "luminance_strength": 0.5 + "luminance_strength": 0.8, + "max_gain": 8.0 }, "awb": { "greyworld": 0 @@ -13,4 +14,4 @@ "small": 0, "show": 0 } -}
\ No newline at end of file +} diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py new file mode 100644 index 00000000..4945c04b --- /dev/null +++ b/utils/raspberrypi/ctt/ctt_dots_locator.py @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2023, Raspberry Pi Ltd +# +# find_dots.py - Used by CAC algorithm to convert image to set of dots + +''' +This file takes the black and white version of the image, along with +the color version. It then located the black dots on the image by +thresholding dark pixels. +In a rather fun way, the algorithm bounces around the thresholded area in a random path +We then use the maximum and minimum of these paths to determine the dot shape and size +This info is then used to return colored dots and locations back to the main file +''' + +import numpy as np +import random +from PIL import Image, ImageEnhance, ImageFilter + + +def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10, logfile=open("log.txt", "a+")): + # Initialise some starting variables + pixels = Image.fromarray(rgb_image) + pixels = pixels.convert("L") + enhancer = ImageEnhance.Contrast(pixels) + im_output = enhancer.enhance(1.4) + # We smooth it slightly to make it easier for the dot recognition program to locate the dots + im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2)) + bw_image = np.array(im_output) + + location = [0, 0] + dots = [] + dots_location = [] + # the program takes away the edges - we don't want a dot that is half a circle, the + # centroids would all be wrong + for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size): + for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size): + location = [x, y] + scrap_dot = False # A variable used to make sure that this is a valid dot + if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot): + heading = "south" # Define a starting direction to move in + coords = [] + for i in range(search_path_length): # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot. + # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA + if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot): + if heading == "south": + if bw_image[location[0] + 1, location[1]] < color_threshold: + # Here, notice it does not go south, but actually goes southeast + # This is crucial in ensuring that we make our way around the majority of the dot + location[0] = location[0] + 1 + location[1] = location[1] + 1 + heading = "south" + else: + # This happens when we reach a thresholded edge. 
We now randomly change direction and keep searching + dir = random.randint(1, 2) + if dir == 1: + heading = "west" + if dir == 2: + heading = "east" + + if heading == "east": + if bw_image[location[0], location[1] + 1] < color_threshold: + location[1] = location[1] + 1 + heading = "east" + else: + dir = random.randint(1, 2) + if dir == 1: + heading = "north" + if dir == 2: + heading = "south" + + if heading == "west": + if bw_image[location[0], location[1] - 1] < color_threshold: + location[1] = location[1] - 1 + heading = "west" + else: + dir = random.randint(1, 2) + if dir == 1: + heading = "north" + if dir == 2: + heading = "south" + + if heading == "north": + if bw_image[location[0] - 1, location[1]] < color_threshold: + location[0] = location[0] - 1 + heading = "north" + else: + dir = random.randint(1, 2) + if dir == 1: + heading = "west" + if dir == 2: + heading = "east" + # Log where our particle travels across the dot + coords.append([location[0], location[1]]) + else: + scrap_dot = True # We just don't have enough space around the dot, discard this one, and move on + if not scrap_dot: + # get the size of the dot surrounding the dot + x_coords = np.array(coords)[:, 0] + y_coords = np.array(coords)[:, 1] + hsquaresize = max(list(x_coords)) - min(list(x_coords)) + vsquaresize = max(list(y_coords)) - min(list(y_coords)) + # Create the bounding coordinates of the rectangle surrounding the dot + # Program uses the dotsize + half of the dotsize to ensure we get all that color fringing + extra_space_factor = 0.45 + top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor)) + btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor) + top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor)) + btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor) + # Overwrite the area of the dot to ensure we don't use it again + bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255 + # Add the color version of the dot to the list to send off, along with some coordinates. 
+ dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y]) + dots_location.append([top_left_x, top_left_y]) + else: + # Dot was too close to the image border to be useable + pass + return dots, dots_location diff --git a/utils/raspberrypi/ctt/ctt_geq.py b/utils/raspberrypi/ctt/ctt_geq.py index c45addcd..5a91ebb4 100644 --- a/utils/raspberrypi/ctt/ctt_geq.py +++ b/utils/raspberrypi/ctt/ctt_geq.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_geq.py - camera tuning tool for GEQ (green equalisation) +# camera tuning tool for GEQ (green equalisation) from ctt_tools import * import matplotlib.pyplot as plt diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py index 310c5e88..531de328 100644 --- a/utils/raspberrypi/ctt/ctt_image_load.py +++ b/utils/raspberrypi/ctt/ctt_image_load.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019-2020, Raspberry Pi Ltd # -# ctt_image_load.py - camera tuning tool image loading +# camera tuning tool image loading from ctt_tools import * from ctt_macbeth_locator import * @@ -350,6 +350,7 @@ def dng_load_image(Cam, im_str): c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift) c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift) Img.channels = [c0, c1, c2, c3] + Img.rgb = raw_im.postprocess() except Exception: print("\nERROR: failed to load DNG file", im_str) diff --git a/utils/raspberrypi/ctt/ctt_lux.py b/utils/raspberrypi/ctt/ctt_lux.py index 70855e1b..46be1512 100644 --- a/utils/raspberrypi/ctt/ctt_lux.py +++ b/utils/raspberrypi/ctt/ctt_lux.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_lux.py - camera tuning tool for lux level +# camera tuning tool for lux level from ctt_tools import * diff --git a/utils/raspberrypi/ctt/ctt_macbeth_locator.py b/utils/raspberrypi/ctt/ctt_macbeth_locator.py index 178aeed0..f22dbf31 100644 --- a/utils/raspberrypi/ctt/ctt_macbeth_locator.py +++ b/utils/raspberrypi/ctt/ctt_macbeth_locator.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_macbeth_locator.py - camera tuning tool Macbeth chart locator +# camera tuning tool Macbeth chart locator from ctt_ransac import * from ctt_tools import * diff --git a/utils/raspberrypi/ctt/ctt_noise.py b/utils/raspberrypi/ctt/ctt_noise.py index 3270bf34..0b18d83f 100644 --- a/utils/raspberrypi/ctt/ctt_noise.py +++ b/utils/raspberrypi/ctt/ctt_noise.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_noise.py - camera tuning tool noise calibration +# camera tuning tool noise calibration from ctt_image_load import * import matplotlib.pyplot as plt diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py new file mode 100755 index 00000000..a59b053c --- /dev/null +++ b/utils/raspberrypi/ctt/ctt_pisp.py @@ -0,0 +1,805 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# +# ctt_pisp.py - camera tuning tool data for PiSP platforms + + +json_template = { + "rpi.black_level": { + "black_level": 4096 + }, + "rpi.lux": { + "reference_shutter_speed": 10000, + "reference_gain": 1, + "reference_aperture": 1.0 + }, + "rpi.dpc": { + "strength": 1 + }, + "rpi.noise": { + }, + "rpi.geq": { + }, + "rpi.denoise": + { + "normal": + { + "sdn": + { + "deviation": 1.6, + "strength": 0.5, + "deviation2": 3.2, + "deviation_no_tdn": 3.2, + "strength_no_tdn": 0.75 + }, + "cdn": + { + "deviation": 200, + "strength": 0.3 + }, + "tdn": + { + "deviation": 0.8, + "threshold": 0.05 + } + }, + "hdr": + 
{ + "sdn": + { + "deviation": 1.6, + "strength": 0.5, + "deviation2": 3.2, + "deviation_no_tdn": 3.2, + "strength_no_tdn": 0.75 + }, + "cdn": + { + "deviation": 200, + "strength": 0.3 + }, + "tdn": + { + "deviation": 1.3, + "threshold": 0.1 + } + }, + "night": + { + "sdn": + { + "deviation": 1.6, + "strength": 0.5, + "deviation2": 3.2, + "deviation_no_tdn": 3.2, + "strength_no_tdn": 0.75 + }, + "cdn": + { + "deviation": 200, + "strength": 0.3 + }, + "tdn": + { + "deviation": 1.3, + "threshold": 0.1 + } + } + }, + "rpi.awb": { + "priors": [ + {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]}, + {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]}, + {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]} + ], + "modes": { + "auto": {"lo": 2500, "hi": 7700}, + "incandescent": {"lo": 2500, "hi": 3000}, + "tungsten": {"lo": 3000, "hi": 3500}, + "fluorescent": {"lo": 4000, "hi": 4700}, + "indoor": {"lo": 3000, "hi": 5000}, + "daylight": {"lo": 5500, "hi": 6500}, + "cloudy": {"lo": 7000, "hi": 8000} + }, + "bayes": 1 + }, + "rpi.agc": + { + "channels": + [ + { + "comment": "Channel 0 is normal AGC", + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 10000, 30000, 60000, 66666 ], + "gain": [ 1.0, 1.5, 2.0, 4.0, 
8.0 ] + }, + "short": + { + "shutter": [ 100, 5000, 10000, 20000, 60000 ], + "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ] + }, + "long": + { + "shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ], + "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + } + ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + }, + ], + "shadows": [ + { + "bound": "LOWER", + "q_lo": 0.0, + "q_hi": 0.5, + "y_target": + [ + 0, 0.17, + 1000, 0.17 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + }, + { + "comment": "Channel 1 is the HDR short channel", + "desaturate": 0, + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 20000, 60000 ], + "gain": [ 1.0, 1.0, 1.0 ] + }, + "short": + { + "shutter": [ 100, 20000, 60000 ], + "gain": [ 1.0, 1.0, 1.0 ] + }, + "long": + { + "shutter": [ 100, 20000, 60000 ], + "gain": [ 1.0, 1.0, 1.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + 
[ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + [ + 0, 0.7, + 1000, 0.7 + ] + }, + { + "bound": "LOWER", + "q_lo": 0.0, + "q_hi": 0.2, + "y_target": + [ + 0, 0.002, + 1000, 0.002 + ] + } + ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + [ + 0, 0.7, + 1000, 0.7 + ] + }, + { + "bound": "LOWER", + "q_lo": 0.0, + "q_hi": 0.2, + "y_target": + [ + 0, 0.002, + 1000, 0.002 + ] + } + ], + "shadows": [ + { + "bound": "LOWER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.95, + "q_hi": 1.0, + "y_target": + [ + 0, 0.7, + 1000, 0.7 + ] + }, + { + "bound": "LOWER", + "q_lo": 0.0, + "q_hi": 0.2, + "y_target": + [ + 0, 0.002, + 1000, 0.002 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + }, + { + "comment": "Channel 2 is the HDR long channel", + "desaturate": 0, + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 20000, 30000, 60000 ], + "gain": [ 1.0, 2.0, 4.0, 8.0 ] + }, + "short": + { + "shutter": [ 100, 20000, 30000, 60000 ], + "gain": [ 1.0, 2.0, 4.0, 8.0 ] + }, + 
"long": + { + "shutter": [ 100, 20000, 30000, 60000 ], + "gain": [ 1.0, 2.0, 4.0, 8.0 ] + } + }, + "constraint_modes": + { + "normal": [ + ], + "highlight": [ + ], + "shadows": [ + ] + }, + "channel_constraints": + [ + { + "bound": "UPPER", + "channel": 4, + "factor": 8 + }, + { + "bound": "LOWER", + "channel": 4, + "factor": 2 + } + ], + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + }, + { + "comment": "Channel 3 is the night mode channel", + "base_ev": 0.33, + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 20000, 66666 ], + "gain": [ 1.0, 2.0, 4.0 ] + }, + "short": + { + "shutter": [ 100, 20000, 33333 ], + "gain": [ 1.0, 2.0, 4.0 ] + }, + "long": + { + "shutter": [ 100, 20000, 66666, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 4.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + } + ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + } + ], + "shadows": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + } + ] + }, + 
"y_target": + [ + 0, 0.16, + 1000, 0.16, + 10000, 0.17 + ] + } + ] + }, + "rpi.alsc": { + 'omega': 1.3, + 'n_iter': 100, + 'luminance_strength': 0.8, + }, + "rpi.contrast": { + "ce_enable": 1, + "gamma_curve": [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + }, + "rpi.ccm": { + }, + "rpi.cac": { + }, + "rpi.sharpen": { + "threshold": 0.25, + "limit": 1.0, + "strength": 1.0 + }, + "rpi.hdr": + { + "Off": + { + "cadence": [ 0 ] + }, + "MultiExposureUnmerged": + { + "cadence": [ 1, 2 ], + "channel_map": { "short": 1, "long": 2 } + }, + "SingleExposure": + { + "cadence": [1], + "channel_map": { "short": 1 }, + "spatial_gain": 2.0, + "tonemap_enable": 1 + }, + "MultiExposure": + { + "cadence": [1, 2], + "channel_map": { "short": 1, "long": 2 }, + "stitch_enable": 1, + "spatial_gain": 2.0, + "tonemap_enable": 1 + }, + "Night": + { + "cadence": [ 3 ], + "channel_map": { "night": 3 }, + "tonemap_enable": 1, + "tonemap": + [ + 0, 0, + 5000, 20000, + 10000, 30000, + 20000, 47000, + 30000, 55000, + 65535, 65535 + ] + } + } +} + +grid_size = (32, 32) diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py index 3e3b8475..a4cae62d 100755 --- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py +++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py @@ -19,13 +19,19 @@ class Encoder(json.JSONEncoder): self.indentation_level = 0 self.hard_break = 120 self.custom_elems = { + 'weights': 15, 'table': 16, 'luminance_lut': 16, 'ct_curve': 3, 'ccm': 3, + 'lut_rx': 9, + 'lut_bx': 9, + 'lut_by': 9, + 'lut_ry': 9, 'gamma_curve': 2, 'y_target': 2, - 'prior': 2 + 'prior': 2, + 'tonemap': 2 } def encode(self, o, node_key=None): @@ -87,7 +93,7 @@ class Encoder(json.JSONEncoder): return self.encode(o) -def pretty_print(in_json: dict) -> str: +def pretty_print(in_json: dict, custom_elems={}) -> str: if 'version' not in in_json or \ 'target' not in in_json or \ @@ -95,12 +101,15 @@ def pretty_print(in_json: dict) -> str: in_json['version'] < 2.0: raise RuntimeError('Incompatible JSON dictionary has been provided') - return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False) + encoder = Encoder(indent=4, sort_keys=False) + encoder.custom_elems |= custom_elems + return encoder.encode(in_json) #json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False) if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description= 'Prettify a version 2.0 camera tuning config JSON file.') + parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4') parser.add_argument('input', type=str, help='Input tuning file.') parser.add_argument('output', type=str, nargs='?', help='Output converted tuning file. 
If not provided, the input file will be updated in-place.', @@ -110,7 +119,12 @@ if __name__ == "__main__": with open(args.input, 'r') as f: in_json = json.load(f) - out_json = pretty_print(in_json) + if args.target == 'pisp': + from ctt_pisp import grid_size + elif args.target == 'vc4': + from ctt_vc4 import grid_size + + out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}) with open(args.output if args.output is not None else args.input, 'w') as f: f.write(out_json) diff --git a/utils/raspberrypi/ctt/ctt_ransac.py b/utils/raspberrypi/ctt/ctt_ransac.py index 9ed7d93c..01bba302 100644 --- a/utils/raspberrypi/ctt/ctt_ransac.py +++ b/utils/raspberrypi/ctt/ctt_ransac.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_ransac.py - camera tuning tool RANSAC selector for Macbeth chart locator +# camera tuning tool RANSAC selector for Macbeth chart locator import numpy as np diff --git a/utils/raspberrypi/ctt/ctt_tools.py b/utils/raspberrypi/ctt/ctt_tools.py index 79195289..50b01ecf 100644 --- a/utils/raspberrypi/ctt/ctt_tools.py +++ b/utils/raspberrypi/ctt/ctt_tools.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# ctt_tools.py - camera tuning tool miscellaneous +# camera tuning tool miscellaneous import time import re @@ -65,11 +65,12 @@ def parse_input(): directory = get_config(args_dict, '-i', None, 'string') config = get_config(args_dict, '-c', None, 'string') log_path = get_config(args_dict, '-l', None, 'string') + target = get_config(args_dict, '-t', "vc4", 'string') if directory is None: raise ArgError('\n\nERROR! No input directory given.') if json_output is None: raise ArgError('\n\nERROR! No output json given.') - return json_output, directory, config, log_path + return json_output, directory, config, log_path, target """ diff --git a/utils/raspberrypi/ctt/ctt_vc4.py b/utils/raspberrypi/ctt/ctt_vc4.py new file mode 100755 index 00000000..7154e110 --- /dev/null +++ b/utils/raspberrypi/ctt/ctt_vc4.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# +# ctt_vc4.py - camera tuning tool data for VC4 platforms + + +json_template = { + "rpi.black_level": { + "black_level": 4096 + }, + "rpi.dpc": { + }, + "rpi.lux": { + "reference_shutter_speed": 10000, + "reference_gain": 1, + "reference_aperture": 1.0 + }, + "rpi.noise": { + }, + "rpi.geq": { + }, + "rpi.sdn": { + }, + "rpi.awb": { + "priors": [ + {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]}, + {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]}, + {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]} + ], + "modes": { + "auto": {"lo": 2500, "hi": 8000}, + "incandescent": {"lo": 2500, "hi": 3000}, + "tungsten": {"lo": 3000, "hi": 3500}, + "fluorescent": {"lo": 4000, "hi": 4700}, + "indoor": {"lo": 3000, "hi": 5000}, + "daylight": {"lo": 5500, "hi": 6500}, + "cloudy": {"lo": 7000, "hi": 8600} + }, + "bayes": 1 + }, + "rpi.agc": { + "metering_modes": { + "centre-weighted": { + "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0] + }, + "spot": { + "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "matrix": { + "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + } + }, + "exposure_modes": { + "normal": { + "shutter": [100, 10000, 30000, 60000, 120000], + "gain": [1.0, 2.0, 4.0, 6.0, 6.0] + }, + "short": { + "shutter": [100, 5000, 10000, 20000, 120000], + "gain": [1.0, 2.0, 4.0, 6.0, 6.0] + } + }, + 
"constraint_modes": { + "normal": [ + {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]} + ], + "highlight": [ + {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}, + {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]} + ] + }, + "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17] + }, + "rpi.alsc": { + 'omega': 1.3, + 'n_iter': 100, + 'luminance_strength': 0.7, + }, + "rpi.contrast": { + "ce_enable": 1, + "gamma_curve": [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + }, + "rpi.ccm": { + }, + "rpi.sharpen": { + } +} + +grid_size = (16, 12) diff --git a/utils/rkisp1/rkisp1-capture.sh b/utils/rkisp1/rkisp1-capture.sh index c5f859f2..d767e31d 100755 --- a/utils/rkisp1/rkisp1-capture.sh +++ b/utils/rkisp1/rkisp1-capture.sh @@ -4,8 +4,7 @@ # # Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> # -# rkisp-capture.sh - Capture processed frames from cameras based on the -# Rockchip ISP1 +# Capture processed frames from cameras based on the Rockchip ISP1 # # The scripts makes use of the following tools, which are expected to be # executable from the system-wide path or from the local directory: diff --git a/utils/tracepoints/analyze-ipa-trace.py b/utils/tracepoints/analyze-ipa-trace.py index 50fbbf42..92e8a235 100755 --- a/utils/tracepoints/analyze-ipa-trace.py +++ b/utils/tracepoints/analyze-ipa-trace.py @@ -4,7 +4,7 @@ # # Author: Paul Elder <paul.elder@ideasonboard.com> # -# analyze-ipa-trace.py - Example of how to extract information from libcamera lttng traces +# Example of how to extract information from libcamera lttng traces import argparse import bt2 diff --git a/utils/tracepoints/meson.build b/utils/tracepoints/meson.build deleted file mode 100644 index 807230fc..00000000 --- a/utils/tracepoints/meson.build +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: CC0-1.0 - -py_modules += ['jinja2'] - -gen_tracepoints_header = find_program('./gen-tp-header.py') diff --git a/utils/tuning/README.rst b/utils/tuning/README.rst index ef3e6ad7..89a1d61e 100644 --- a/utils/tuning/README.rst +++ b/utils/tuning/README.rst @@ -1,11 +1,20 @@ .. SPDX-License-Identifier: CC-BY-SA-4.0 -.. TODO: Write an overview of libtuning +libcamera tuning tools +====================== -Dependencies ------------- +.. Note:: The tuning tools are still very much work in progress. If in doubt, + please ask on the mailing list. + +.. todo:: + Write documentation + +Installation of dependencies +---------------------------- + +:: + # Using a venv + python3 -m venv venv + . ./venv/bin/activate + pip3 install -r requirements.txt -- numpy -- opencv-python -- py3exiv2 -- rawpy diff --git a/utils/tuning/config-example.yaml b/utils/tuning/config-example.yaml new file mode 100644 index 00000000..1b7f52cd --- /dev/null +++ b/utils/tuning/config-example.yaml @@ -0,0 +1,12 @@ +general: + disable: [] + plot: [] + alsc: + do_alsc_colour: 1 + luminance_strength: 0.5 + awb: + greyworld: 0 + macbeth: + small: 1 + show: 0 +# blacklevel: 32
\ No newline at end of file diff --git a/utils/tuning/libtuning/average.py b/utils/tuning/libtuning/average.py index e28770d7..c41075a1 100644 --- a/utils/tuning/libtuning/average.py +++ b/utils/tuning/libtuning/average.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# average.py - Wrapper for numpy averaging functions to enable duck-typing +# Wrapper for numpy averaging functions to enable duck-typing import numpy as np diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py new file mode 100644 index 00000000..abf22321 --- /dev/null +++ b/utils/tuning/libtuning/ctt_awb.py @@ -0,0 +1,378 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# +# camera tuning tool for AWB + +import matplotlib.pyplot as plt +from bisect import bisect_left +from scipy.optimize import fmin +import numpy as np + +from .image import Image + + +""" +obtain piecewise linear approximation for colour curve +""" +def awb(Cam, cal_cr_list, cal_cb_list, plot): + imgs = Cam.imgs + """ + condense alsc calibration tables into one dictionary + """ + if cal_cr_list is None: + colour_cals = None + else: + colour_cals = {} + for cr, cb in zip(cal_cr_list, cal_cb_list): + cr_tab = cr['table'] + cb_tab = cb['table'] + """ + normalise tables so min value is 1 + """ + cr_tab = cr_tab/np.min(cr_tab) + cb_tab = cb_tab/np.min(cb_tab) + colour_cals[cr['ct']] = [cr_tab, cb_tab] + """ + obtain data from greyscale macbeth patches + """ + rb_raw = [] + rbs_hat = [] + for Img in imgs: + Cam.log += '\nProcessing '+Img.name + """ + get greyscale patches with alsc applied if alsc enabled. + Note: if alsc is disabled then colour_cals will be set to None and the + function will just return the greyscale patches + """ + r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals) + """ + calculate ratio of r, b to g + """ + r_g = np.mean(r_patchs/g_patchs) + b_g = np.mean(b_patchs/g_patchs) + Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g) + """ + The curve tends to be better behaved in so-called hatspace. + R, B, G represent the individual channels. The colour curve is plotted in + r, b space, where: + r = R/G + b = B/G + This will be referred to as dehatspace... (sorry) + Hatspace is defined as: + r_hat = R/(R+B+G) + b_hat = B/(R+B+G) + To convert from dehatspace to hastpace (hat operation): + r_hat = r/(1+r+b) + b_hat = b/(1+r+b) + To convert from hatspace to dehatspace (dehat operation): + r = r_hat/(1-r_hat-b_hat) + b = b_hat/(1-r_hat-b_hat) + Proof is left as an excercise to the reader... + Throughout the code, r and b are sometimes referred to as r_g and b_g + as a reminder that they are ratios + """ + r_g_hat = r_g/(1+r_g+b_g) + b_g_hat = b_g/(1+r_g+b_g) + Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat) + rbs_hat.append((r_g_hat, b_g_hat, Img.col)) + rb_raw.append((r_g, b_g)) + Cam.log += '\n' + + Cam.log += '\nFinished processing images' + """ + sort all lits simultaneously by r_hat + """ + rbs_zip = list(zip(rbs_hat, rb_raw)) + rbs_zip.sort(key=lambda x: x[0][0]) + rbs_hat, rb_raw = list(zip(*rbs_zip)) + """ + unzip tuples ready for processing + """ + rbs_hat = list(zip(*rbs_hat)) + rb_raw = list(zip(*rb_raw)) + """ + fit quadratic fit to r_g hat and b_g_hat + """ + a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2) + Cam.log += '\nFit quadratic curve in hatspace' + """ + the algorithm now approximates the shortest distance from each point to the + curve in dehatspace. 
Since the fit is done in hatspace, it is easier to + find the actual shortest distance in hatspace and use the projection back + into dehatspace as an overestimate. + The distance will be used for two things: + 1) In the case that colour temperature does not strictly decrease with + increasing r/g, the closest point to the line will be chosen out of an + increasing pair of colours. + + 2) To calculate transverse negative an dpositive, the maximum positive + and negative distance from the line are chosen. This benefits from the + overestimate as the transverse pos/neg are upper bound values. + """ + """ + define fit function + """ + def f(x): + return a*x**2 + b*x + c + """ + iterate over points (R, B are x and y coordinates of points) and calculate + distance to line in dehatspace + """ + dists = [] + for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])): + """ + define function to minimise as square distance between datapoint and + point on curve. Squaring is monotonic so minimising radius squared is + equivalent to minimising radius + """ + def f_min(x): + y = f(x) + return((x-R)**2+(y-B)**2) + """ + perform optimisation with scipy.optmisie.fmin + """ + x_hat = fmin(f_min, R, disp=0)[0] + y_hat = f(x_hat) + """ + dehat + """ + x = x_hat/(1-x_hat-y_hat) + y = y_hat/(1-x_hat-y_hat) + rr = R/(1-R-B) + bb = B/(1-R-B) + """ + calculate euclidean distance in dehatspace + """ + dist = ((x-rr)**2+(y-bb)**2)**0.5 + """ + return negative if point is below the fit curve + """ + if (x+y) > (rr+bb): + dist *= -1 + dists.append(dist) + Cam.log += '\nFound closest point on fit line to each point in dehatspace' + """ + calculate wiggle factors in awb. 10% added since this is an upper bound + """ + transverse_neg = - np.min(dists) * 1.1 + transverse_pos = np.max(dists) * 1.1 + Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos) + Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg) + """ + set minimum transverse wiggles to 0.1 . + Wiggle factors dictate how far off of the curve the algorithm searches. 0.1 + is a suitable minimum that gives better results for lighting conditions not + within calibration dataset. Anything less will generalise poorly. 
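The hat and dehat transforms described earlier in this function's comments invert one another exactly; a standalone sketch, using hypothetical grey-patch ratios rather than values from the patch:

def hat(r, b):
    s = 1 + r + b
    return r / s, b / s

def dehat(r_hat, b_hat):
    s = 1 - r_hat - b_hat
    return r_hat / s, b_hat / s

r, b = 0.55, 0.43                # hypothetical R/G and B/G ratios for one grey patch
r2, b2 = dehat(*hat(r, b))
assert abs(r2 - r) < 1e-12 and abs(b2 - b) < 1e-12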
+ """ + if transverse_pos < 0.01: + transverse_pos = 0.01 + Cam.log += '\nForced transverse pos to 0.01' + if transverse_neg < 0.01: + transverse_neg = 0.01 + Cam.log += '\nForced transverse neg to 0.01' + + """ + generate new b_hat values at each r_hat according to fit + """ + r_hat_fit = np.array(rbs_hat[0]) + b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c + """ + transform from hatspace to dehatspace + """ + r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit) + b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit) + c_fit = np.round(rbs_hat[2], 0) + """ + round to 4dp + """ + r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit) + r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit) + b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit) + b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit) + r_fit = np.round(r_fit, 4) + b_fit = np.round(b_fit, 4) + """ + The following code ensures that colour temperature decreases with + increasing r/g + """ + """ + iterate backwards over list for easier indexing + """ + i = len(c_fit) - 1 + while i > 0: + if c_fit[i] > c_fit[i-1]: + Cam.log += '\nColour temperature increase found\n' + Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1]) + Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i]) + """ + if colour temperature increases then discard point furthest from + the transformed fit (dehatspace) + """ + error_1 = abs(dists[i-1]) + error_2 = abs(dists[i]) + Cam.log += '\nDistances from fit:\n' + Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1) + Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2) + """ + find bad index + note that in python false = 0 and true = 1 + """ + bad = i - (error_1 < error_2) + Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad]) + Cam.log += 'it is furthest from fit' + """ + delete bad point + """ + r_fit = np.delete(r_fit, bad) + b_fit = np.delete(b_fit, bad) + c_fit = np.delete(c_fit, bad).astype(np.uint16) + """ + note that if a point has been discarded then the length has decreased + by one, meaning that decreasing the index by one will reassess the kept + point against the next point. 
It is therefore possible, in theory, for + two adjacent points to be discarded, although probably rare + """ + i -= 1 + + """ + return formatted ct curve, ordered by increasing colour temperature + """ + ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1] + Cam.log += '\nFinal CT curve:' + for i in range(len(ct_curve)//3): + j = 3*i + Cam.log += '\n ct: {} '.format(ct_curve[j]) + Cam.log += ' r: {} '.format(ct_curve[j+1]) + Cam.log += ' b: {} '.format(ct_curve[j+2]) + + """ + plotting code for debug + """ + if plot: + x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100) + y = a*x**2 + b*x + c + plt.subplot(2, 1, 1) + plt.title('hatspace') + plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue') + plt.plot(x, y, color='green', ls='-') + plt.scatter(rbs_hat[0], rbs_hat[1], color='red') + for i, ct in enumerate(rbs_hat[2]): + plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i])) + plt.xlabel('$\\hat{r}$') + plt.ylabel('$\\hat{b}$') + """ + optional set axes equal to shortest distance so line really does + looks perpendicular and everybody is happy + """ + # ax = plt.gca() + # ax.set_aspect('equal') + plt.grid() + plt.subplot(2, 1, 2) + plt.title('dehatspace - indoors?') + plt.plot(r_fit, b_fit, color='blue') + plt.scatter(rb_raw[0], rb_raw[1], color='green') + plt.scatter(r_fit, b_fit, color='red') + for i, ct in enumerate(c_fit): + plt.annotate(str(ct), (r_fit[i], b_fit[i])) + plt.xlabel('$r$') + plt.ylabel('$b$') + """ + optional set axes equal to shortest distance so line really does + looks perpendicular and everybody is happy + """ + # ax = plt.gca() + # ax.set_aspect('equal') + plt.subplots_adjust(hspace=0.5) + plt.grid() + plt.show() + """ + end of plotting code + """ + return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5)) + + +""" +obtain greyscale patches and perform alsc colour correction +""" +def get_alsc_patches(Img, colour_cals, grey=True): + """ + get patch centre coordinates, image colour and the actual + patches for each channel, remembering to subtract blacklevel + If grey then only greyscale patches considered + """ + if grey: + cen_coords = Img.cen_coords[3::4] + col = Img.col + patches = [np.array(Img.patches[i]) for i in Img.order] + r_patchs = patches[0][3::4] - Img.blacklevel_16 + b_patchs = patches[3][3::4] - Img.blacklevel_16 + """ + note two green channels are averages + """ + g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16 + else: + cen_coords = Img.cen_coords + col = Img.color + patches = [np.array(Img.patches[i]) for i in Img.order] + r_patchs = patches[0] - Img.blacklevel_16 + b_patchs = patches[3] - Img.blacklevel_16 + g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16 + + if colour_cals is None: + return r_patchs, b_patchs, g_patchs + """ + find where image colour fits in alsc colour calibration tables + """ + cts = list(colour_cals.keys()) + pos = bisect_left(cts, col) + """ + if img colour is below minimum or above maximum alsc calibration colour, simply + pick extreme closest to img colour + """ + if pos % len(cts) == 0: + """ + this works because -0 = 0 = first and -1 = last index + """ + col_tabs = np.array(colour_cals[cts[-pos//len(cts)]]) + """ + else, perform linear interpolation between existing alsc colour + calibration tables + """ + else: + bef = cts[pos-1] + aft = cts[pos] + da = col-bef + db = aft-col + bef_tabs = np.array(colour_cals[bef]) + aft_tabs = np.array(colour_cals[aft]) + col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db) + col_tabs = np.reshape(col_tabs, (2, 12, 
16)) + """ + calculate dx, dy used to calculate alsc table + """ + w, h = Img.w/2, Img.h/2 + dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12)) + """ + make list of pairs of gains for each patch by selecting the correct value + in alsc colour calibration table + """ + patch_gains = [] + for cen in cen_coords: + x, y = cen[0]//dx, cen[1]//dy + # We could probably do with some better spatial interpolation here? + col_gains = (col_tabs[0][y][x], col_tabs[1][y][x]) + patch_gains.append(col_gains) + + """ + multiply the r and b channels in each patch by the respective gain, finally + performing the alsc colour correction + """ + for i, gains in enumerate(patch_gains): + r_patchs[i] = r_patchs[i] * gains[0] + b_patchs[i] = b_patchs[i] * gains[1] + + """ + return greyscale patches, g channel and correct r, b channels + """ + return r_patchs, b_patchs, g_patchs diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py new file mode 100644 index 00000000..2e87a667 --- /dev/null +++ b/utils/tuning/libtuning/ctt_ccm.py @@ -0,0 +1,408 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# +# camera tuning tool for CCM (colour correction matrix) + +import logging + +import numpy as np +from scipy.optimize import minimize + +from . import ctt_colors as colors +from .image import Image +from .ctt_awb import get_alsc_patches +from .utils import visualise_macbeth_chart + +logger = logging.getLogger(__name__) + +""" +takes 8-bit macbeth chart values, degammas and returns 16 bit +""" + +''' +This program has many options from which to derive the color matrix. +The first is average. This minimises the average delta E across all patches of +the macbeth chart. Testing across all cameras yielded this as the most color +accurate and vivid. Other options are available however. +Maximum minimises the maximum Delta E of the patches. It iterates through until +a minimum maximum is found (so that there is +not one patch that deviates wildly). +This yields generally good results but overall the colors are less accurate. +Have a fiddle with maximum and see what you think. +The final option allows you to select the patches to average across. +This means that you can bias certain patches, for instance if you want the +reds to be more accurate. +''' + +matrix_selection_types = ["average", "maximum", "patches"] +typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches +test_patches = [1, 2, 5, 8, 9, 12, 14] + +''' +Enter patches to test for. Can also be entered twice if you +would like twice as much bias on one patch.
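As a side note on get_alsc_patches() above, the bisect_left lookup and the linear interpolation between calibration tables can be illustrated with a stripped-down sketch. The tables below are invented and flattened to three entries each; the real ones are 12x16 grids of gains keyed by colour temperature.

import numpy as np
from bisect import bisect_left

colour_cals = {2700: np.array([1.00, 1.20, 1.10]),   # hypothetical tables
               5000: np.array([1.00, 1.05, 1.30])}
col = 4000                                           # image colour temperature

cts = sorted(colour_cals.keys())
pos = bisect_left(cts, col)
if pos % len(cts) == 0:
    # Below the minimum or above the maximum: take the nearest extreme.
    col_tabs = np.array(colour_cals[cts[-pos // len(cts)]])
else:
    # Otherwise interpolate linearly between the two neighbouring tables.
    bef, aft = cts[pos - 1], cts[pos]
    da, db = col - bef, aft - col
    col_tabs = (colour_cals[bef] * db + colour_cals[aft] * da) / (da + db)
# col_tabs is roughly [1.0, 1.115, 1.213], weighted towards the closer 5000 K table.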
+''' + + +def degamma(x): + x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one + x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4) + x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color + return x + + +def gamma(x): + # Take 3 long array of color values and gamma them + return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x] + + +""" +FInds colour correction matrices for list of images +""" + + +def ccm(imgs, cal_cr_list, cal_cb_list): + global matrix_selection_types, typenum + """ + standard macbeth chart colour values + """ + m_rgb = np.array([ # these are in RGB + [116, 81, 67], # dark skin + [199, 147, 129], # light skin + [91, 122, 156], # blue sky + [90, 108, 64], # foliage + [130, 128, 176], # blue flower + [92, 190, 172], # bluish green + [224, 124, 47], # orange + [68, 91, 170], # purplish blue + [198, 82, 97], # moderate red + [94, 58, 106], # purple + [159, 189, 63], # yellow green + [230, 162, 39], # orange yellow + [35, 63, 147], # blue + [67, 149, 74], # green + [180, 49, 57], # red + [238, 198, 20], # yellow + [193, 84, 151], # magenta + [0, 136, 170], # cyan (goes out of gamut) + [245, 245, 243], # white 9.5 + [200, 202, 202], # neutral 8 + [161, 163, 163], # neutral 6.5 + [121, 121, 122], # neutral 5 + [82, 84, 86], # neutral 3.5 + [49, 49, 51] # black 2 + ]) + """ + convert reference colours from srgb to rgb + """ + m_srgb = degamma(m_rgb) # now in 16 bit color. + + # Produce array of LAB values for ideal color chart + m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb] + + """ + reorder reference values to match how patches are ordered + """ + m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3)) + m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3)) + m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3)) + """ + reformat alsc correction tables or set colour_cals to None if alsc is + deactivated + """ + if cal_cr_list is None: + colour_cals = None + else: + colour_cals = {} + for cr, cb in zip(cal_cr_list, cal_cb_list): + cr_tab = cr['table'] + cb_tab = cb['table'] + """ + normalise tables so min value is 1 + """ + cr_tab = cr_tab / np.min(cr_tab) + cb_tab = cb_tab / np.min(cb_tab) + colour_cals[cr['ct']] = [cr_tab, cb_tab] + + """ + for each image, perform awb and alsc corrections. + Then calculate the colour correction matrix for that image, recording the + ccm and the colour tempertaure. + """ + ccm_tab = {} + for Img in imgs: + logger.info('Processing image: ' + Img.name) + """ + get macbeth patches with alsc applied if alsc enabled. + Note: if alsc is disabled then colour_cals will be set to None and no + the function will simply return the macbeth patches + """ + r, b, g = get_alsc_patches(Img, colour_cals, grey=False) + # 256 values for each patch of sRGB values + + """ + do awb + Note: awb is done by measuring the macbeth chart in the image, rather + than from the awb calibration. This is done so the awb will be perfect + and the ccm matrices will be more accurate. 
+ """ + r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4] + r_g = np.mean(r_greys / g_greys) + b_g = np.mean(b_greys / g_greys) + r = r / r_g + b = b / b_g + """ + normalise brightness wrt reference macbeth colours and then average + each channel for each patch + """ + gain = np.mean(m_srgb) / np.mean((r, g, b)) + logger.info(f'Gain with respect to standard colours: {gain:.3f}') + r = np.mean(gain * r, axis=1) + b = np.mean(gain * b, axis=1) + g = np.mean(gain * g, axis=1) + """ + calculate ccm matrix + """ + # ==== All of below should in sRGB ===## + sumde = 0 + ccm = do_ccm(r, g, b, m_srgb) + # This is the initial guess that our optimisation code works with. + original_ccm = ccm + r1 = ccm[0] + r2 = ccm[1] + g1 = ccm[3] + g2 = ccm[4] + b1 = ccm[6] + b2 = ccm[7] + ''' + COLOR MATRIX LOOKS AS BELOW + R1 R2 R3 Rval Outr + G1 G2 G3 * Gval = G + B1 B2 B3 Bval B + Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3 + ''' + + x0 = [r1, r2, g1, g2, b1, b2] + ''' + We use our old CCM as the initial guess for the program to find the + optimised matrix + ''' + result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01) + ''' + This produces a color matrix which has the lowest delta E possible, + based off the input data. Note it is impossible for this to reach + zero since the input data is imperfect + ''' + + [r1, r2, g1, g2, b1, b2] = result.x + # The new, optimised color correction matrix values + # This is the optimised Color Matrix (preserving greys by summing rows up to 1) + optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)] + + logger.info(f'Optimized Matrix: {np.round(optimised_ccm, 4)}') + logger.info(f'Old Matrix: {np.round(ccm, 4)}') + + formatted_ccm = np.array(original_ccm).reshape((3, 3)) + + ''' + below is a whole load of code that then applies the latest color + matrix, and returns LAB values for color. 
This can then be used + to calculate the final delta E + ''' + optimised_ccm_rgb = [] # Original Color Corrected Matrix RGB / LAB + optimised_ccm_lab = [] + + formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3)) + after_gamma_rgb = [] + after_gamma_lab = [] + + for RGB in zip(r, g, b): + ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256)) + optimised_ccm_rgb.append(gamma(ccm_applied_rgb)) + optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb)) + + optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256) + after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb)) + after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb)) + ''' + Gamma After RGB / LAB - not used in calculations, only used for visualisation + We now want to spit out some data that shows + how the optimisation has improved the color matrices + ''' + logger.info("Here are the Improvements") + + # CALCULATE WORST CASE delta e + old_worst_delta_e = 0 + before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab) + new_worst_delta_e = 0 + after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab) + for i in range(24): + old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E + new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E + if old_delta_e > old_worst_delta_e: + old_worst_delta_e = old_delta_e + if new_delta_e > new_worst_delta_e: + new_worst_delta_e = new_delta_e + + logger.info(f'delta E optimized: average: {after_average:.2f} max:{new_worst_delta_e:.2f}') + logger.info(f'delta E old: average: {before_average:.2f} max:{old_worst_delta_e:.2f}') + + visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.color) + str(matrix_selection_types[typenum])) + ''' + The program will also save some visualisations of improvements. + Very pretty to look at. Top rectangle is ideal, Left square is + before optimisation, right square is after. + ''' + + """ + if a ccm has already been calculated for that temperature then don't + overwrite but save both. 
They will then be averaged later on + """ # Now going to use optimised color matrix, optimised_ccm + if Img.color in ccm_tab.keys(): + ccm_tab[Img.color].append(optimised_ccm) + else: + ccm_tab[Img.color] = [optimised_ccm] + + logger.info('Finished processing images') + """ + average any ccms that share a colour temperature + """ + for k, v in ccm_tab.items(): + tab = np.mean(v, axis=0) + tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab) + tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab) + ccm_tab[k] = list(np.round(tab, 5)) + logger.info(f'Matrix calculated for colour temperature of {k} K') + + """ + return all ccms with respective colour temperature in the correct format, + sorted by their colour temperature + """ + sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0]) + ccms = [] + for i in sorted_ccms: + ccms.append({ + 'ct': i[0], + 'ccm': i[1] + }) + return ccms + + +def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code + [r1, r2, g1, g2, b1, b2] = x0 + ccm = np.array([r1, r2, (1 - r1 - r2), + g1, g2, (1 - g1 - g2), + b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly + return transform_and_evaluate(ccm, r, g, b, m_lab) + + +def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix + # create list of matrix changed colors + realrgb = [] + for RGB in zip(r, g, b): + rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied + realrgb.append(colors.RGB_to_LAB(rgb_post_ccm)) + # now compare that with m_lab and return numeric result, averaged for each patch + return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E + + +def sumde(listA, listB): + global typenum, test_patches + sumde = 0 + maxde = 0 + patchde = [] # Create array of the delta E values for each patch. useful for optimisation of certain patches + for listA_item, listB_item in zip(listA, listB): + if maxde < (deltae(listA_item, listB_item)): + maxde = deltae(listA_item, listB_item) + patchde.append(deltae(listA_item, listB_item)) + sumde += deltae(listA_item, listB_item) + ''' + The different options specified at the start allow for + the maximum to be returned, average or specific patches + ''' + if typenum == 0: + return sumde + if typenum == 1: + return maxde + if typenum == 2: + output = sum([patchde[test_patch] for test_patch in test_patches]) + # Selects only certain patches and returns the output for them + return output + + +""" +calculates the ccm for an individual image. +ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3 +matrix, each row must add up to 1 in order to conserve greyness, simplifying +calculation. +The initial CCM is calculated in RGB, and then optimised in LAB color space +This simplifies the initial calculation but then gets us the accuracy of +using LAB color space. 
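The closed-form least squares solution in do_ccm() below can be cross-checked with a generic solver. The sketch uses synthetic patch data (the numbers are random and carry no meaning); per reference channel t it solves alpha*(r - b) + beta*(g - b) ~= t - b and completes each row with 1 - alpha - beta, which should agree with do_ccm() up to floating point error.

import numpy as np

rng = np.random.default_rng(0)
r, g, b = rng.uniform(20, 200, (3, 24))     # synthetic measured patch means
m_srgb = rng.uniform(20, 200, (24, 3))      # synthetic reference values

A = np.column_stack((r - b, g - b))
ccm_check = []
for t in m_srgb.T:                          # reference R, G and B channels
    alpha, beta = np.linalg.lstsq(A, t - b, rcond=None)[0]
    ccm_check.extend([alpha, beta, 1 - alpha - beta])
# ccm_check should match the nine values returned by do_ccm(r, g, b, m_srgb).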
+""" + + +def do_ccm(r, g, b, m_srgb): + rb = r-b + gb = g-b + rb_2s = (rb * rb) + rb_gbs = (rb * gb) + gb_2s = (gb * gb) + + r_rbs = rb * (m_srgb[..., 0] - b) + r_gbs = gb * (m_srgb[..., 0] - b) + g_rbs = rb * (m_srgb[..., 1] - b) + g_gbs = gb * (m_srgb[..., 1] - b) + b_rbs = rb * (m_srgb[..., 2] - b) + b_gbs = gb * (m_srgb[..., 2] - b) + + """ + Obtain least squares fit + """ + rb_2 = np.sum(rb_2s) + gb_2 = np.sum(gb_2s) + rb_gb = np.sum(rb_gbs) + r_rb = np.sum(r_rbs) + r_gb = np.sum(r_gbs) + g_rb = np.sum(g_rbs) + g_gb = np.sum(g_gbs) + b_rb = np.sum(b_rbs) + b_gb = np.sum(b_gbs) + + det = rb_2 * gb_2 - rb_gb * rb_gb + + """ + Raise error if matrix is singular... + This shouldn't really happen with real data but if it does just take new + pictures and try again, not much else to be done unfortunately... + """ + if det < 0.001: + raise ArithmeticError + + r_a = (gb_2 * r_rb - rb_gb * r_gb) / det + r_b = (rb_2 * r_gb - rb_gb * r_rb) / det + """ + Last row can be calculated by knowing the sum must be 1 + """ + r_c = 1 - r_a - r_b + + g_a = (gb_2 * g_rb - rb_gb * g_gb) / det + g_b = (rb_2 * g_gb - rb_gb * g_rb) / det + g_c = 1 - g_a - g_b + + b_a = (gb_2 * b_rb - rb_gb * b_gb) / det + b_b = (rb_2 * b_gb - rb_gb * b_rb) / det + b_c = 1 - b_a - b_b + + """ + format ccm + """ + ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c] + + return ccm + + +def deltae(colorA, colorB): + return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5 + # return ((colorA[1]-colorB[1]) * * 2 + (colorA[2]-colorB[2]) * * 2) * * 0.5 + # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py new file mode 100644 index 00000000..cb4d236b --- /dev/null +++ b/utils/tuning/libtuning/ctt_colors.py @@ -0,0 +1,30 @@ +# Program to convert from RGB to LAB color space +def RGB_to_LAB(RGB): # where RGB is a 1x3 array. 
e.g RGB = [100, 255, 230] + num = 0 + XYZ = [0, 0, 0] + # converted all the three R, G, B to X, Y, Z + X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805 + Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722 + Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505 + + XYZ[0] = X / 255 * 100 + XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255 + XYZ[2] = Z / 255 * 100 + XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65 + XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000 + XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883 + num = 0 + for value in XYZ: + if value > 0.008856: + value = value ** (0.3333333333333333) + else: + value = (7.787 * value) + (16 / 116) + XYZ[num] = value + num = num + 1 + + # L, A, B, values calculated below + L = (116 * XYZ[1]) - 16 + a = 500 * (XYZ[0] - XYZ[1]) + b = 200 * (XYZ[1] - XYZ[2]) + + return [L, a, b] diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py new file mode 100644 index 00000000..01bba302 --- /dev/null +++ b/utils/tuning/libtuning/ctt_ransac.py @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# +# camera tuning tool RANSAC selector for Macbeth chart locator + +import numpy as np + +scale = 2 + + +""" +constructs normalised macbeth chart corners for ransac algorithm +""" +def get_square_verts(c_err=0.05, scale=scale): + """ + define macbeth chart corners + """ + b_bord_x, b_bord_y = scale*8.5, scale*13 + s_bord = 6*scale + side = 41*scale + x_max = side*6 + 5*s_bord + 2*b_bord_x + y_max = side*4 + 3*s_bord + 2*b_bord_y + c1 = (0, 0) + c2 = (0, y_max) + c3 = (x_max, y_max) + c4 = (x_max, 0) + mac_norm = np.array((c1, c2, c3, c4), np.float32) + mac_norm = np.array([mac_norm]) + + square_verts = [] + square_0 = np.array(((0, 0), (0, side), + (side, side), (side, 0)), np.float32) + offset_0 = np.array((b_bord_x, b_bord_y), np.float32) + c_off = side * c_err + offset_cont = np.array(((c_off, c_off), (c_off, -c_off), + (-c_off, -c_off), (-c_off, c_off)), np.float32) + square_0 += offset_0 + square_0 += offset_cont + """ + define macbeth square corners + """ + for i in range(6): + shift_i = np.array(((i*side, 0), (i*side, 0), + (i*side, 0), (i*side, 0)), np.float32) + shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0), + (i*s_bord, 0), (i*s_bord, 0)), np.float32) + square_i = square_0 + shift_i + shift_bord + for j in range(4): + shift_j = np.array(((0, j*side), (0, j*side), + (0, j*side), (0, j*side)), np.float32) + shift_bord = np.array(((0, j*s_bord), + (0, j*s_bord), (0, j*s_bord), + (0, j*s_bord)), np.float32) + square_j = square_i + shift_j + shift_bord + square_verts.append(square_j) + # print('square_verts') + # print(square_verts) + return np.array(square_verts, np.float32), mac_norm + + +def get_square_centres(c_err=0.05, scale=scale): + """ + define macbeth square centres + """ + verts, mac_norm = get_square_verts(c_err, scale=scale) + + centres = np.mean(verts, axis=1) + # print('centres') + # print(centres) + return np.array(centres, np.float32) diff --git a/utils/tuning/libtuning/generators/generator.py b/utils/tuning/libtuning/generators/generator.py index 7c8c9b99..77a8ba4a 100644 --- a/utils/tuning/libtuning/generators/generator.py +++ b/utils/tuning/libtuning/generators/generator.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# generator.py - Base class for a generator to convert dict to tuning file +# Base class for a generator to convert dict to tuning file from 
pathlib import Path diff --git a/utils/tuning/libtuning/generators/raspberrypi_output.py b/utils/tuning/libtuning/generators/raspberrypi_output.py index 813491cd..47b49059 100644 --- a/utils/tuning/libtuning/generators/raspberrypi_output.py +++ b/utils/tuning/libtuning/generators/raspberrypi_output.py @@ -2,7 +2,7 @@ # # Copyright 2022 Raspberry Pi Ltd # -# raspberrypi_output.py - Generate tuning file in Raspberry Pi's json format +# Generate tuning file in Raspberry Pi's json format # # (Copied from ctt_pretty_print_json.py) diff --git a/utils/tuning/libtuning/generators/yaml_output.py b/utils/tuning/libtuning/generators/yaml_output.py index effb4fb3..c490081d 100644 --- a/utils/tuning/libtuning/generators/yaml_output.py +++ b/utils/tuning/libtuning/generators/yaml_output.py @@ -2,15 +2,16 @@ # # Copyright 2022 Paul Elder <paul.elder@ideasonboard.com> # -# yaml_output.py - Generate tuning file in YAML format +# Generate tuning file in YAML format from .generator import Generator from numbers import Number from pathlib import Path -import libtuning.utils as utils +import logging +logger = logging.getLogger(__name__) class YamlOutput(Generator): def __init__(self): @@ -106,13 +107,16 @@ class YamlOutput(Generator): ] for module in output_order: + if module not in output_dict: + continue + out_lines.append(f' - {module.out_name}:') if len(output_dict[module]) == 0: continue if not isinstance(output_dict[module], dict): - utils.eprint(f'Error: Output of {module.type} is not a dictionary') + logger.error(f'Error: Output of {module.type} is not a dictionary') continue lines = self._stringify_dict(output_dict[module]) diff --git a/utils/tuning/libtuning/gradient.py b/utils/tuning/libtuning/gradient.py index 5106f821..b643f502 100644 --- a/utils/tuning/libtuning/gradient.py +++ b/utils/tuning/libtuning/gradient.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# gradient.py - Gradients that can be used to distribute or map numbers +# Gradients that can be used to distribute or map numbers import libtuning as lt diff --git a/utils/tuning/libtuning/image.py b/utils/tuning/libtuning/image.py index aa9d20b5..c8911a0f 100644 --- a/utils/tuning/libtuning/image.py +++ b/utils/tuning/libtuning/image.py @@ -2,7 +2,7 @@ # # Copyright (C) 2019, Raspberry Pi Ltd # -# image.py - Container for an image and associated metadata +# Container for an image and associated metadata import binascii import numpy as np @@ -13,6 +13,9 @@ import re import libtuning as lt import libtuning.utils as utils +import logging + +logger = logging.getLogger(__name__) class Image: @@ -21,17 +24,18 @@ class Image: self.lsc_only = False self.color = -1 self.lux = -1 + self.macbeth = None try: self._load_metadata_exif() except Exception as e: - utils.eprint(f'Failed to load metadata from {self.path}: {e}') + logger.error(f'Failed to load metadata from {self.path}: {e}') raise e try: self._read_image_dng() except Exception as e: - utils.eprint(f'Failed to load image data from {self.path}: {e}') + logger.error(f'Failed to load image data from {self.path}: {e}') raise e @property @@ -79,7 +83,7 @@ class Image: # is R, then G, then G, then B. 
bayer_case = { '0 1 1 2': (lt.Color.R, lt.Color.GR, lt.Color.GB, lt.Color.B), - '1 2 0 1': (lt.Color.GB, lt.Color.R, lt.Color.B, lt.Color.GR), + '1 2 0 1': (lt.Color.GB, lt.Color.B, lt.Color.R, lt.Color.GR), '2 1 1 0': (lt.Color.B, lt.Color.GB, lt.Color.GR, lt.Color.R), '1 0 2 1': (lt.Color.GR, lt.Color.R, lt.Color.B, lt.Color.GB) } diff --git a/utils/tuning/libtuning/libtuning.py b/utils/tuning/libtuning/libtuning.py index d84c148f..bac57323 100644 --- a/utils/tuning/libtuning/libtuning.py +++ b/utils/tuning/libtuning/libtuning.py @@ -2,16 +2,17 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# libtuning.py - An infrastructure for camera tuning tools +# An infrastructure for camera tuning tools import argparse +import logging import libtuning as lt import libtuning.utils as utils -from libtuning.utils import eprint from enum import Enum, IntEnum +logger = logging.getLogger(__name__) class Color(IntEnum): R = 0 @@ -94,7 +95,10 @@ class Tuner(object): self.output = {} def add(self, module): - self.modules.append(module) + if isinstance(module, list): + self.modules.extend(module) + else: + self.modules.append(module) def set_input_parser(self, parser): self.parser = parser @@ -112,10 +116,10 @@ class Tuner(object): for module_type in output_order: modules = [module for module in self.modules if module.type == module_type.type] if len(modules) > 1: - eprint(f'Multiple modules found for module type "{module_type.type}"') + logger.error(f'Multiple modules found for module type "{module_type.type}"') return False if len(modules) < 1: - eprint(f'No module found for module type "{module_type.type}"') + logger.error(f'No module found for module type "{module_type.type}"') return False self.output_order.append(modules[0]) @@ -124,19 +128,19 @@ class Tuner(object): # \todo Validate parser and generator at Tuner construction time? 
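The isinstance() check added to Tuner.add() above lets callers register a whole list of modules in one call, which the reworked rkisp1.py script further down in this diff relies on. A minimal usage sketch, with constructor arguments as used there:

import libtuning as lt
from libtuning.modules.agc import AGCRkISP1
from libtuning.modules.ccm import CCMRkISP1

tuner = lt.Tuner('RkISP1')
agc = AGCRkISP1(debug=[lt.Debug.Plot])
ccm = CCMRkISP1(debug=[lt.Debug.Plot])
tuner.add([agc, ccm])   # list form, extended via the new isinstance() branch
# tuner.add(agc) would still work for a single module, appended as before.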
def _validate_settings(self): if self.parser is None: - eprint('Missing parser') + logger.error('Missing parser') return False if self.generator is None: - eprint('Missing generator') + logger.error('Missing generator') return False if len(self.modules) == 0: - eprint('No modules added') + logger.error('No modules added') return False if len(self.output_order) != len(self.modules): - eprint('Number of outputs does not match number of modules') + logger.error('Number of outputs does not match number of modules') return False return True @@ -183,7 +187,7 @@ class Tuner(object): for module in self.modules: if not module.validate_config(self.config): - eprint(f'Config is invalid for module {module.type}') + logger.error(f'Config is invalid for module {module.type}') return -1 has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules) @@ -192,15 +196,15 @@ class Tuner(object): images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc) if images is None or len(images) == 0: - eprint(f'No images were found, or able to load') + logger.error(f'No images were found, or able to load') return -1 # Do the tuning for module in self.modules: out = module.process(self.config, images, self.output) if out is None: - eprint(f'Module {module.name} failed to process, aborting') - break + logger.warning(f'Module {module.hr_name} failed to process...') + continue self.output[module] = out self.generator.write(args.output, self.output, self.output_order) diff --git a/utils/tuning/libtuning/macbeth.py b/utils/tuning/libtuning/macbeth.py index 5faddf66..4a2006b0 100644 --- a/utils/tuning/libtuning/macbeth.py +++ b/utils/tuning/libtuning/macbeth.py @@ -1,8 +1,9 @@ # SPDX-License-Identifier: BSD-2-Clause # # Copyright (C) 2019, Raspberry Pi Ltd +# Copyright (C) 2024, Ideas on Board Oy # -# macbeth.py - Locate and extract Macbeth charts from images +# Locate and extract Macbeth charts from images # (Copied from: ctt_macbeth_locator.py) # \todo Add debugging @@ -11,8 +12,18 @@ import cv2 import os from pathlib import Path import numpy as np +import warnings +import logging +from sklearn import cluster as cluster -from libtuning.image import Image +from .ctt_ransac import get_square_verts, get_square_centres +from .image import Image + +logger = logging.getLogger(__name__) + + +class MacbethError(Exception): + pass # Reshape image to fixed width without distorting returns image and scale @@ -369,7 +380,9 @@ def get_macbeth_chart(img, ref_data): # Catch macbeth errors and continue with code except MacbethError as error: - eprint(error) + # \todo: This happens so many times in a normal run, that it shadows + # all the relevant output + # logger.warning(error) return (0, None, None, False) @@ -403,10 +416,15 @@ def find_macbeth(img, mac_config): # nothing more is tried as this is a high enough confidence to ensure # reliable macbeth square centre placement. + # Keep a list that will include this and any brightened up versions of + # the image for reuse. 
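For context, cv2.convertScaleAbs(img, alpha=brightness, beta=0) multiplies every pixel by alpha and saturates the result to 8 bits, so the brightened copies kept in this list only help with underexposed charts; already-bright regions simply clip. A tiny illustration with arbitrary values:

import numpy as np
import cv2

patch = np.array([[40, 120, 200]], dtype=np.uint8)
print(cv2.convertScaleAbs(patch, alpha=2, beta=0))   # [[ 80 240 255]]
print(cv2.convertScaleAbs(patch, alpha=4, beta=0))   # [[160 255 255]]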
+ all_images = [img] + for brightness in [2, 4]: if cor >= 0.75: break img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0) + all_images.append(img_br) cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data) if cor_b > cor: cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b @@ -456,23 +474,24 @@ def find_macbeth(img, mac_config): w_inc = int(w * pair['inc']) h_inc = int(h * pair['inc']) - loop = ((1 - pair['sel']) / pair['inc']) + 1 + loop = int(((1 - pair['sel']) / pair['inc']) + 1) # For each subselection, look for a macbeth chart - for i in range(loop): - for j in range(loop): - w_s, h_s = i * w_inc, j * h_inc - img_sel = img[w_s:w_s + w_sel, h_s:h_s + h_sel] - cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data) - - # If the correlation is better than the best then record the - # scale and current subselection at which macbeth chart was - # found. Also record the coordinates, macbeth chart and message. - if cor_ij > cor: - cor = cor_ij - mac, coords, ret = mac_ij, coords_ij, ret_ij - ii, jj = i, j - w_best, h_best = w_inc, h_inc - d_best = index + 1 + for img_br in all_images: + for i in range(loop): + for j in range(loop): + w_s, h_s = i * w_inc, j * h_inc + img_sel = img_br[w_s:w_s + w_sel, h_s:h_s + h_sel] + cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data) + + # If the correlation is better than the best then record the + # scale and current subselection at which macbeth chart was + # found. Also record the coordinates, macbeth chart and message. + if cor_ij > cor: + cor = cor_ij + mac, coords, ret = mac_ij, coords_ij, ret_ij + ii, jj = i, j + w_best, h_best = w_inc, h_inc + d_best = index + 1 # Transform coordinates from subselection to original image if ii != -1: @@ -486,7 +505,7 @@ def find_macbeth(img, mac_config): coords_fit = coords if cor < 0.75: - eprint(f'Warning: Low confidence {cor:.3f} for macbeth chart in {img.path.name}') + logger.warning(f'Low confidence {cor:.3f} for macbeth chart') if show: draw_macbeth_results(img, coords_fit) @@ -499,18 +518,20 @@ def locate_macbeth(image: Image, config: dict): av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16)) av_val = np.mean(av_chan) if av_val < image.blacklevel_16 / (2**16) + 1 / 64: - eprint(f'Image {image.path.name} too dark') + logger.warning(f'Image {image.path.name} too dark') return None macbeth = find_macbeth(av_chan, config['general']['macbeth']) if macbeth is None: - eprint(f'No macbeth chart found in {image.path.name}') + logger.warning(f'No macbeth chart found in {image.path.name}') return None mac_cen_coords = macbeth[1] if not image.get_patches(mac_cen_coords): - eprint(f'Macbeth patches have saturated in {image.path.name}') + logger.warning(f'Macbeth patches have saturated in {image.path.name}') return None + image.macbeth = macbeth + return macbeth diff --git a/utils/tuning/libtuning/macbeth_ref.pgm b/utils/tuning/libtuning/macbeth_ref.pgm index 37897140..089ea91f 100644 --- a/utils/tuning/libtuning/macbeth_ref.pgm +++ b/utils/tuning/libtuning/macbeth_ref.pgm @@ -1,5 +1,5 @@ -# SPDX-License-Identifier: BSD-2-Clause P5 +# SPDX-License-Identifier: BSD-2-Clause # Reference macbeth chart 120 80 255 diff --git a/utils/tuning/libtuning/modules/agc/__init__.py b/utils/tuning/libtuning/modules/agc/__init__.py new file mode 100644 index 00000000..4db9ca37 --- /dev/null +++ b/utils/tuning/libtuning/modules/agc/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024, Paul Elder 
<paul.elder@ideasonboard.com> + +from libtuning.modules.agc.agc import AGC +from libtuning.modules.agc.rkisp1 import AGCRkISP1 diff --git a/utils/tuning/libtuning/modules/agc/agc.py b/utils/tuning/libtuning/modules/agc/agc.py new file mode 100644 index 00000000..9c8899ba --- /dev/null +++ b/utils/tuning/libtuning/modules/agc/agc.py @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + +from ..module import Module + +import libtuning as lt + + +class AGC(Module): + type = 'agc' + hr_name = 'AGC (Base)' + out_name = 'GenericAGC' + + # \todo Add sector shapes and stuff just like lsc + def __init__(self, *, + debug: list): + super().__init__() + + self.debug = debug diff --git a/utils/tuning/libtuning/modules/agc/rkisp1.py b/utils/tuning/libtuning/modules/agc/rkisp1.py new file mode 100644 index 00000000..7147028a --- /dev/null +++ b/utils/tuning/libtuning/modules/agc/rkisp1.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: BSD-2-Clause +# +# Copyright (C) 2019, Raspberry Pi Ltd +# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> +# +# rkisp1.py - AGC module for tuning rkisp1 + +from .agc import AGC + +import libtuning as lt + + +class AGCRkISP1(AGC): + hr_name = 'AGC (RkISP1)' + out_name = 'Agc' + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # We don't actually need anything from the config file + def validate_config(self, config: dict) -> bool: + return True + + def _generate_metering_modes(self) -> dict: + centre_weighted = [ + 0, 0, 0, 0, 0, + 0, 6, 8, 6, 0, + 0, 8, 16, 8, 0, + 0, 6, 8, 6, 0, + 0, 0, 0, 0, 0 + ] + + spot = [ + 0, 0, 0, 0, 0, + 0, 2, 4, 2, 0, + 0, 4, 16, 4, 0, + 0, 2, 4, 2, 0, + 0, 0, 0, 0, 0 + ] + + matrix = [1 for i in range(0, 25)] + + return { + 'MeteringCentreWeighted': centre_weighted, + 'MeteringSpot': spot, + 'MeteringMatrix': matrix + } + + def _generate_exposure_modes(self) -> dict: + normal = {'shutter': [100, 10000, 30000, 60000, 120000], + 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]} + short = {'shutter': [100, 5000, 10000, 20000, 120000], + 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]} + + return {'ExposureNormal': normal, 'ExposureShort': short} + + def _generate_constraint_modes(self) -> dict: + normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}} + highlight = { + 'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}, + 'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8} + } + + return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight} + + def _generate_y_target(self) -> list: + return 0.5 + + def process(self, config: dict, images: list, outputs: dict) -> dict: + output = {} + + output['AeMeteringMode'] = self._generate_metering_modes() + output['AeExposureMode'] = self._generate_exposure_modes() + output['AeConstraintMode'] = self._generate_constraint_modes() + output['relativeLuminanceTarget'] = self._generate_y_target() + + # \todo Debug functionality + + return output diff --git a/utils/tuning/libtuning/modules/ccm/__init__.py b/utils/tuning/libtuning/modules/ccm/__init__.py new file mode 100644 index 00000000..322602af --- /dev/null +++ b/utils/tuning/libtuning/modules/ccm/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + +from libtuning.modules.ccm.ccm import CCM +from libtuning.modules.ccm.rkisp1 import CCMRkISP1 diff --git a/utils/tuning/libtuning/modules/ccm/ccm.py b/utils/tuning/libtuning/modules/ccm/ccm.py new file 
mode 100644 index 00000000..18702f8d --- /dev/null +++ b/utils/tuning/libtuning/modules/ccm/ccm.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> +# Copyright (C) 2024, Ideas on Board +# +# Base Ccm tuning module + +from ..module import Module + +from libtuning.ctt_ccm import ccm +import logging + +logger = logging.getLogger(__name__) + + +class CCM(Module): + type = 'ccm' + hr_name = 'CCM (Base)' + out_name = 'GenericCCM' + + def __init__(self, debug: list): + super().__init__() + + self.debug = debug + + def do_calibration(self, images): + logger.info('Starting CCM calibration') + + imgs = [img for img in images if img.macbeth is not None] + + # todo: Take LSC calibration results into account. + cal_cr_list = None + cal_cb_list = None + + try: + ccms = ccm(imgs, cal_cr_list, cal_cb_list) + except ArithmeticError: + logger.error('CCM calibration failed') + return None + + return ccms diff --git a/utils/tuning/libtuning/modules/ccm/rkisp1.py b/utils/tuning/libtuning/modules/ccm/rkisp1.py new file mode 100644 index 00000000..be0252d9 --- /dev/null +++ b/utils/tuning/libtuning/modules/ccm/rkisp1.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> +# Copyright (C) 2024, Ideas on Board +# +# Ccm module for tuning rkisp1 + +from .ccm import CCM + + +class CCMRkISP1(CCM): + hr_name = 'Crosstalk Correction (RkISP1)' + out_name = 'Ccm' + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # We don't need anything from the config file. + def validate_config(self, config: dict) -> bool: + return True + + def process(self, config: dict, images: list, outputs: dict) -> dict: + output = {} + + ccms = self.do_calibration(images) + output['ccms'] = ccms + + return output diff --git a/utils/tuning/libtuning/modules/lsc/lsc.py b/utils/tuning/libtuning/modules/lsc/lsc.py index 344a07a3..e0ca22eb 100644 --- a/utils/tuning/libtuning/modules/lsc/lsc.py +++ b/utils/tuning/libtuning/modules/lsc/lsc.py @@ -59,7 +59,10 @@ class LSC(Module): def _lsc_single_channel(self, channel: np.array, image: lt.Image, green_grid: np.array = None): grid = self._get_grid(channel, image.w, image.h) - grid -= image.blacklevel_16 + # Clamp the values to a small positive, so that the following 1/grid + # doesn't produce negative results. 
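To see why this clamp matters, consider sector means that sit at or just below the black level (the numbers below are hypothetical): the plain subtraction produces zero or negative entries, so the following 1/grid would yield infinite or negative gains, while the clamp keeps every gain finite and positive.

import numpy as np

blacklevel_16 = 4096                         # hypothetical black level
grid = np.array([4095.0, 4096.0, 4110.0])    # hypothetical sector means

# grid - blacklevel_16 would be [-1, 0, 14], breaking the later 1/grid.
grid = np.maximum(grid - blacklevel_16, 0.1)
print(1 / grid)                              # [10. 10. 0.0714...], all positive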
+ grid = np.maximum(grid - image.blacklevel_16, 0.1) + if green_grid is None: table = np.reshape(1 / grid, self.sector_shape[::-1]) else: diff --git a/utils/tuning/libtuning/modules/lsc/raspberrypi.py b/utils/tuning/libtuning/modules/lsc/raspberrypi.py index 58f5000d..99bc4fe6 100644 --- a/utils/tuning/libtuning/modules/lsc/raspberrypi.py +++ b/utils/tuning/libtuning/modules/lsc/raspberrypi.py @@ -3,7 +3,7 @@ # Copyright (C) 2019, Raspberry Pi Ltd # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# raspberrypi.py - ALSC module for tuning Raspberry Pi +# ALSC module for tuning Raspberry Pi from .lsc import LSC @@ -12,7 +12,9 @@ import libtuning.utils as utils from numbers import Number import numpy as np +import logging +logger = logging.getLogger(__name__) class ALSCRaspberryPi(LSC): # Override the type name so that the parser can match the entry in the @@ -35,7 +37,7 @@ class ALSCRaspberryPi(LSC): def validate_config(self, config: dict) -> bool: if self not in config: - utils.eprint(f'{self.type} not in config') + logger.error(f'{self.type} not in config') return False valid = True @@ -46,14 +48,14 @@ class ALSCRaspberryPi(LSC): color_key = self.do_color.name if lum_key not in conf and self.luminance_strength.required: - utils.eprint(f'{lum_key} is not in config') + logger.error(f'{lum_key} is not in config') valid = False if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1): - utils.eprint(f'Warning: {lum_key} is not in range [0, 1]; defaulting to 0.5') + logger.warning(f'{lum_key} is not in range [0, 1]; defaulting to 0.5') if color_key not in conf and self.do_color.required: - utils.eprint(f'{color_key} is not in config') + logger.error(f'{color_key} is not in config') valid = False return valid @@ -235,7 +237,7 @@ class ALSCRaspberryPi(LSC): if count == 1: output['sigma'] = 0.005 output['sigma_Cb'] = 0.005 - utils.eprint('Warning: Only one alsc calibration found; standard sigmas used for adaptive algorithm.') + logger.warning('Only one alsc calibration found; standard sigmas used for adaptive algorithm.') return output # Obtain worst-case scenario residual sigmas diff --git a/utils/tuning/libtuning/modules/lsc/rkisp1.py b/utils/tuning/libtuning/modules/lsc/rkisp1.py index 5701ae0a..c02b2306 100644 --- a/utils/tuning/libtuning/modules/lsc/rkisp1.py +++ b/utils/tuning/libtuning/modules/lsc/rkisp1.py @@ -3,7 +3,7 @@ # Copyright (C) 2019, Raspberry Pi Ltd # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# rkisp1.py - LSC module for tuning rkisp1 +# LSC module for tuning rkisp1 from .lsc import LSC @@ -33,13 +33,13 @@ class LSCRkISP1(LSC): # table, flattened array of (blue's) green calibration table def _do_single_lsc(self, image: lt.Image): - cgr, gr = self._lsc_single_channel(image.channels[lt.Color.GR], image) - cgb, gb = self._lsc_single_channel(image.channels[lt.Color.GB], image) - - # \todo Should these ratio against the average of both greens or just - # each green like we've done here? - cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, gr) - cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, gb) + # Perform LSC on each colour channel independently. A future enhancement + # worth investigating would be splitting the luminance and chrominance + # LSC as done by Raspberry Pi. 
+ cgr, _ = self._lsc_single_channel(image.channels[lt.Color.GR], image) + cgb, _ = self._lsc_single_channel(image.channels[lt.Color.GB], image) + cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image) + cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image) return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten() @@ -80,7 +80,8 @@ class LSCRkISP1(LSC): tables = [] for lis in [list_cr, list_cgr, list_cgb, list_cb]: table = np.mean(lis[indices], axis=0) - table = output_map_func((1, 3.999), (1024, 4095), table) + table = output_map_func((1, 4), (1024, 4096), table) + table = np.clip(table, 1024, 4095) table = np.round(table).astype('int32').tolist() tables.append(table) @@ -106,6 +107,9 @@ class LSCRkISP1(LSC): output['sets'] = self._do_all_lsc(images) + if len(output['sets']) == 0: + return None + # \todo Validate images from greyscale camera and force grescale mode # \todo Debug functionality diff --git a/utils/tuning/libtuning/modules/module.py b/utils/tuning/libtuning/modules/module.py index 12e2fc7c..de624384 100644 --- a/utils/tuning/libtuning/modules/module.py +++ b/utils/tuning/libtuning/modules/module.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# module.py - Base class for algorithm-specific tuning modules +# Base class for algorithm-specific tuning modules # @var type Type of the module. Defined in the base module. diff --git a/utils/tuning/libtuning/modules/static.py b/utils/tuning/libtuning/modules/static.py new file mode 100644 index 00000000..4d0f7e18 --- /dev/null +++ b/utils/tuning/libtuning/modules/static.py @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024, Ideas on Board +# +# Module implementation for static data + +from .module import Module + + +# This module can be used in cases where the tuning file should contain +# static data. 
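StaticModule, defined next, simply emits a fixed dictionary under the given output name; process() ignores the config and images entirely. The rkisp1 tuning script further down in this diff uses it for algorithm blocks that need no calibration, for example:

from libtuning.modules.static import StaticModule

awb = StaticModule('Awb')                                       # empty block
gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2})

print(gamma_out.process({}, [], {}))   # {'gamma': 2.2}, written as-is under
                                       # the 'GammaOutCorrection' output name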
+class StaticModule(Module): + def __init__(self, out_name: str, output: dict = {}): + super().__init__() + self.out_name = out_name + self.hr_name = f'Static {out_name}' + self.type = f'static_{out_name}' + self.output = output + + def validate_config(self, config: dict) -> bool: + return True + + def process(self, config: dict, images: list, outputs: dict) -> dict: + return self.output diff --git a/utils/tuning/libtuning/parsers/parser.py b/utils/tuning/libtuning/parsers/parser.py index a17d8d71..0c3944c7 100644 --- a/utils/tuning/libtuning/parsers/parser.py +++ b/utils/tuning/libtuning/parsers/parser.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# parser.py - Base class for a parser for a specific format of config file +# Base class for a parser for a specific format of config file class Parser(object): def __init__(self): diff --git a/utils/tuning/libtuning/parsers/raspberrypi_parser.py b/utils/tuning/libtuning/parsers/raspberrypi_parser.py index d26586ba..f1da4592 100644 --- a/utils/tuning/libtuning/parsers/raspberrypi_parser.py +++ b/utils/tuning/libtuning/parsers/raspberrypi_parser.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# raspberrypi_parser.py - Parser for Raspberry Pi config file format +# Parser for Raspberry Pi config file format from .parser import Parser diff --git a/utils/tuning/libtuning/parsers/yaml_parser.py b/utils/tuning/libtuning/parsers/yaml_parser.py index 5c1673a5..1fa6b7a8 100644 --- a/utils/tuning/libtuning/parsers/yaml_parser.py +++ b/utils/tuning/libtuning/parsers/yaml_parser.py @@ -2,16 +2,19 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# yaml_parser.py - Parser for YAML format config file +# Parser for YAML format config file from .parser import Parser +import yaml class YamlParser(Parser): def __init__(self): super().__init__() - # \todo Implement this (it's fine for now as we don't need a config for - # rkisp1 LSC, which is the only user of this so far) def parse(self, config_file: str, modules: list) -> (dict, list): - return {}, [] + # Dummy implementation that just reads the file + with open(config_file, 'r') as f: + config = yaml.safe_load(f) + + return config, [] diff --git a/utils/tuning/libtuning/smoothing.py b/utils/tuning/libtuning/smoothing.py index b8a5a242..de4d920c 100644 --- a/utils/tuning/libtuning/smoothing.py +++ b/utils/tuning/libtuning/smoothing.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# smoothing.py - Wrapper for cv2 smoothing functions to enable duck-typing +# Wrapper for cv2 smoothing functions to enable duck-typing import cv2 diff --git a/utils/tuning/libtuning/utils.py b/utils/tuning/libtuning/utils.py index b60f2c9b..e35cf409 100644 --- a/utils/tuning/libtuning/utils.py +++ b/utils/tuning/libtuning/utils.py @@ -3,8 +3,9 @@ # Copyright (C) 2019, Raspberry Pi Ltd # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# utils.py - Utilities for libtuning +# Utilities for libtuning +import cv2 import decimal import math import numpy as np @@ -12,16 +13,15 @@ import os from pathlib import Path import re import sys +import logging import libtuning as lt from libtuning.image import Image -from libtuning.macbeth import locate_macbeth - -# Utility functions +from .macbeth import locate_macbeth +logger = logging.getLogger(__name__) -def eprint(*args, **kwargs): - print(*args, file=sys.stderr, **kwargs) +# Utility functions def get_module_by_type_name(modules, name): @@ -43,16 
+43,30 @@ def _list_image_files(directory): def _parse_image_filename(fn: Path): - result = re.search(r'^(alsc_)?(\d+)[kK]_(\d+)?[lLuU]?.\w{3,4}$', fn.name) - if result is None: - eprint(f'The file name of {fn.name} is incorrectly formatted') - return None, None, None + lsc_only = False + color_temperature = None + lux = None + + parts = fn.stem.split('_') + for part in parts: + if part == 'alsc': + lsc_only = True + continue + r = re.match(r'(\d+)[kK]', part) + if r: + color_temperature = int(r.group(1)) + continue + r = re.match(r'(\d+)[lLuU]', part) + if r: + lux = int(r.group(1)) + + if color_temperature is None: + logger.error(f'The file name of "{fn.name}" does not contain a color temperature') - color = int(result.group(2)) - lsc_only = result.group(1) is not None - lux = None if lsc_only else int(result.group(3)) + if lux is None and lsc_only is False: + logger.error(f'The file name of "{fn.name}" must either contain alsc or a lux level') - return color, lux, lsc_only + return color_temperature, lux, lsc_only # \todo Implement this from check_imgs() in ctt.py @@ -72,30 +86,34 @@ def _validate_images(images): def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list: files = _list_image_files(input_dir) if len(files) == 0: - eprint(f'No images found in {input_dir}') + logger.error(f'No images found in {input_dir}') return None images = [] for f in files: color, lux, lsc_only = _parse_image_filename(f) + if color is None: + logger.warning(f'Ignoring "{f.name}" as it has no associated color temperature') continue + logger.info(f'Process image "{f.name}" (color={color}, lux={lux}, lsc_only={lsc_only})') + # Skip lsc image if we don't need it if lsc_only and not load_lsc: - eprint(f'Skipping {f.name} as this tuner has no LSC module') + logger.warning(f'Skipping {f.name} as this tuner has no LSC module') continue # Skip non-lsc image if we don't need it if not lsc_only and not load_nonlsc: - eprint(f'Skipping {f.name} as this tuner only has an LSC module') + logger.warning(f'Skipping {f.name} as this tuner only has an LSC module') continue # Load image try: image = Image(f) except Exception as e: - eprint(f'Failed to load image {f.name}: {e}') + logger.error(f'Failed to load image {f.name}: {e}') continue # Populate simple fields @@ -113,7 +131,7 @@ def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) continue # Handle macbeth - macbeth = locate_macbeth(config) + macbeth = locate_macbeth(image, config) if macbeth is None: continue @@ -123,3 +141,46 @@ def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) return None return images + + + +""" +Some code that will save virtual macbeth charts that show the difference between optimised matrices and non optimised matrices + +The function creates an image that is 1550 by 1050 pixels wide, and fills it with patches which are 200x200 pixels in size +Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix +_________________ +| | +| Ideal Color | +|_______________| +| Old | new | +| Color | Color | +|_______|_______| + +Nice way of showing how the optimisation helps change the colors and the color matricies +""" +def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename): + image = np.zeros((1050, 1550, 3), dtype=np.uint8) + colorindex = -1 + for y in range(6): + for x in range(4): # Creates 6 x 4 grid of macbeth chart + colorindex += 1 + xlocation = 50 + 250 * x # Means there is 50px of 
black gap between each square, more like the real macbeth chart. + ylocation = 50 + 250 * y + for g in range(200): + for i in range(100): + image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex] + xlocation = 150 + 250 * x + ylocation = 50 + 250 * y + for i in range(100): + for g in range(100): + image[xlocation + i, ylocation + g] = original_rgb[colorindex] # Smaller squares below to compare the old colors with the new ones + xlocation = 150 + 250 * x + ylocation = 150 + 250 * y + for i in range(100): + for g in range(100): + image[xlocation + i, ylocation + g] = new_rgb[colorindex] + + im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + cv2.imwrite(f'{output_filename} Generated Macbeth Chart.png', im_bgr) + diff --git a/utils/tuning/raspberrypi/alsc.py b/utils/tuning/raspberrypi/alsc.py index 024eb5a3..ba8fc9e1 100644 --- a/utils/tuning/raspberrypi/alsc.py +++ b/utils/tuning/raspberrypi/alsc.py @@ -2,7 +2,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# alsc.py - ALSC module instance for Raspberry Pi tuning scripts +# ALSC module instance for Raspberry Pi tuning scripts import libtuning as lt from libtuning.modules.lsc import ALSCRaspberryPi diff --git a/utils/tuning/raspberrypi_alsc_only.py b/utils/tuning/raspberrypi_alsc_only.py index af04e6a8..777d8007 100755 --- a/utils/tuning/raspberrypi_alsc_only.py +++ b/utils/tuning/raspberrypi_alsc_only.py @@ -3,7 +3,7 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# raspberrypi_alsc_only.py - Tuning script for raspberrypi, ALSC only +# Tuning script for raspberrypi, ALSC only import sys diff --git a/utils/tuning/requirements.txt b/utils/tuning/requirements.txt new file mode 100644 index 00000000..3705769b --- /dev/null +++ b/utils/tuning/requirements.txt @@ -0,0 +1,9 @@ +coloredlogs +matplotlib +numpy +opencv-python +py3exiv2 +pyyaml +rawpy +scikit-learn +scipy diff --git a/utils/tuning/rkisp1.py b/utils/tuning/rkisp1.py index 1cea6ddb..f5c42a61 100755 --- a/utils/tuning/rkisp1.py +++ b/utils/tuning/rkisp1.py @@ -3,38 +3,53 @@ # # Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com> # -# rkisp1.py - Tuning script for rkisp1 +# Tuning script for rkisp1 +import coloredlogs +import logging import sys import libtuning as lt from libtuning.parsers import YamlParser from libtuning.generators import YamlOutput from libtuning.modules.lsc import LSCRkISP1 +from libtuning.modules.agc import AGCRkISP1 +from libtuning.modules.ccm import CCMRkISP1 +from libtuning.modules.static import StaticModule + +coloredlogs.install(level=logging.INFO, fmt='%(name)s %(levelname)s %(message)s') + +agc = AGCRkISP1(debug=[lt.Debug.Plot]) +awb = StaticModule('Awb') +blc = StaticModule('BlackLevelCorrection') +ccm = CCMRkISP1(debug=[lt.Debug.Plot]) +color_processing = StaticModule('ColorProcessing') +filter = StaticModule('Filter') +gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2}) +lsc = LSCRkISP1(debug=[lt.Debug.Plot], + # This is for the actual LSC tuning, and is part of the base LSC + # module. rkisp1's table sector sizes (16x16 programmed as mirrored + # 8x8) are separate, and is hardcoded in its specific LSC tuning + # module. + sector_shape=(17, 17), + + sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront), + sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront), + + # This is the function that will be used to average the pixels in + # each sector. This can also be a custom function. 
+ sector_average_function=lt.average.Mean(), + + # This is the function that will be used to smooth the color ratio + # values. This can also be a custom function. + smoothing_function=lt.smoothing.MedianBlur(3),) tuner = lt.Tuner('RkISP1') -tuner.add(LSCRkISP1( - debug=[lt.Debug.Plot], - # This is for the actual LSC tuning, and is part of the base LSC - # module. rkisp1's table sector sizes (16x16 programmed as mirrored - # 8x8) are separate, and is hardcoded in its specific LSC tuning - # module. - sector_shape=(17, 17), - - sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront), - sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront), - - # This is the function that will be used to average the pixels in - # each sector. This can also be a custom function. - sector_average_function=lt.average.Mean(), - - # This is the function that will be used to smooth the color ratio - # values. This can also be a custom function. - smoothing_function=lt.smoothing.MedianBlur(3), - )) +tuner.add([agc, awb, blc, ccm, color_processing, filter, gamma_out, lsc]) tuner.set_input_parser(YamlParser()) tuner.set_output_formatter(YamlOutput()) -tuner.set_output_order([LSCRkISP1]) +tuner.set_output_order([agc, awb, blc, ccm, color_processing, + filter, gamma_out, lsc]) if __name__ == '__main__': sys.exit(tuner.run(sys.argv)) diff --git a/utils/update-kernel-headers.sh b/utils/update-kernel-headers.sh index 590986d2..9a64dfb5 100755 --- a/utils/update-kernel-headers.sh +++ b/utils/update-kernel-headers.sh @@ -9,7 +9,7 @@ if [ $# != 1 ] ; then fi header_dir="$(dirname "$(realpath "$0")")/../include/linux" -kernel_dir="$1" +kernel_dir="$(realpath "$1")" # Bail out if the directory doesn't contain kernel sources line=$(head -3 "${kernel_dir}/Kbuild" 2>/dev/null | tail -1) @@ -52,6 +52,7 @@ headers=" linux/media-bus-format.h linux/media.h linux/rkisp1-config.h + linux/udmabuf.h linux/v4l2-common.h linux/v4l2-controls.h linux/v4l2-mediabus.h |