-rw-r--r--.clang-format7
-rw-r--r--.editorconfig29
-rw-r--r--.gitignore1
-rw-r--r--Documentation/Doxyfile-common.in (renamed from Documentation/Doxyfile.in)24
-rw-r--r--Documentation/Doxyfile-internal.in33
-rw-r--r--Documentation/Doxyfile-public.in20
-rw-r--r--Documentation/api-html/index.rst4
-rw-r--r--Documentation/camera-sensor-model.rst2
-rw-r--r--Documentation/code-of-conduct.rst2
-rw-r--r--Documentation/coding-style.rst6
-rw-r--r--Documentation/conf.py10
-rw-r--r--Documentation/design/ae.rst331
-rw-r--r--Documentation/docs.rst400
-rw-r--r--Documentation/documentation-contents.rst35
-rw-r--r--Documentation/environment_variables.rst20
-rw-r--r--Documentation/feature_requirements.rst150
-rwxr-xr-xDocumentation/gen-doxyfile.py46
-rw-r--r--Documentation/getting-started.rst1
-rw-r--r--Documentation/guides/application-developer.rst16
-rw-r--r--Documentation/guides/introduction.rst319
-rw-r--r--Documentation/guides/ipa.rst2
-rw-r--r--Documentation/guides/pipeline-handler.rst15
-rw-r--r--Documentation/guides/tracing.rst2
-rw-r--r--Documentation/index.rst27
-rw-r--r--Documentation/internal-api-html/index.rst8
-rw-r--r--Documentation/introduction.rst224
-rw-r--r--Documentation/lens_driver_requirements.rst2
-rw-r--r--Documentation/libcamera_architecture.rst168
-rw-r--r--Documentation/mainpage.dox33
-rw-r--r--Documentation/mali-c55.dot25
-rw-r--r--Documentation/meson.build92
-rw-r--r--Documentation/python-bindings.rst2
-rw-r--r--Documentation/sensor_driver_requirements.rst2
-rw-r--r--Documentation/software-isp-benchmarking.rst79
-rw-r--r--Documentation/theme/static/css/theme.css6
-rw-r--r--Documentation/thread-safety.dox44
-rw-r--r--README.rst24
-rw-r--r--include/libcamera/base/backtrace.h2
-rw-r--r--include/libcamera/base/bound_method.h2
-rw-r--r--include/libcamera/base/class.h2
-rw-r--r--include/libcamera/base/compiler.h14
-rw-r--r--include/libcamera/base/event_dispatcher.h4
-rw-r--r--include/libcamera/base/event_dispatcher_poll.h2
-rw-r--r--include/libcamera/base/event_notifier.h2
-rw-r--r--include/libcamera/base/file.h6
-rw-r--r--include/libcamera/base/flags.h2
-rw-r--r--include/libcamera/base/log.h27
-rw-r--r--include/libcamera/base/memfd.h32
-rw-r--r--include/libcamera/base/meson.build2
-rw-r--r--include/libcamera/base/message.h2
-rw-r--r--include/libcamera/base/mutex.h2
-rw-r--r--include/libcamera/base/object.h5
-rw-r--r--include/libcamera/base/private.h2
-rw-r--r--include/libcamera/base/semaphore.h2
-rw-r--r--include/libcamera/base/shared_fd.h2
-rw-r--r--include/libcamera/base/signal.h17
-rw-r--r--include/libcamera/base/span.h3
-rw-r--r--include/libcamera/base/thread.h13
-rw-r--r--include/libcamera/base/thread_annotations.h2
-rw-r--r--include/libcamera/base/timer.h3
-rw-r--r--include/libcamera/base/unique_fd.h5
-rw-r--r--include/libcamera/base/utils.h65
-rw-r--r--include/libcamera/camera.h2
-rw-r--r--include/libcamera/camera_manager.h2
-rw-r--r--include/libcamera/color_space.h2
-rw-r--r--include/libcamera/control_ids.h.in46
-rw-r--r--include/libcamera/controls.h77
-rw-r--r--include/libcamera/fence.h2
-rw-r--r--include/libcamera/formats.h.in2
-rw-r--r--include/libcamera/framebuffer.h3
-rw-r--r--include/libcamera/framebuffer_allocator.h2
-rw-r--r--include/libcamera/geometry.h48
-rw-r--r--include/libcamera/internal/bayer_format.h4
-rw-r--r--include/libcamera/internal/byte_stream_buffer.h2
-rw-r--r--include/libcamera/internal/camera.h4
-rw-r--r--include/libcamera/internal/camera_controls.h2
-rw-r--r--include/libcamera/internal/camera_lens.h3
-rw-r--r--include/libcamera/internal/camera_manager.h11
-rw-r--r--include/libcamera/internal/camera_sensor.h157
-rw-r--r--include/libcamera/internal/camera_sensor_properties.h11
-rw-r--r--include/libcamera/internal/control_serializer.h2
-rw-r--r--include/libcamera/internal/control_validator.h2
-rw-r--r--include/libcamera/internal/converter.h43
-rw-r--r--include/libcamera/internal/converter/converter_v4l2_m2m.h59
-rw-r--r--include/libcamera/internal/debug_controls.h46
-rw-r--r--include/libcamera/internal/delayed_controls.h2
-rw-r--r--include/libcamera/internal/device_enumerator.h2
-rw-r--r--include/libcamera/internal/device_enumerator_sysfs.h3
-rw-r--r--include/libcamera/internal/device_enumerator_udev.h2
-rw-r--r--include/libcamera/internal/dma_buf_allocator.h80
-rw-r--r--include/libcamera/internal/formats.h3
-rw-r--r--include/libcamera/internal/framebuffer.h3
-rw-r--r--include/libcamera/internal/ipa_data_serializer.h14
-rw-r--r--include/libcamera/internal/ipa_manager.h11
-rw-r--r--include/libcamera/internal/ipa_module.h2
-rw-r--r--include/libcamera/internal/ipa_proxy.h7
-rw-r--r--include/libcamera/internal/ipc_pipe.h3
-rw-r--r--include/libcamera/internal/ipc_pipe_unixsocket.h4
-rw-r--r--include/libcamera/internal/ipc_unixsocket.h2
-rw-r--r--include/libcamera/internal/mapped_framebuffer.h2
-rw-r--r--include/libcamera/internal/matrix.h201
-rw-r--r--include/libcamera/internal/media_device.h3
-rw-r--r--include/libcamera/internal/media_object.h10
-rw-r--r--include/libcamera/internal/meson.build24
-rw-r--r--include/libcamera/internal/pipeline_handler.h24
-rw-r--r--include/libcamera/internal/process.h2
-rw-r--r--include/libcamera/internal/pub_key.h2
-rw-r--r--include/libcamera/internal/request.h4
-rw-r--r--include/libcamera/internal/shared_mem_object.h126
-rw-r--r--include/libcamera/internal/software_isp/debayer_params.h28
-rw-r--r--include/libcamera/internal/software_isp/meson.build7
-rw-r--r--include/libcamera/internal/software_isp/software_isp.h108
-rw-r--r--include/libcamera/internal/software_isp/swisp_stats.h49
-rw-r--r--include/libcamera/internal/source_paths.h2
-rw-r--r--include/libcamera/internal/sysfs.h2
-rw-r--r--include/libcamera/internal/tracepoints.h.in4
-rw-r--r--include/libcamera/internal/tracepoints/request.tp2
-rw-r--r--include/libcamera/internal/v4l2_device.h3
-rw-r--r--include/libcamera/internal/v4l2_pixelformat.h4
-rw-r--r--include/libcamera/internal/v4l2_subdevice.h6
-rw-r--r--include/libcamera/internal/v4l2_videodevice.h3
-rw-r--r--include/libcamera/internal/vector.h366
-rw-r--r--include/libcamera/internal/yaml_parser.h53
-rw-r--r--include/libcamera/ipa/ipa_controls.h5
-rw-r--r--include/libcamera/ipa/ipa_interface.h19
-rw-r--r--include/libcamera/ipa/ipa_module_info.h2
-rw-r--r--include/libcamera/ipa/ipu3.mojom8
-rw-r--r--include/libcamera/ipa/mali-c55.mojom34
-rw-r--r--include/libcamera/ipa/meson.build31
-rw-r--r--include/libcamera/ipa/raspberrypi.mojom6
-rw-r--r--include/libcamera/ipa/rkisp1.mojom9
-rw-r--r--include/libcamera/ipa/soft.mojom36
-rw-r--r--include/libcamera/ipa/vimc.mojom4
-rw-r--r--include/libcamera/logging.h4
-rw-r--r--include/libcamera/meson.build43
-rw-r--r--include/libcamera/orientation.h2
-rw-r--r--include/libcamera/pixel_format.h3
-rw-r--r--include/libcamera/property_ids.h.in32
-rw-r--r--include/libcamera/request.h3
-rw-r--r--include/libcamera/stream.h5
-rw-r--r--include/libcamera/transform.h4
-rw-r--r--include/libcamera/version.h.in2
-rw-r--r--include/linux/README2
-rw-r--r--include/linux/dma-heap.h2
-rw-r--r--include/linux/drm_fourcc.h36
-rw-r--r--include/linux/intel-ipu3.h3
-rw-r--r--include/linux/mali-c55-config.h909
-rw-r--r--include/linux/media-bus-format.h22
-rw-r--r--include/linux/media.h1
-rw-r--r--include/linux/rkisp1-config.h634
-rw-r--r--include/linux/udmabuf.h33
-rw-r--r--include/linux/v4l2-controls.h8
-rw-r--r--include/linux/v4l2-mediabus.h18
-rw-r--r--include/linux/v4l2-subdev.h34
-rw-r--r--include/linux/videodev2.h111
-rw-r--r--meson.build56
-rw-r--r--meson_options.txt17
-rw-r--r--src/android/camera3_hal.cpp2
-rw-r--r--src/android/camera_buffer.h2
-rw-r--r--src/android/camera_capabilities.cpp48
-rw-r--r--src/android/camera_capabilities.h2
-rw-r--r--src/android/camera_device.cpp70
-rw-r--r--src/android/camera_device.h2
-rw-r--r--src/android/camera_hal_config.cpp2
-rw-r--r--src/android/camera_hal_config.h2
-rw-r--r--src/android/camera_hal_manager.cpp2
-rw-r--r--src/android/camera_hal_manager.h2
-rw-r--r--src/android/camera_metadata.cpp2
-rw-r--r--src/android/camera_metadata.h2
-rw-r--r--src/android/camera_ops.cpp2
-rw-r--r--src/android/camera_ops.h2
-rw-r--r--src/android/camera_request.cpp2
-rw-r--r--src/android/camera_request.h2
-rw-r--r--src/android/camera_stream.cpp2
-rw-r--r--src/android/camera_stream.h2
-rw-r--r--src/android/cros/camera3_hal.cpp2
-rw-r--r--src/android/cros_mojo_token.h2
-rw-r--r--src/android/frame_buffer_allocator.h2
-rw-r--r--src/android/hal_framebuffer.cpp2
-rw-r--r--src/android/hal_framebuffer.h2
-rw-r--r--src/android/jpeg/encoder.h2
-rw-r--r--src/android/jpeg/encoder_jea.cpp2
-rw-r--r--src/android/jpeg/encoder_jea.h2
-rw-r--r--src/android/jpeg/encoder_libjpeg.cpp8
-rw-r--r--src/android/jpeg/encoder_libjpeg.h2
-rw-r--r--src/android/jpeg/exif.cpp2
-rw-r--r--src/android/jpeg/exif.h2
-rw-r--r--src/android/jpeg/post_processor_jpeg.cpp2
-rw-r--r--src/android/jpeg/post_processor_jpeg.h2
-rw-r--r--src/android/jpeg/thumbnailer.cpp2
-rw-r--r--src/android/jpeg/thumbnailer.h2
-rw-r--r--src/android/meson.build22
-rw-r--r--src/android/mm/cros_camera_buffer.cpp2
-rw-r--r--src/android/mm/cros_frame_buffer_allocator.cpp3
-rw-r--r--src/android/mm/generic_camera_buffer.cpp2
-rw-r--r--src/android/mm/generic_frame_buffer_allocator.cpp2
-rw-r--r--src/android/mm/libhardware_stub.c2
-rw-r--r--src/android/post_processor.h2
-rw-r--r--src/android/yuv/post_processor_yuv.cpp2
-rw-r--r--src/android/yuv/post_processor_yuv.h2
-rw-r--r--src/apps/cam/camera_session.cpp69
-rw-r--r--src/apps/cam/camera_session.h2
-rw-r--r--src/apps/cam/capture_script.cpp2
-rw-r--r--src/apps/cam/capture_script.h2
-rw-r--r--src/apps/cam/drm.cpp2
-rw-r--r--src/apps/cam/drm.h2
-rw-r--r--src/apps/cam/file_sink.cpp62
-rw-r--r--src/apps/cam/file_sink.h20
-rw-r--r--src/apps/cam/frame_sink.cpp2
-rw-r--r--src/apps/cam/frame_sink.h2
-rw-r--r--src/apps/cam/kms_sink.cpp2
-rw-r--r--src/apps/cam/kms_sink.h2
-rw-r--r--src/apps/cam/main.cpp6
-rw-r--r--src/apps/cam/main.h2
-rw-r--r--src/apps/cam/sdl_sink.cpp2
-rw-r--r--src/apps/cam/sdl_sink.h2
-rw-r--r--src/apps/cam/sdl_texture.cpp2
-rw-r--r--src/apps/cam/sdl_texture.h2
-rw-r--r--src/apps/cam/sdl_texture_mjpg.cpp2
-rw-r--r--src/apps/cam/sdl_texture_mjpg.h2
-rw-r--r--src/apps/cam/sdl_texture_yuv.cpp2
-rw-r--r--src/apps/cam/sdl_texture_yuv.h2
-rw-r--r--src/apps/common/dng_writer.cpp225
-rw-r--r--src/apps/common/dng_writer.h3
-rw-r--r--src/apps/common/event_loop.cpp66
-rw-r--r--src/apps/common/event_loop.h25
-rw-r--r--src/apps/common/image.cpp2
-rw-r--r--src/apps/common/image.h2
-rw-r--r--src/apps/common/options.cpp12
-rw-r--r--src/apps/common/options.h2
-rw-r--r--src/apps/common/ppm_writer.cpp9
-rw-r--r--src/apps/common/ppm_writer.h2
-rw-r--r--src/apps/common/stream_options.cpp2
-rw-r--r--src/apps/common/stream_options.h2
-rw-r--r--src/apps/ipa-verify/main.cpp2
-rw-r--r--src/apps/lc-compliance/environment.cpp2
-rw-r--r--src/apps/lc-compliance/environment.h4
-rw-r--r--src/apps/lc-compliance/helpers/capture.cpp164
-rw-r--r--src/apps/lc-compliance/helpers/capture.h53
-rw-r--r--src/apps/lc-compliance/main.cpp44
-rw-r--r--src/apps/lc-compliance/tests/capture_test.cpp23
-rw-r--r--src/apps/qcam/assets/shader/bayer_8.vert4
-rw-r--r--src/apps/qcam/assets/shader/identity.vert3
-rw-r--r--src/apps/qcam/cam_select_dialog.cpp12
-rw-r--r--src/apps/qcam/cam_select_dialog.h2
-rw-r--r--src/apps/qcam/format_converter.cpp4
-rw-r--r--src/apps/qcam/format_converter.h2
-rw-r--r--src/apps/qcam/main.cpp6
-rw-r--r--src/apps/qcam/main_window.cpp69
-rw-r--r--src/apps/qcam/main_window.h4
-rw-r--r--src/apps/qcam/meson.build49
-rw-r--r--src/apps/qcam/message_handler.cpp2
-rw-r--r--src/apps/qcam/message_handler.h2
-rw-r--r--src/apps/qcam/viewfinder.h2
-rw-r--r--src/apps/qcam/viewfinder_gl.cpp54
-rw-r--r--src/apps/qcam/viewfinder_gl.h5
-rw-r--r--src/apps/qcam/viewfinder_qt.cpp53
-rw-r--r--src/apps/qcam/viewfinder_qt.h4
-rw-r--r--src/gstreamer/gstlibcamera-controls.cpp.in332
-rw-r--r--src/gstreamer/gstlibcamera-controls.h43
-rw-r--r--src/gstreamer/gstlibcamera-utils.cpp129
-rw-r--r--src/gstreamer/gstlibcamera-utils.h7
-rw-r--r--src/gstreamer/gstlibcamera.cpp2
-rw-r--r--src/gstreamer/gstlibcameraallocator.cpp26
-rw-r--r--src/gstreamer/gstlibcameraallocator.h2
-rw-r--r--src/gstreamer/gstlibcamerapad.cpp2
-rw-r--r--src/gstreamer/gstlibcamerapad.h2
-rw-r--r--src/gstreamer/gstlibcamerapool.cpp43
-rw-r--r--src/gstreamer/gstlibcamerapool.h2
-rw-r--r--src/gstreamer/gstlibcameraprovider.cpp17
-rw-r--r--src/gstreamer/gstlibcameraprovider.h2
-rw-r--r--src/gstreamer/gstlibcamerasrc.cpp114
-rw-r--r--src/gstreamer/gstlibcamerasrc.h33
-rw-r--r--src/gstreamer/meson.build10
-rwxr-xr-xsrc/ipa/ipa-sign-install.sh2
-rwxr-xr-xsrc/ipa/ipa-sign.sh2
-rw-r--r--src/ipa/ipu3/algorithms/af.cpp5
-rw-r--r--src/ipa/ipu3/algorithms/af.h2
-rw-r--r--src/ipa/ipu3/algorithms/agc.cpp310
-rw-r--r--src/ipa/ipu3/algorithms/agc.h33
-rw-r--r--src/ipa/ipu3/algorithms/algorithm.h2
-rw-r--r--src/ipa/ipu3/algorithms/awb.cpp71
-rw-r--r--src/ipa/ipu3/algorithms/awb.h21
-rw-r--r--src/ipa/ipu3/algorithms/blc.cpp8
-rw-r--r--src/ipa/ipu3/algorithms/blc.h2
-rw-r--r--src/ipa/ipu3/algorithms/tone_mapping.cpp2
-rw-r--r--src/ipa/ipu3/algorithms/tone_mapping.h2
-rw-r--r--src/ipa/ipu3/ipa_context.cpp17
-rw-r--r--src/ipa/ipu3/ipa_context.h16
-rw-r--r--src/ipa/ipu3/ipu3-ipa-design-guide.rst14
-rw-r--r--src/ipa/ipu3/ipu3.cpp61
-rw-r--r--src/ipa/ipu3/meson.build8
-rw-r--r--src/ipa/ipu3/module.h2
-rw-r--r--src/ipa/libipa/agc_mean_luminance.cpp578
-rw-r--r--src/ipa/libipa/agc_mean_luminance.h98
-rw-r--r--src/ipa/libipa/algorithm.cpp2
-rw-r--r--src/ipa/libipa/algorithm.h2
-rw-r--r--src/ipa/libipa/awb.cpp265
-rw-r--r--src/ipa/libipa/awb.h66
-rw-r--r--src/ipa/libipa/awb_bayes.cpp505
-rw-r--r--src/ipa/libipa/awb_bayes.h59
-rw-r--r--src/ipa/libipa/awb_grey.cpp114
-rw-r--r--src/ipa/libipa/awb_grey.h36
-rw-r--r--src/ipa/libipa/camera_sensor_helper.cpp413
-rw-r--r--src/ipa/libipa/camera_sensor_helper.h26
-rw-r--r--src/ipa/libipa/colours.cpp81
-rw-r--r--src/ipa/libipa/colours.h23
-rw-r--r--src/ipa/libipa/exposure_mode_helper.cpp242
-rw-r--r--src/ipa/libipa/exposure_mode_helper.h53
-rw-r--r--src/ipa/libipa/fc_queue.cpp2
-rw-r--r--src/ipa/libipa/fc_queue.h23
-rw-r--r--src/ipa/libipa/fixedpoint.cpp42
-rw-r--r--src/ipa/libipa/fixedpoint.h65
-rw-r--r--src/ipa/libipa/histogram.cpp34
-rw-r--r--src/ipa/libipa/histogram.h19
-rw-r--r--src/ipa/libipa/interpolator.cpp163
-rw-r--r--src/ipa/libipa/interpolator.h136
-rw-r--r--src/ipa/libipa/lsc_polynomial.cpp81
-rw-r--r--src/ipa/libipa/lsc_polynomial.h105
-rw-r--r--src/ipa/libipa/lux.cpp173
-rw-r--r--src/ipa/libipa/lux.h41
-rw-r--r--src/ipa/libipa/meson.build26
-rw-r--r--src/ipa/libipa/module.cpp2
-rw-r--r--src/ipa/libipa/module.h2
-rw-r--r--src/ipa/libipa/pwl.cpp462
-rw-r--r--src/ipa/libipa/pwl.h86
-rw-r--r--src/ipa/mali-c55/algorithms/agc.cpp410
-rw-r--r--src/ipa/mali-c55/algorithms/agc.h81
-rw-r--r--src/ipa/mali-c55/algorithms/algorithm.h39
-rw-r--r--src/ipa/mali-c55/algorithms/awb.cpp230
-rw-r--r--src/ipa/mali-c55/algorithms/awb.h40
-rw-r--r--src/ipa/mali-c55/algorithms/blc.cpp140
-rw-r--r--src/ipa/mali-c55/algorithms/blc.h42
-rw-r--r--src/ipa/mali-c55/algorithms/lsc.cpp216
-rw-r--r--src/ipa/mali-c55/algorithms/lsc.h45
-rw-r--r--src/ipa/mali-c55/algorithms/meson.build8
-rw-r--r--src/ipa/mali-c55/data/imx415.yaml325
-rw-r--r--src/ipa/mali-c55/data/meson.build9
-rw-r--r--src/ipa/mali-c55/data/uncalibrated.yaml7
-rw-r--r--src/ipa/mali-c55/ipa_context.cpp101
-rw-r--r--src/ipa/mali-c55/ipa_context.h90
-rw-r--r--src/ipa/mali-c55/mali-c55.cpp399
-rw-r--r--src/ipa/mali-c55/meson.build33
-rw-r--r--src/ipa/mali-c55/module.h27
-rw-r--r--src/ipa/meson.build3
-rw-r--r--src/ipa/rkisp1/algorithms/agc.cpp600
-rw-r--r--src/ipa/rkisp1/algorithms/agc.h26
-rw-r--r--src/ipa/rkisp1/algorithms/algorithm.h2
-rw-r--r--src/ipa/rkisp1/algorithms/awb.cpp385
-rw-r--r--src/ipa/rkisp1/algorithms/awb.h17
-rw-r--r--src/ipa/rkisp1/algorithms/blc.cpp137
-rw-r--r--src/ipa/rkisp1/algorithms/blc.h13
-rw-r--r--src/ipa/rkisp1/algorithms/ccm.cpp130
-rw-r--r--src/ipa/rkisp1/algorithms/ccm.h50
-rw-r--r--src/ipa/rkisp1/algorithms/cproc.cpp75
-rw-r--r--src/ipa/rkisp1/algorithms/cproc.h7
-rw-r--r--src/ipa/rkisp1/algorithms/dpcc.cpp12
-rw-r--r--src/ipa/rkisp1/algorithms/dpcc.h4
-rw-r--r--src/ipa/rkisp1/algorithms/dpf.cpp33
-rw-r--r--src/ipa/rkisp1/algorithms/dpf.h4
-rw-r--r--src/ipa/rkisp1/algorithms/filter.cpp58
-rw-r--r--src/ipa/rkisp1/algorithms/filter.h4
-rw-r--r--src/ipa/rkisp1/algorithms/goc.cpp149
-rw-r--r--src/ipa/rkisp1/algorithms/goc.h42
-rw-r--r--src/ipa/rkisp1/algorithms/gsl.cpp22
-rw-r--r--src/ipa/rkisp1/algorithms/gsl.h4
-rw-r--r--src/ipa/rkisp1/algorithms/lsc.cpp438
-rw-r--r--src/ipa/rkisp1/algorithms/lsc.h19
-rw-r--r--src/ipa/rkisp1/algorithms/lux.cpp76
-rw-r--r--src/ipa/rkisp1/algorithms/lux.h36
-rw-r--r--src/ipa/rkisp1/algorithms/meson.build3
-rw-r--r--src/ipa/rkisp1/data/imx219.yaml4
-rw-r--r--src/ipa/rkisp1/data/imx258.yaml1
-rw-r--r--src/ipa/rkisp1/data/meson.build4
-rw-r--r--src/ipa/rkisp1/data/ov4689.yaml4
-rw-r--r--src/ipa/rkisp1/data/ov5640.yaml4
-rw-r--r--src/ipa/rkisp1/data/uncalibrated.yaml1
-rw-r--r--src/ipa/rkisp1/ipa_context.cpp163
-rw-r--r--src/ipa/rkisp1/ipa_context.h95
-rw-r--r--src/ipa/rkisp1/meson.build9
-rw-r--r--src/ipa/rkisp1/module.h5
-rw-r--r--src/ipa/rkisp1/params.cpp222
-rw-r--r--src/ipa/rkisp1/params.h163
-rw-r--r--src/ipa/rkisp1/rkisp1.cpp111
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper.cpp16
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper.h11
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx219.cpp4
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx283.cpp61
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx290.cpp21
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx296.cpp13
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx415.cpp64
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx477.cpp17
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx519.cpp17
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_imx708.cpp19
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp17
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp14
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp54
-rw-r--r--src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp16
-rw-r--r--src/ipa/rpi/cam_helper/md_parser.h2
-rw-r--r--src/ipa/rpi/cam_helper/md_parser_smia.cpp2
-rw-r--r--src/ipa/rpi/cam_helper/meson.build3
-rw-r--r--src/ipa/rpi/common/ipa_base.cpp300
-rw-r--r--src/ipa/rpi/common/ipa_base.h14
-rw-r--r--src/ipa/rpi/controller/af_status.h2
-rw-r--r--src/ipa/rpi/controller/agc_algorithm.h16
-rw-r--r--src/ipa/rpi/controller/agc_status.h6
-rw-r--r--src/ipa/rpi/controller/algorithm.cpp2
-rw-r--r--src/ipa/rpi/controller/algorithm.h2
-rw-r--r--src/ipa/rpi/controller/alsc_status.h2
-rw-r--r--src/ipa/rpi/controller/awb_algorithm.h3
-rw-r--r--src/ipa/rpi/controller/awb_status.h2
-rw-r--r--src/ipa/rpi/controller/black_level_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/black_level_status.h2
-rw-r--r--src/ipa/rpi/controller/cac_status.h2
-rw-r--r--src/ipa/rpi/controller/camera_mode.h8
-rw-r--r--src/ipa/rpi/controller/ccm_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/ccm_status.h2
-rw-r--r--src/ipa/rpi/controller/contrast_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/contrast_status.h6
-rw-r--r--src/ipa/rpi/controller/controller.cpp4
-rw-r--r--src/ipa/rpi/controller/controller.h3
-rw-r--r--src/ipa/rpi/controller/denoise_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/denoise_status.h2
-rw-r--r--src/ipa/rpi/controller/device_status.cpp4
-rw-r--r--src/ipa/rpi/controller/device_status.h10
-rw-r--r--src/ipa/rpi/controller/dpc_status.h2
-rw-r--r--src/ipa/rpi/controller/geq_status.h2
-rw-r--r--src/ipa/rpi/controller/hdr_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/hdr_status.h2
-rw-r--r--src/ipa/rpi/controller/histogram.cpp8
-rw-r--r--src/ipa/rpi/controller/histogram.h2
-rw-r--r--src/ipa/rpi/controller/lux_status.h2
-rw-r--r--src/ipa/rpi/controller/meson.build2
-rw-r--r--src/ipa/rpi/controller/metadata.h25
-rw-r--r--src/ipa/rpi/controller/noise_status.h2
-rw-r--r--src/ipa/rpi/controller/pdaf_data.h2
-rw-r--r--src/ipa/rpi/controller/pwl.cpp269
-rw-r--r--src/ipa/rpi/controller/pwl.h127
-rw-r--r--src/ipa/rpi/controller/region_stats.h2
-rw-r--r--src/ipa/rpi/controller/rpi/af.cpp8
-rw-r--r--src/ipa/rpi/controller/rpi/af.h7
-rw-r--r--src/ipa/rpi/controller/rpi/agc.cpp64
-rw-r--r--src/ipa/rpi/controller/rpi/agc.h16
-rw-r--r--src/ipa/rpi/controller/rpi/agc_channel.cpp255
-rw-r--r--src/ipa/rpi/controller/rpi/agc_channel.h31
-rw-r--r--src/ipa/rpi/controller/rpi/alsc.cpp26
-rw-r--r--src/ipa/rpi/controller/rpi/alsc.h2
-rw-r--r--src/ipa/rpi/controller/rpi/awb.cpp144
-rw-r--r--src/ipa/rpi/controller/rpi/awb.h30
-rw-r--r--src/ipa/rpi/controller/rpi/black_level.cpp3
-rw-r--r--src/ipa/rpi/controller/rpi/black_level.h2
-rw-r--r--src/ipa/rpi/controller/rpi/cac.cpp2
-rw-r--r--src/ipa/rpi/controller/rpi/ccm.cpp81
-rw-r--r--src/ipa/rpi/controller/rpi/ccm.h42
-rw-r--r--src/ipa/rpi/controller/rpi/contrast.cpp22
-rw-r--r--src/ipa/rpi/controller/rpi/contrast.h7
-rw-r--r--src/ipa/rpi/controller/rpi/denoise.cpp2
-rw-r--r--src/ipa/rpi/controller/rpi/dpc.cpp2
-rw-r--r--src/ipa/rpi/controller/rpi/dpc.h2
-rw-r--r--src/ipa/rpi/controller/rpi/focus.h2
-rw-r--r--src/ipa/rpi/controller/rpi/geq.cpp11
-rw-r--r--src/ipa/rpi/controller/rpi/geq.h6
-rw-r--r--src/ipa/rpi/controller/rpi/hdr.cpp118
-rw-r--r--src/ipa/rpi/controller/rpi/hdr.h19
-rw-r--r--src/ipa/rpi/controller/rpi/lux.cpp11
-rw-r--r--src/ipa/rpi/controller/rpi/lux.h4
-rw-r--r--src/ipa/rpi/controller/rpi/noise.cpp6
-rw-r--r--src/ipa/rpi/controller/rpi/noise.h2
-rw-r--r--src/ipa/rpi/controller/rpi/saturation.cpp2
-rw-r--r--src/ipa/rpi/controller/rpi/sdn.cpp2
-rw-r--r--src/ipa/rpi/controller/rpi/sdn.h2
-rw-r--r--src/ipa/rpi/controller/rpi/sharpen.cpp6
-rw-r--r--src/ipa/rpi/controller/rpi/sharpen.h2
-rw-r--r--src/ipa/rpi/controller/rpi/tonemap.cpp4
-rw-r--r--src/ipa/rpi/controller/rpi/tonemap.h5
-rw-r--r--src/ipa/rpi/controller/saturation_status.h2
-rw-r--r--src/ipa/rpi/controller/sharpen_algorithm.h2
-rw-r--r--src/ipa/rpi/controller/sharpen_status.h2
-rw-r--r--src/ipa/rpi/controller/statistics.h2
-rw-r--r--src/ipa/rpi/controller/stitch_status.h2
-rw-r--r--src/ipa/rpi/controller/tonemap_status.h6
-rw-r--r--src/ipa/rpi/vc4/data/imx219.json607
-rw-r--r--src/ipa/rpi/vc4/data/imx219_noir.json607
-rw-r--r--src/ipa/rpi/vc4/data/imx283.json313
-rw-r--r--src/ipa/rpi/vc4/data/imx290.json15
-rw-r--r--src/ipa/rpi/vc4/data/imx296.json17
-rw-r--r--src/ipa/rpi/vc4/data/imx296_mono.json17
-rw-r--r--src/ipa/rpi/vc4/data/imx327.json215
-rw-r--r--src/ipa/rpi/vc4/data/imx378.json15
-rwxr-xr-xsrc/ipa/rpi/vc4/data/imx415.json413
-rw-r--r--src/ipa/rpi/vc4/data/imx462.json215
-rw-r--r--src/ipa/rpi/vc4/data/imx477.json615
-rw-r--r--src/ipa/rpi/vc4/data/imx477_noir.json607
-rw-r--r--src/ipa/rpi/vc4/data/imx477_scientific.json15
-rw-r--r--src/ipa/rpi/vc4/data/imx477_v1.json15
-rw-r--r--src/ipa/rpi/vc4/data/imx519.json15
-rw-r--r--src/ipa/rpi/vc4/data/imx708.json555
-rw-r--r--src/ipa/rpi/vc4/data/imx708_noir.json555
-rw-r--r--src/ipa/rpi/vc4/data/imx708_wide.json555
-rw-r--r--src/ipa/rpi/vc4/data/imx708_wide_noir.json555
-rw-r--r--src/ipa/rpi/vc4/data/meson.build5
-rw-r--r--src/ipa/rpi/vc4/data/ov5647.json611
-rw-r--r--src/ipa/rpi/vc4/data/ov5647_noir.json15
-rw-r--r--src/ipa/rpi/vc4/data/ov7251_mono.json136
-rw-r--r--src/ipa/rpi/vc4/data/ov9281_mono.json5
-rw-r--r--src/ipa/rpi/vc4/data/se327m12.json15
-rw-r--r--src/ipa/rpi/vc4/data/uncalibrated.json5
-rw-r--r--src/ipa/rpi/vc4/meson.build7
-rw-r--r--src/ipa/rpi/vc4/vc4.cpp4
-rw-r--r--src/ipa/simple/algorithms/agc.cpp139
-rw-r--r--src/ipa/simple/algorithms/agc.h33
-rw-r--r--src/ipa/simple/algorithms/algorithm.h22
-rw-r--r--src/ipa/simple/algorithms/awb.cpp69
-rw-r--r--src/ipa/simple/algorithms/awb.h32
-rw-r--r--src/ipa/simple/algorithms/blc.cpp98
-rw-r--r--src/ipa/simple/algorithms/blc.h40
-rw-r--r--src/ipa/simple/algorithms/lut.cpp123
-rw-r--r--src/ipa/simple/algorithms/lut.h40
-rw-r--r--src/ipa/simple/algorithms/meson.build8
-rw-r--r--src/ipa/simple/data/meson.build10
-rw-r--r--src/ipa/simple/data/uncalibrated.yaml10
-rw-r--r--src/ipa/simple/ipa_context.cpp102
-rw-r--r--src/ipa/simple/ipa_context.h77
-rw-r--r--src/ipa/simple/meson.build31
-rw-r--r--src/ipa/simple/module.h30
-rw-r--r--src/ipa/simple/soft_simple.cpp350
-rw-r--r--src/ipa/vimc/meson.build8
-rw-r--r--src/ipa/vimc/vimc.cpp11
-rw-r--r--src/libcamera/base/backtrace.cpp2
-rw-r--r--src/libcamera/base/bound_method.cpp2
-rw-r--r--src/libcamera/base/class.cpp2
-rw-r--r--src/libcamera/base/event_dispatcher.cpp2
-rw-r--r--src/libcamera/base/event_dispatcher_poll.cpp5
-rw-r--r--src/libcamera/base/event_notifier.cpp2
-rw-r--r--src/libcamera/base/file.cpp2
-rw-r--r--src/libcamera/base/flags.cpp2
-rw-r--r--src/libcamera/base/log.cpp157
-rw-r--r--src/libcamera/base/memfd.cpp123
-rw-r--r--src/libcamera/base/meson.build26
-rw-r--r--src/libcamera/base/message.cpp2
-rw-r--r--src/libcamera/base/mutex.cpp2
-rw-r--r--src/libcamera/base/object.cpp4
-rw-r--r--src/libcamera/base/semaphore.cpp2
-rw-r--r--src/libcamera/base/shared_fd.cpp2
-rw-r--r--src/libcamera/base/signal.cpp4
-rw-r--r--src/libcamera/base/thread.cpp108
-rw-r--r--src/libcamera/base/timer.cpp2
-rw-r--r--src/libcamera/base/unique_fd.cpp2
-rw-r--r--src/libcamera/base/utils.cpp151
-rw-r--r--src/libcamera/bayer_format.cpp26
-rw-r--r--src/libcamera/byte_stream_buffer.cpp2
-rw-r--r--src/libcamera/camera.cpp55
-rw-r--r--src/libcamera/camera_controls.cpp2
-rw-r--r--src/libcamera/camera_lens.cpp2
-rw-r--r--src/libcamera/camera_manager.cpp73
-rw-r--r--src/libcamera/color_space.cpp2
-rw-r--r--src/libcamera/control_ids.cpp.in103
-rw-r--r--src/libcamera/control_ids_core.yaml841
-rw-r--r--src/libcamera/control_ids_debug.yaml6
-rw-r--r--src/libcamera/control_ids_draft.yaml125
-rw-r--r--src/libcamera/control_ids_rpi.yaml44
-rw-r--r--src/libcamera/control_ranges.yaml4
-rw-r--r--src/libcamera/control_serializer.cpp10
-rw-r--r--src/libcamera/control_validator.cpp2
-rw-r--r--src/libcamera/controls.cpp125
-rw-r--r--src/libcamera/converter.cpp139
-rw-r--r--src/libcamera/converter/converter_v4l2_m2m.cpp377
-rw-r--r--src/libcamera/converter/meson.build2
-rw-r--r--src/libcamera/debug_controls.cpp164
-rw-r--r--src/libcamera/delayed_controls.cpp2
-rw-r--r--src/libcamera/device_enumerator.cpp4
-rw-r--r--src/libcamera/device_enumerator_sysfs.cpp4
-rw-r--r--src/libcamera/device_enumerator_udev.cpp10
-rw-r--r--src/libcamera/dma_buf_allocator.cpp356
-rw-r--r--src/libcamera/fence.cpp4
-rw-r--r--src/libcamera/formats.cpp92
-rw-r--r--src/libcamera/formats.yaml24
-rw-r--r--src/libcamera/framebuffer.cpp9
-rw-r--r--src/libcamera/framebuffer_allocator.cpp2
-rw-r--r--src/libcamera/geometry.cpp71
-rw-r--r--src/libcamera/ipa_controls.cpp6
-rw-r--r--src/libcamera/ipa_data_serializer.cpp7
-rw-r--r--src/libcamera/ipa_interface.cpp2
-rw-r--r--src/libcamera/ipa_manager.cpp12
-rw-r--r--src/libcamera/ipa_module.cpp7
-rw-r--r--src/libcamera/ipa_proxy.cpp52
-rw-r--r--src/libcamera/ipa_pub_key.cpp.in2
-rw-r--r--src/libcamera/ipc_pipe.cpp2
-rw-r--r--src/libcamera/ipc_pipe_unixsocket.cpp2
-rw-r--r--src/libcamera/ipc_unixsocket.cpp13
-rw-r--r--src/libcamera/mapped_framebuffer.cpp6
-rw-r--r--src/libcamera/matrix.cpp156
-rw-r--r--src/libcamera/media_device.cpp14
-rw-r--r--src/libcamera/media_object.cpp52
-rw-r--r--src/libcamera/meson.build92
-rw-r--r--src/libcamera/orientation.cpp17
-rw-r--r--src/libcamera/pipeline/imx8-isi/imx8-isi.cpp13
-rw-r--r--src/libcamera/pipeline/imx8-isi/meson.build2
-rw-r--r--src/libcamera/pipeline/ipu3/cio2.cpp13
-rw-r--r--src/libcamera/pipeline/ipu3/cio2.h2
-rw-r--r--src/libcamera/pipeline/ipu3/frames.cpp5
-rw-r--r--src/libcamera/pipeline/ipu3/frames.h2
-rw-r--r--src/libcamera/pipeline/ipu3/imgu.cpp2
-rw-r--r--src/libcamera/pipeline/ipu3/imgu.h2
-rw-r--r--src/libcamera/pipeline/ipu3/ipu3.cpp52
-rw-r--r--src/libcamera/pipeline/ipu3/meson.build2
-rw-r--r--src/libcamera/pipeline/mali-c55/mali-c55.cpp897
-rw-r--r--src/libcamera/pipeline/mali-c55/meson.build2
-rw-r--r--src/libcamera/pipeline/rkisp1/meson.build2
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1.cpp542
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1_path.cpp134
-rw-r--r--src/libcamera/pipeline/rkisp1/rkisp1_path.h16
-rw-r--r--src/libcamera/pipeline/rpi/common/delayed_controls.cpp2
-rw-r--r--src/libcamera/pipeline/rpi/common/delayed_controls.h2
-rw-r--r--src/libcamera/pipeline/rpi/common/meson.build2
-rw-r--r--src/libcamera/pipeline/rpi/common/pipeline_base.cpp142
-rw-r--r--src/libcamera/pipeline/rpi/common/pipeline_base.h24
-rw-r--r--src/libcamera/pipeline/rpi/common/rpi_stream.cpp2
-rw-r--r--src/libcamera/pipeline/rpi/common/rpi_stream.h2
-rw-r--r--src/libcamera/pipeline/rpi/common/shared_mem_object.h128
-rw-r--r--src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp90
-rw-r--r--src/libcamera/pipeline/rpi/vc4/dma_heaps.h32
-rw-r--r--src/libcamera/pipeline/rpi/vc4/meson.build3
-rw-r--r--src/libcamera/pipeline/rpi/vc4/vc4.cpp28
-rw-r--r--src/libcamera/pipeline/simple/meson.build2
-rw-r--r--src/libcamera/pipeline/simple/simple.cpp344
-rw-r--r--src/libcamera/pipeline/uvcvideo/meson.build2
-rw-r--r--src/libcamera/pipeline/uvcvideo/uvcvideo.cpp131
-rw-r--r--src/libcamera/pipeline/vimc/meson.build2
-rw-r--r--src/libcamera/pipeline/vimc/vimc.cpp81
-rw-r--r--src/libcamera/pipeline/virtual/README.md65
-rw-r--r--src/libcamera/pipeline/virtual/config_parser.cpp260
-rw-r--r--src/libcamera/pipeline/virtual/config_parser.h39
-rw-r--r--src/libcamera/pipeline/virtual/data/meson.build4
-rw-r--r--src/libcamera/pipeline/virtual/data/virtual.yaml36
-rw-r--r--src/libcamera/pipeline/virtual/frame_generator.h29
-rw-r--r--src/libcamera/pipeline/virtual/image_frame_generator.cpp172
-rw-r--r--src/libcamera/pipeline/virtual/image_frame_generator.h49
-rw-r--r--src/libcamera/pipeline/virtual/meson.build15
-rw-r--r--src/libcamera/pipeline/virtual/test_pattern_generator.cpp125
-rw-r--r--src/libcamera/pipeline/virtual/test_pattern_generator.h48
-rw-r--r--src/libcamera/pipeline/virtual/virtual.cpp419
-rw-r--r--src/libcamera/pipeline/virtual/virtual.h62
-rw-r--r--src/libcamera/pipeline_handler.cpp147
-rw-r--r--src/libcamera/pixel_format.cpp2
-rw-r--r--src/libcamera/process.cpp10
-rw-r--r--src/libcamera/property_ids.cpp.in48
-rw-r--r--src/libcamera/proxy/meson.build5
-rw-r--r--src/libcamera/proxy/worker/meson.build6
-rw-r--r--src/libcamera/pub_key.cpp2
-rw-r--r--src/libcamera/request.cpp28
-rw-r--r--src/libcamera/sensor/camera_sensor.cpp1114
-rw-r--r--src/libcamera/sensor/camera_sensor_legacy.cpp1045
-rw-r--r--src/libcamera/sensor/camera_sensor_properties.cpp203
-rw-r--r--src/libcamera/sensor/camera_sensor_raw.cpp1157
-rw-r--r--src/libcamera/sensor/meson.build4
-rw-r--r--src/libcamera/shared_mem_object.cpp231
-rw-r--r--src/libcamera/software_isp/TODO208
-rw-r--r--src/libcamera/software_isp/debayer.cpp127
-rw-r--r--src/libcamera/software_isp/debayer.h54
-rw-r--r--src/libcamera/software_isp/debayer_cpu.cpp835
-rw-r--r--src/libcamera/software_isp/debayer_cpu.h163
-rw-r--r--src/libcamera/software_isp/meson.build15
-rw-r--r--src/libcamera/software_isp/software_isp.cpp404
-rw-r--r--src/libcamera/software_isp/swstats_cpu.cpp434
-rw-r--r--src/libcamera/software_isp/swstats_cpu.h97
-rw-r--r--src/libcamera/source_paths.cpp2
-rw-r--r--src/libcamera/stream.cpp28
-rw-r--r--src/libcamera/sysfs.cpp2
-rw-r--r--src/libcamera/tracepoints.cpp2
-rw-r--r--src/libcamera/transform.cpp2
-rw-r--r--src/libcamera/v4l2_device.cpp81
-rw-r--r--src/libcamera/v4l2_pixelformat.cpp52
-rw-r--r--src/libcamera/v4l2_subdevice.cpp270
-rw-r--r--src/libcamera/v4l2_videodevice.cpp159
-rw-r--r--src/libcamera/vector.cpp347
-rw-r--r--src/libcamera/version.cpp.in2
-rw-r--r--src/libcamera/yaml_parser.cpp261
-rw-r--r--src/meson.build31
-rw-r--r--src/py/cam/cam_qt.py6
-rw-r--r--src/py/cam/cam_qtgl.py12
-rwxr-xr-xsrc/py/libcamera/gen-py-controls.py126
-rw-r--r--src/py/libcamera/meson.build27
-rw-r--r--src/py/libcamera/py_color_space.cpp2
-rw-r--r--src/py/libcamera/py_controls_generated.cpp.in35
-rw-r--r--src/py/libcamera/py_enums.cpp5
-rw-r--r--src/py/libcamera/py_formats_generated.cpp.in2
-rw-r--r--src/py/libcamera/py_geometry.cpp2
-rw-r--r--src/py/libcamera/py_helpers.cpp16
-rw-r--r--src/py/libcamera/py_main.cpp29
-rw-r--r--src/py/libcamera/py_main.h10
-rw-r--r--src/py/libcamera/py_properties_generated.cpp.in28
-rw-r--r--src/py/libcamera/py_transform.cpp2
-rw-r--r--src/v4l2/meson.build7
-rw-r--r--src/v4l2/v4l2_camera.cpp14
-rw-r--r--src/v4l2/v4l2_camera.h11
-rw-r--r--src/v4l2/v4l2_camera_file.cpp2
-rw-r--r--src/v4l2/v4l2_camera_file.h2
-rw-r--r--src/v4l2/v4l2_camera_proxy.cpp88
-rw-r--r--src/v4l2/v4l2_camera_proxy.h5
-rw-r--r--src/v4l2/v4l2_compat.cpp80
-rw-r--r--src/v4l2/v4l2_compat_manager.cpp3
-rw-r--r--src/v4l2/v4l2_compat_manager.h2
-rw-r--r--test/bayer-format.cpp2
-rw-r--r--test/byte-stream-buffer.cpp2
-rw-r--r--test/camera-sensor.cpp9
-rw-r--r--test/camera/buffer_import.cpp21
-rw-r--r--test/camera/capture.cpp22
-rw-r--r--test/controls/control_info.cpp2
-rw-r--r--test/controls/control_info_map.cpp2
-rw-r--r--test/controls/control_list.cpp2
-rw-r--r--test/controls/control_value.cpp82
-rw-r--r--test/delayed_controls.cpp2
-rw-r--r--test/event-dispatcher.cpp2
-rw-r--r--test/event-thread.cpp2
-rw-r--r--test/event.cpp2
-rw-r--r--test/fence.cpp77
-rw-r--r--test/file.cpp2
-rw-r--r--test/flags.cpp2
-rw-r--r--test/geometry.cpp27
-rw-r--r--test/gstreamer/gstreamer_device_provider_test.cpp13
-rw-r--r--test/gstreamer/gstreamer_memory_lifetime_test.cpp90
-rw-r--r--test/gstreamer/gstreamer_multi_stream_test.cpp2
-rw-r--r--test/gstreamer/gstreamer_single_stream_test.cpp29
-rw-r--r--test/gstreamer/gstreamer_test.cpp6
-rw-r--r--test/gstreamer/gstreamer_test.h2
-rw-r--r--test/gstreamer/meson.build14
-rw-r--r--test/hotplug-cameras.cpp2
-rw-r--r--test/ipa/ipa_interface_test.cpp16
-rw-r--r--test/ipa/ipa_module_test.cpp4
-rw-r--r--test/ipa/libipa/fixedpoint.cpp108
-rw-r--r--test/ipa/libipa/interpolator.cpp54
-rw-r--r--test/ipa/libipa/meson.build17
-rw-r--r--test/ipa/meson.build10
-rw-r--r--test/ipc/unixsocket.cpp13
-rw-r--r--test/ipc/unixsocket_ipc.cpp2
-rw-r--r--test/libtest/buffer_source.h2
-rw-r--r--test/libtest/camera_test.h2
-rw-r--r--test/libtest/test.cpp2
-rw-r--r--test/libtest/test.h2
-rw-r--r--test/log/log_api.cpp2
-rw-r--r--test/log/log_process.cpp2
-rw-r--r--test/media_device/media_device_link_test.cpp2
-rw-r--r--test/media_device/media_device_print_test.cpp2
-rw-r--r--test/media_device/media_device_test.cpp2
-rw-r--r--test/media_device/media_device_test.h2
-rw-r--r--test/meson.build4
-rw-r--r--test/message.cpp2
-rw-r--r--test/object-delete.cpp2
-rw-r--r--test/object-invoke.cpp2
-rw-r--r--test/object.cpp2
-rw-r--r--test/process/process_test.cpp2
-rw-r--r--test/public-api.cpp2
-rw-r--r--test/py/meson.build14
-rwxr-xr-xtest/py/unittests.py2
-rw-r--r--test/serialization/control_serialization.cpp2
-rw-r--r--test/serialization/generated_serializer/generated_serializer_test.cpp3
-rw-r--r--test/serialization/generated_serializer/include/libcamera/ipa/meson.build9
-rw-r--r--test/serialization/ipa_data_serializer_test.cpp4
-rw-r--r--test/serialization/serialization_test.cpp2
-rw-r--r--test/serialization/serialization_test.h2
-rw-r--r--test/shared-fd.cpp2
-rw-r--r--test/signal-threads.cpp2
-rw-r--r--test/signal.cpp2
-rw-r--r--test/span.cpp8
-rw-r--r--test/stream/stream_colorspace.cpp2
-rw-r--r--test/stream/stream_formats.cpp2
-rw-r--r--test/threads.cpp42
-rw-r--r--test/timer-fail.cpp2
-rw-r--r--test/timer-thread.cpp2
-rw-r--r--test/timer.cpp2
-rw-r--r--test/transform.cpp2
-rw-r--r--test/unique-fd.cpp2
-rw-r--r--test/utils.cpp19
-rwxr-xr-xtest/v4l2_compat/v4l2_compat_test.py2
-rw-r--r--test/v4l2_subdevice/v4l2_subdevice_test.cpp2
-rw-r--r--test/v4l2_subdevice/v4l2_subdevice_test.h2
-rw-r--r--test/v4l2_videodevice/capture_async.cpp11
-rw-r--r--test/v4l2_videodevice/controls.cpp2
-rw-r--r--test/v4l2_videodevice/v4l2_videodevice_test.cpp5
-rw-r--r--test/v4l2_videodevice/v4l2_videodevice_test.h4
-rw-r--r--test/vector.cpp100
-rw-r--r--test/yaml-parser.cpp54
-rwxr-xr-xutils/abi-compat.sh13
-rwxr-xr-xutils/checkstyle.py474
-rw-r--r--utils/codegen/controls.py142
-rwxr-xr-xutils/codegen/gen-controls.py109
-rwxr-xr-xutils/codegen/gen-formats.py (renamed from utils/gen-formats.py)2
-rwxr-xr-xutils/codegen/gen-gst-controls.py183
-rwxr-xr-xutils/codegen/gen-header.sh (renamed from utils/gen-header.sh)9
-rwxr-xr-xutils/codegen/gen-ipa-pub-key.py (renamed from utils/gen-ipa-pub-key.py)2
-rwxr-xr-xutils/codegen/gen-tp-header.py (renamed from utils/tracepoints/gen-tp-header.py)6
-rwxr-xr-xutils/codegen/ipc/extract-docs.py (renamed from utils/ipc/extract-docs.py)4
-rwxr-xr-xutils/codegen/ipc/generate.py (renamed from utils/ipc/generate.py)5
-rw-r--r--utils/codegen/ipc/generators/__init__.py (renamed from utils/ipc/generators/__init__.py)0
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl)7
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl)2
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl (renamed from utils/ipc/generators/libcamera_templates/definition_functions.tmpl)0
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/meson.build (renamed from utils/ipc/generators/libcamera_templates/meson.build)0
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl)16
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl)6
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl)2
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl)2
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl (renamed from utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl)2
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl (renamed from utils/ipc/generators/libcamera_templates/proxy_functions.tmpl)2
-rw-r--r--utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl (renamed from utils/ipc/generators/libcamera_templates/serializer.tmpl)0
-rw-r--r--utils/codegen/ipc/generators/meson.build (renamed from utils/ipc/generators/meson.build)0
-rw-r--r--utils/codegen/ipc/generators/mojom_libcamera_generator.py (renamed from utils/ipc/generators/mojom_libcamera_generator.py)4
-rw-r--r--utils/codegen/ipc/meson.build (renamed from utils/ipc/meson.build)3
-rw-r--r--utils/codegen/ipc/mojo/README (renamed from utils/ipc/mojo/README)0
-rw-r--r--utils/codegen/ipc/mojo/public/LICENSE (renamed from utils/ipc/mojo/public/LICENSE)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/.style.yapf (renamed from utils/ipc/mojo/public/tools/.style.yapf)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/BUILD.gn (renamed from utils/ipc/mojo/public/tools/BUILD.gn)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn (renamed from utils/ipc/mojo/public/tools/bindings/BUILD.gn)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/README.md (renamed from utils/ipc/mojo/public/tools/bindings/README.md)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/__init__.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py (renamed from utils/ipc/mojo/public/tools/bindings/concatenate-files.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py (renamed from utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py (renamed from utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py (renamed from utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py (renamed from utils/ipc/mojo/public/tools/bindings/minify_with_terser.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni (renamed from utils/ipc/mojo/public/tools/bindings/mojom.gni)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py (renamed from utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py (renamed from utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py (renamed from utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn (renamed from utils/ipc/mojo/public/tools/mojom/BUILD.gn)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/README.md (renamed from utils/ipc/mojo/public/tools/mojom/README.md)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py (renamed from utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/const_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/enum_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/feature_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn (renamed from utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/__init__.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/error.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/union_unittest.py)0
-rw-r--r--utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py (renamed from utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py)0
-rwxr-xr-xutils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py (renamed from utils/ipc/mojo/public/tools/run_all_python_unittests.py)0
-rwxr-xr-xutils/codegen/ipc/parser.py (renamed from utils/ipc/parser.py)5
-rw-r--r--utils/codegen/ipc/tools/README (renamed from utils/ipc/tools/README)0
-rw-r--r--utils/codegen/ipc/tools/diagnosis/crbug_1001171.py (renamed from utils/ipc/tools/diagnosis/crbug_1001171.py)0
-rw-r--r--utils/codegen/meson.build19
-rwxr-xr-xutils/gen-controls.py370
-rwxr-xr-xutils/gen-debug-controls.py163
-rwxr-xr-xutils/gen-ipa-priv-key.sh2
-rwxr-xr-xutils/gen-version.sh2
-rwxr-xr-xutils/hooks/pre-push11
-rwxr-xr-xutils/ipu3/ipu3-capture.sh2
-rw-r--r--utils/ipu3/ipu3-pack.c4
-rwxr-xr-xutils/ipu3/ipu3-process.sh2
-rw-r--r--utils/ipu3/ipu3-unpack.c3
-rw-r--r--utils/meson.build10
-rwxr-xr-xutils/raspberrypi/ctt/alsc_only.py20
-rw-r--r--utils/raspberrypi/ctt/cac_only.py142
-rw-r--r--utils/raspberrypi/ctt/colors.py2
-rwxr-xr-xutils/raspberrypi/ctt/convert_tuning.py98
-rwxr-xr-xutils/raspberrypi/ctt/ctt.py257
-rw-r--r--utils/raspberrypi/ctt/ctt_alsc.py83
-rw-r--r--utils/raspberrypi/ctt/ctt_awb.py13
-rw-r--r--utils/raspberrypi/ctt/ctt_cac.py228
-rw-r--r--utils/raspberrypi/ctt/ctt_ccm.py8
-rw-r--r--utils/raspberrypi/ctt/ctt_config_example.json5
-rw-r--r--utils/raspberrypi/ctt/ctt_dots_locator.py118
-rw-r--r--utils/raspberrypi/ctt/ctt_geq.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_image_load.py3
-rw-r--r--utils/raspberrypi/ctt/ctt_lux.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_macbeth_locator.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_noise.py2
-rwxr-xr-xutils/raspberrypi/ctt/ctt_pisp.py805
-rwxr-xr-xutils/raspberrypi/ctt/ctt_pretty_print_json.py22
-rw-r--r--utils/raspberrypi/ctt/ctt_ransac.py2
-rw-r--r--utils/raspberrypi/ctt/ctt_tools.py5
-rwxr-xr-xutils/raspberrypi/ctt/ctt_vc4.py126
-rwxr-xr-xutils/rkisp1/rkisp1-capture.sh3
-rwxr-xr-xutils/tracepoints/analyze-ipa-trace.py2
-rw-r--r--utils/tracepoints/meson.build5
-rw-r--r--utils/tuning/README.rst23
-rw-r--r--utils/tuning/config-example.yaml54
-rw-r--r--utils/tuning/libtuning/average.py2
-rw-r--r--utils/tuning/libtuning/ctt_awb.py378
-rw-r--r--utils/tuning/libtuning/ctt_ccm.py408
-rw-r--r--utils/tuning/libtuning/ctt_colors.py30
-rw-r--r--utils/tuning/libtuning/ctt_ransac.py71
-rw-r--r--utils/tuning/libtuning/generators/generator.py2
-rw-r--r--utils/tuning/libtuning/generators/raspberrypi_output.py2
-rw-r--r--utils/tuning/libtuning/generators/yaml_output.py10
-rw-r--r--utils/tuning/libtuning/gradient.py2
-rw-r--r--utils/tuning/libtuning/image.py14
-rw-r--r--utils/tuning/libtuning/libtuning.py30
-rw-r--r--utils/tuning/libtuning/macbeth.py67
-rw-r--r--utils/tuning/libtuning/macbeth_ref.pgm2
-rw-r--r--utils/tuning/libtuning/modules/agc/__init__.py6
-rw-r--r--utils/tuning/libtuning/modules/agc/agc.py21
-rw-r--r--utils/tuning/libtuning/modules/agc/rkisp1.py79
-rw-r--r--utils/tuning/libtuning/modules/awb/__init__.py6
-rw-r--r--utils/tuning/libtuning/modules/awb/awb.py40
-rw-r--r--utils/tuning/libtuning/modules/awb/rkisp1.py36
-rw-r--r--utils/tuning/libtuning/modules/ccm/__init__.py6
-rw-r--r--utils/tuning/libtuning/modules/ccm/ccm.py41
-rw-r--r--utils/tuning/libtuning/modules/ccm/rkisp1.py28
-rw-r--r--utils/tuning/libtuning/modules/lsc/lsc.py5
-rw-r--r--utils/tuning/libtuning/modules/lsc/raspberrypi.py14
-rw-r--r--utils/tuning/libtuning/modules/lsc/rkisp1.py22
-rw-r--r--utils/tuning/libtuning/modules/lux/__init__.py6
-rw-r--r--utils/tuning/libtuning/modules/lux/lux.py70
-rw-r--r--utils/tuning/libtuning/modules/lux/rkisp1.py22
-rw-r--r--utils/tuning/libtuning/modules/module.py2
-rw-r--r--utils/tuning/libtuning/modules/static.py24
-rw-r--r--utils/tuning/libtuning/parsers/parser.py2
-rw-r--r--utils/tuning/libtuning/parsers/raspberrypi_parser.py2
-rw-r--r--utils/tuning/libtuning/parsers/yaml_parser.py11
-rw-r--r--utils/tuning/libtuning/smoothing.py2
-rw-r--r--utils/tuning/libtuning/utils.py99
-rw-r--r--utils/tuning/raspberrypi/alsc.py2
-rwxr-xr-xutils/tuning/raspberrypi_alsc_only.py2
-rw-r--r--utils/tuning/requirements.txt9
-rwxr-xr-xutils/tuning/rkisp1.py65
-rwxr-xr-xutils/update-kernel-headers.sh3
951 files changed, 39553 insertions(+), 11035 deletions(-)
diff --git a/.clang-format b/.clang-format
index cac7029f..7fc30f61 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# clang-format configuration file. Intended for clang-format >= 7.
+# clang-format configuration file. Intended for clang-format >= 12.
#
# For more information, see:
#
@@ -75,11 +75,12 @@ IncludeCategories:
Priority: 9
# Qt includes (match before C++ standard library)
- Regex: '<Q([A-Za-z0-9\-_])+>'
+ CaseSensitive: true
Priority: 9
# Headers in <> with an extension. (+system libraries)
- Regex: '<([A-Za-z0-9\-_])+\.h>'
Priority: 2
- # System headers
+ # System headers
- Regex: '<sys/.*>'
Priority: 2
# C++ standard library includes (no extension)
@@ -99,7 +100,7 @@ IncludeCategories:
# IPA Interfaces
- Regex: '<libcamera/ipa/.*\.h>'
Priority: 7
- # libcamera Internal headers in ""
+ # libcamera Internal headers in ""
- Regex: '"libcamera/internal/.*\.h"'
Priority: 8
# Other libraries headers with one group per library (.h or .hpp)
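Note: the "CaseSensitive: true" key added to the Qt include category above is
only understood from clang-format 12 onwards, which is presumably why the
version requirement in the file header moves from >= 7 to >= 12. Its effect is
to stop lowercase C++ standard headers from matching the '<Q...>' pattern. A
minimal sketch (header names are illustrative, not taken from this diff):

    // With CaseSensitive: true, only genuine Qt headers match '<Q([A-Za-z0-9\-_])+>'.
    #include <queue>   // std header starting with a lowercase 'q': not matched
    #include <QWidget> // Qt header: grouped by the case-sensitive Qt category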
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..a76b36a7
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: CC0-1.0
+
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.{cpp,h}]
+indent_size = 8
+indent_style = tab
+
+[*.json]
+indent_size = 4
+indent_style = space
+
+[*.py]
+indent_size = 4
+indent_style = space
+
+[*.yaml]
+indent_size = 2
+indent_style = space
+
+[{meson.build,meson_options.txt}]
+indent_size = 4
+indent_style = space
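The new .editorconfig encodes the project's existing whitespace conventions so
that editors with EditorConfig support pick them up automatically. For C++
sources this means kernel-style tab indentation displayed at 8 columns, along
these lines (a sketch, not code from the tree):

    /* Covered by the [*.{cpp,h}] section: indent_style = tab, indent_size = 8 */
    int example(bool flag)
    {
    	if (flag)
    		return 1;	/* one tab per indentation level */

    	return 0;
    }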
diff --git a/.gitignore b/.gitignore
index f6d1d68e..51d31440 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@
*.patch
*.pyc
__pycache__/
+venv/
diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile-common.in
index a86ea6c1..045c19dd 100644
--- a/Documentation/Doxyfile.in
+++ b/Documentation/Doxyfile-common.in
@@ -20,35 +20,19 @@ TOC_INCLUDE_HEADINGS = 0
CASE_SENSE_NAMES = YES
QUIET = YES
-
-INPUT = "@TOP_SRCDIR@/include/libcamera" \
- "@TOP_SRCDIR@/src/ipa/ipu3" \
- "@TOP_SRCDIR@/src/ipa/libipa" \
- "@TOP_SRCDIR@/src/libcamera" \
- "@TOP_BUILDDIR@/include/libcamera" \
- "@TOP_BUILDDIR@/src/libcamera"
+WARN_AS_ERROR = @WARN_AS_ERROR@
FILE_PATTERNS = *.c \
*.cpp \
+ *.dox \
*.h
RECURSIVE = YES
-EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
- @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
- @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
- @TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
- @TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
- @TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
- @TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
- @TOP_SRCDIR@/src/libcamera/pipeline/ \
- @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
- @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
- @TOP_BUILDDIR@/src/libcamera/proxy/
-
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
@TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
@TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
+ @TOP_BUILDDIR@/include/libcamera/ipa/mali-c55_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
@@ -68,8 +52,6 @@ EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
EXCLUDE_SYMLINKS = YES
-HTML_OUTPUT = api-html
-
GENERATE_LATEX = NO
MACRO_EXPANSION = YES
diff --git a/Documentation/Doxyfile-internal.in b/Documentation/Doxyfile-internal.in
new file mode 100644
index 00000000..5343bc2b
--- /dev/null
+++ b/Documentation/Doxyfile-internal.in
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
+@INCLUDE = Doxyfile-common
+
+HIDE_UNDOC_CLASSES = NO
+HIDE_UNDOC_MEMBERS = NO
+HTML_OUTPUT = internal-api-html
+INTERNAL_DOCS = YES
+ENABLED_SECTIONS = internal
+
+INPUT = "@TOP_SRCDIR@/Documentation" \
+ "@TOP_SRCDIR@/include/libcamera" \
+ "@TOP_SRCDIR@/src/ipa/ipu3" \
+ "@TOP_SRCDIR@/src/ipa/libipa" \
+ "@TOP_SRCDIR@/src/libcamera" \
+ "@TOP_BUILDDIR@/include/libcamera" \
+ "@TOP_BUILDDIR@/src/libcamera"
+
+EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+ @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
+ @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
+ @TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
+ @TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
+ @TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
+ @TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
+ @TOP_SRCDIR@/src/libcamera/pipeline/ \
+ @TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_legacy.cpp \
+ @TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_raw.cpp \
+ @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+ @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+ @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+ @TOP_BUILDDIR@/src/libcamera/proxy/
diff --git a/Documentation/Doxyfile-public.in b/Documentation/Doxyfile-public.in
new file mode 100644
index 00000000..36bb5758
--- /dev/null
+++ b/Documentation/Doxyfile-public.in
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
+@INCLUDE = Doxyfile-common
+
+HIDE_UNDOC_CLASSES = YES
+HIDE_UNDOC_MEMBERS = YES
+HTML_OUTPUT = api-html
+INTERNAL_DOCS = NO
+
+INPUT = "@TOP_SRCDIR@/Documentation" \
+ ${inputs}
+
+EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/class.h \
+ @TOP_SRCDIR@/include/libcamera/base/object.h \
+ @TOP_SRCDIR@/include/libcamera/base/span.h \
+ @TOP_SRCDIR@/src/libcamera/base/class.cpp \
+ @TOP_SRCDIR@/src/libcamera/base/object.cpp
+
+PREDEFINED += __DOXYGEN_PUBLIC__
diff --git a/Documentation/api-html/index.rst b/Documentation/api-html/index.rst
index 9e630fc0..2f09833d 100644
--- a/Documentation/api-html/index.rst
+++ b/Documentation/api-html/index.rst
@@ -2,7 +2,7 @@
.. _api:
-API
-===
+API Reference
+=============
:: Placeholder for Doxygen documentation
diff --git a/Documentation/camera-sensor-model.rst b/Documentation/camera-sensor-model.rst
index b66c880a..87a25bf4 100644
--- a/Documentation/camera-sensor-model.rst
+++ b/Documentation/camera-sensor-model.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _camera-sensor-model:
.. todo: Move to Doxygen-generated documentation
diff --git a/Documentation/code-of-conduct.rst b/Documentation/code-of-conduct.rst
index 38b7d7ad..0edd1e99 100644
--- a/Documentation/code-of-conduct.rst
+++ b/Documentation/code-of-conduct.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-4.0
+.. include:: documentation-contents.rst
+
.. _code-of-conduct:
Contributor Covenant Code of Conduct
diff --git a/Documentation/coding-style.rst b/Documentation/coding-style.rst
index 053fdd99..6ac3a4a0 100644
--- a/Documentation/coding-style.rst
+++ b/Documentation/coding-style.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _coding-style-guidelines:
Coding Style Guidelines
@@ -59,7 +61,7 @@ document:
underscores in between
* All formatting rules specified in the selected sections of the Linux kernel
Code Style for indentation, braces, spacing, etc
-* Header guards are formatted as '__LIBCAMERA_FILE_NAME_H__'
+* Headers are guarded by the use of '#pragma once'
Order of Includes
~~~~~~~~~~~~~~~~~
@@ -215,7 +217,7 @@ shall be avoided when possible, but are allowed when required (for instance to
implement factories with auto-registration). They shall not depend on any other
global variable, should run a minimal amount of code in the constructor and
destructor, and code that contains dependencies should be moved to a later
-point in time.
+point in time.
Error Handling
~~~~~~~~~~~~~~
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 7eeea7f3..089f114c 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -37,8 +37,11 @@ author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
+ 'sphinx.ext.graphviz'
]
+graphviz_output_format = 'svg'
+
# Add any paths that contain templates here, relative to this directory.
templates_path = []
@@ -61,7 +64,12 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = [
+ '_build',
+ 'Thumbs.db',
+ '.DS_Store',
+ 'documentation-contents.rst',
+]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
diff --git a/Documentation/design/ae.rst b/Documentation/design/ae.rst
new file mode 100644
index 00000000..df9b1fa7
--- /dev/null
+++ b/Documentation/design/ae.rst
@@ -0,0 +1,331 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+Design of Exposure and Gain controls
+====================================
+
+This document explains the design and rationale of the controls related to
+exposure and gain. This includes the all-encompassing auto-exposure (AE), the
+manual exposure control, and the manual gain control.
+
+Description of the problem
+--------------------------
+
+Sub controls
+^^^^^^^^^^^^
+
+More than one control makes up the total exposure: exposure time, gain, and
+aperture (though for now we will not consider aperture). We already
+had individual controls for setting the values of manual exposure and manual
+gain, but for switching between auto mode and manual mode we only had a
+high-level boolean AeEnable control that would set *both* exposure and gain to
+auto mode or manual mode; we had no way to set one to auto and the other to
+manual.
+
+So, we need to introduce two new controls to act as "levers" to indicate
+individually for exposure and gain if the value would come from AEGC or if it
+would come from the manual control value.
+
+Aperture priority
+^^^^^^^^^^^^^^^^^
+
+We eventually may need to support aperture, and so whatever our solution is for
+having only some controls on auto and the others on manual needs to be
+extensible.
+
+Flickering when going from auto to manual
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When a manual exposure or gain value is requested by the application, it costs
+a few frames' worth of time to take effect. This means that during a
+transition from auto to manual, there would be flickering in the control
+values and the transition wouldn't be smooth.
+
+Take for instance the following flow, where we start on auto exposure (which
+for the purposes of the example increments by 1 each frame) and we want to
+switch seamlessly to manual exposure, which involves copying the exposure value
+computed by the auto exposure algorithm:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+
+ Mode requested: Auto Auto Auto Manual Manual Manual Manual
+ Exp requested: N/A N/A N/A 2 2 2 2
+ Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
+
+ Mode used: Auto Auto Auto Auto Auto Manual Manual
+ Exp used: 0 1 2 3 4 2 2
+
+As we can see, after frame N+2 completes, we copy the exposure value that was
+used for frame N+2 (which was computed by AE algorithm), and queue that value
+into request N+3 with manual mode on. However, as it takes two frames for the
+exposure to be set, the exposure still changes since it is set by AE, and we
+get a flicker in the exposure during the switch from auto to manual.
+
+A solution is to *not submit* any exposure value when manual mode is enabled,
+and wait until the manual mode has been "applied" before copying the exposure
+value:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+
+ Mode requested: Auto Auto Auto Manual Manual Manual Manual
+ Exp requested: N/A N/A N/A None None None 5
+ Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
+
+ Mode used: Auto Auto Auto Auto Auto Manual Manual
+ Exp used: 0 1 2 3 4 5 5
+
+In practice, this works. However, libcamera has a policy where once a control
+is submitted, its value is saved and does not need to be resubmitted. If the
+manual exposure value was set while auto mode was on, in theory the value would
+be saved, so when manual mode is enabled, the exposure value that was
+previously set would immediately be used. Clearly this solution isn't correct,
+but it can serve as the basis for a proper solution, with some more rigorous
+rules.
+
+Existing solutions
+------------------
+
+Raspberry Pi
+^^^^^^^^^^^^
+
+The Raspberry Pi IPA gets around the lack of individual AeEnable controls for
+exposure and gain by using magic values. When AeEnable is false, if one of the
+manual control values was set to 0 then the value computed by AEGC would be
+used for just that control. This solution isn't desirable, as it prevents
+that magic value from being used as a valid value.
+
+To get around the flickering issue, when AeEnable is false, the Raspberry Pi
+AEGC simply stops updating the values to be set, without restoring the
+previously set manual exposure time and gain. This works, but is not a proper
+solution.
+
+Android
+^^^^^^^
+
+The Android HAL specification requires that exposure and gain (sensitivity)
+must both be manual or both be auto. It cannot be that one is manual while the
+other is auto, so they simply don't support sub controls.
+
+For the flickering issue, the Android HAL has an AeLock control. To transition
+from auto to manual, the application would keep AE on auto, and turn on the
+lock. Once the lock has propagated through, then the value can be copied from
+the result into the request and the lock disabled and the mode set to manual.
+
+The problem with this solution is, besides the extra complexity, that it is
+ambiguous what happens if there is a state transition from manual to locked
+(even though it's a state transition that doesn't make sense). If locked is
+defined to "use the last automatically computed values" then it could use the
+values from the last time AE was set to auto, or it would be undefined if AE
+was never auto (e.g. it started out as manual), or if AE is implemented to run
+in the background it could just use the current values that are computed. If
+locked is defined to "use the last value that was set" there would be less
+ambiguity. Still, it's better if we can make it impossible to execute this
+nonsensical state transition, and if we can reduce the complexity of having
+this extra control or extra setting on a lever.
+
+Summary of goals
+----------------
+
+- We need a lock of some sort, to instruct the AEGC to not update output
+ results
+
+- We need manual modes, to override the values computed by the AEGC
+
+- We need to support seamless transitions from auto to manual, and do so
+ without flickering
+
+- We need custom minimum values for the manual controls; that is, no magic
+ values for enabling/disabling auto
+
+- All of these need to be done with AE sub-controls (exposure time, analogue
+ gain) and be extensible to aperture in the future
+
+Our solution
+------------
+
+A diagram of our solution:
+
+::
+
+ +----------------------------+-------------+------------------+-----------------+
+ | INPUT | ALGORITHM | RESULT | OUTPUT |
+ +----------------------------+-------------+------------------+-----------------+
+
+ ExposureTimeMode ExposureTimeMode
+ ---------------------+----------------------------------------+----------------->
+ 0: Auto | |
+ 1: Manual | V
+ | |\
+ | | \
+ | /----------------------------------> | 1| ExposureTime
+ | | +-------------+ exposure time | | -------------->
+ \--)--> | | --------------> | 0|
+ ExposureTime | | | | /
+ ------------------------+--> | | |/
+ | | AeState
+ | AEGC | ----------------------------------->
+ AnalogueGain | |
+ ------------------------+--> | | |\
+ | | | | \
+ /--)--> | | --------------> | 0| AnalogueGain
+ | | +-------------+ analogue gain | | -------------->
+ | \----------------------------------> | 1|
+ | | /
+ | |/
+ | ^
+ AnalogueGainMode | | AnalogueGainMode
+ ---------------------+----------------------------------------+----------------->
+ 0: Auto
+ 1: Manual
+
+ AeEnable
+ - True -> ExposureTimeMode:Auto + AnalogueGainMode:Auto
+ - False -> ExposureTimeMode:Manual + AnalogueGainMode:Manual
+
+
+The diagram is divided into four sections horizontally:
+
+- Input: The values received from the request controls
+
+- Algorithm: The algorithm itself
+
+- Result: The values calculated by the algorithm
+
+- Output: The values reported in result metadata and applied to the device
+
+The four input controls are divided between manual values (ExposureTime and
+AnalogueGain), and operation modes (ExposureTimeMode and AnalogueGainMode). The
+former are the manual values, the latter control how they're applied. The two
+modes are independent from each other, and each can take one of two values:
+
+- Auto (0): The AEGC algorithm computes the value normally. The AEGC result
+  is applied to the output. The manual value is ignored *and is not retained*.
+
+- Manual (1): The AEGC algorithm uses the manual value internally. The
+  corresponding manual control from the request is applied to the output. The
+  AEGC result is ignored.
+
+The AeState control reports the state of the unified AEGC block. If both
+ExposureTimeMode and AnalogueGainMode are set to manual then it will report
+Idle. If at least one of the two is set to auto, then AeState will report
+if the AEGC has Converged or not (Searching). This control replaces the old
+AeLocked control, as it was insufficient for reporting the AE state.
+
+There is a caveat to manual mode: the manual control value is not retained if
+it is set during auto mode. This means that if manual mode is entered without
+also setting the manual value, then it will enter a state similar to "locked",
+where the last automatically computed value while the mode was auto will be
+used. Once the manual value is set, then that will be used and retained as
+usual.
+
+This simulates an auto -> locked -> manual or auto -> manual state transition,
+and makes it impossible to do the nonsensical manual -> locked state
+transition.
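+
+For illustration, the resulting flickerless auto-to-manual transition could
+look as follows from the application side. This is a minimal sketch, given a
+``Request *request`` and assuming the ``controls::ExposureTimeMode`` control
+with an enum value identifier of the form ``ExposureTimeModeManual``:
+
+.. code:: cpp
+
+   /* Request manual mode, without supplying an exposure time value. */
+   request->controls().set(controls::ExposureTimeMode,
+                           controls::ExposureTimeModeManual);
+
+   /* Later, when the request completes... */
+   const ControlList &metadata = request->metadata();
+   const auto mode = metadata.get(controls::ExposureTimeMode);
+   if (mode && *mode == controls::ExposureTimeModeManual) {
+           /*
+            * Manual mode has taken effect; the last AEGC-computed value
+            * is latched, and can be read back and resubmitted from here
+            * on as the manual value.
+            */
+           const auto exposureTime = metadata.get(controls::ExposureTime);
+   }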
+
+AeEnable still exists to allow applications to set the mode of all the
+sub-controls at once. Besides being for convenience, this will also be useful
+when we eventually implement an aperture control. This is because applications
+written before the aperture control becomes available would still be able to
+set the aperture mode to auto or manual, as opposed to having the aperture
+stuck at auto while the application really wanted manual. Although the
+aperture would still be stuck at an uncontrollable value, at least it would be
+at a static usable value as opposed to varying via the AEGC algorithm.
+
+With this solution, the earlier example would become:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N+2 | | N+3 | | N+4 | | N+5 | | N+6 | | N+7 | | N+8 | | N+9 | | N+10|
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ Mode requested: Auto Manual Manual Manual Manual Manual Manual Manual Manual
+ Exp requested: N/A None None None None 10 None 10 10
+ Set in Frame: N+4 N+5 N+6 N+7 N+8 N+9 N+10 N+11 N+12
+
+ Mode used: Auto Auto Auto Manual Manual Manual Manual Manual Manual
+ Exp used: 2 3 4 5 5 5 5 10 10
+
+This example is extended by a few frames to exhibit the simulated "locked"
+state. At frame N+5 the application has confirmed that the manual mode has been
+entered, but does not provide a manual value until request N+7. Thus, the
+value used in requests N+5 and N+6 (where the mode is manual but no manual
+value has been supplied yet) comes from the last value that was used while
+the mode was auto, which comes from frame N+4.
+
+Then, in N+7, a manual value of 10 is supplied. It takes until frame N+9 for
+the exposure to be applied. N+8 does not supply a manual value, but the last
+supplied value is retained, so a manual value of 10 is still used and set in
+frame N+10.
+
+Although this behavior is the same as what we had with waiting for the manual
+mode to propagate (in the section "Description of the problem"), this time it
+is correct as we have defined specifically that if a manual value was specified
+while the mode was auto, it will not be retained.
+
+Description of the controls
+---------------------------
+
+As described above, libcamera offers the following controls related to exposure
+and gain:
+
+- AnalogueGain
+
+- AnalogueGainMode
+
+- ExposureTime
+
+- ExposureTimeMode
+
+- AeState
+
+- AeEnable
+
+Auto-exposure and auto-gain can be enabled and disabled separately using the
+ExposureTimeMode and AnalogueGainMode controls respectively. The AeEnable
+control can also be used, as it sets both of the modes simultaneously. The
+AeEnable control is not returned in metadata.
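+
+For example, given ``controls`` as the ``ControlList`` of a request, the
+following are equivalent (a sketch, assuming the generated control identifiers
+and enum values):
+
+.. code:: cpp
+
+   /* Disable AE entirely... */
+   controls.set(controls::AeEnable, false);
+
+   /* ...which is shorthand for setting both modes to manual. */
+   controls.set(controls::ExposureTimeMode, controls::ExposureTimeModeManual);
+   controls.set(controls::AnalogueGainMode, controls::AnalogueGainModeManual);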
+
+When the respective mode is set to auto, the respective value that is computed
+by the AEGC algorithm is applied to the image sensor. Any value that is
+supplied in the manual ExposureTime/AnalogueGain control is ignored and not
+retained. Another way to understand this is that when the mode transitions from
+auto to manual, the internally stored control value is overwritten with the
+last value computed by the auto algorithm.
+
+This means that when we transition from auto to manual without supplying a
+manual control value, the last value that was set by the AEGC algorithm will
+keep being used. This can be used to do a flickerless transition from auto to
+manual as described earlier. If the camera started out in manual mode and no
+corresponding value has been supplied yet, then a best-effort default value
+shall be set.
+
+The manual control value can be set in the same request as setting the mode to
+manual if the desired manual control value is already known.
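+
+For instance, continuing the sketch above (again assuming the generated
+control identifiers):
+
+.. code:: cpp
+
+   /* Switch to manual mode and supply the value in a single request. */
+   controls.set(controls::ExposureTimeMode, controls::ExposureTimeModeManual);
+   controls.set(controls::ExposureTime, 10000); /* 10 ms, in microseconds */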
+
+Transitioning from manual to auto shall be implicitly flickerless, as the AEGC
+algorithms are expected to start running from the last manual value.
+
+The AeState metadata reports the state of the AE algorithm. As AE cannot
+compute exposure and gain separately, the state of the AE component is
+unified. There are three states: Idle, Searching, and Converged.
+
+The state shall be Idle if both ExposureTimeMode and AnalogueGainMode
+are set to Manual. If the camera only supports one of the two controls,
+then the state shall be Idle if that one control is set to Manual. If
+the camera does not support Manual for at least one of the two controls,
+then the state will never be Idle, as AE will always be running.
+
+The state shall be Searching if at least one of exposure or gain calculated
+by the AE algorithm is used (that is, at least one of the two modes is Auto),
+*and* the value(s) have not converged yet.
+
+The state shall be Converged if at least one of exposure or gain calculated
+by the AE algorithm is used (that is, at least one of the two modes is Auto),
+*and* the value(s) have converged.
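+
+A sketch of how an application might check for convergence, assuming enum
+value identifiers of the form ``controls::AeStateConverged``:
+
+.. code:: cpp
+
+   const auto state = request->metadata().get(controls::AeState);
+   if (state && *state == controls::AeStateConverged) {
+           /* At least one of exposure or gain is on auto and has settled. */
+   }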
diff --git a/Documentation/docs.rst b/Documentation/docs.rst
deleted file mode 100644
index a6e8a59a..00000000
--- a/Documentation/docs.rst
+++ /dev/null
@@ -1,400 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
-
-.. contents::
- :local:
-
-*************
-Documentation
-*************
-
-.. toctree::
- :hidden:
-
- API <api-html/index>
-
-API
-===
-
-The libcamera API is extensively documented using Doxygen. The :ref:`API
-nightly build <api>` contains the most up-to-date API documentation, built from
-the latest master branch.
-
-Feature Requirements
-====================
-
-Device enumeration
-------------------
-
-The library shall support enumerating all camera devices available in the
-system, including both fixed cameras and hotpluggable cameras. It shall
-support cameras plugged and unplugged after the initialization of the
-library, and shall offer a mechanism to notify applications of camera plug
-and unplug.
-
-The following types of cameras shall be supported:
-
-* Internal cameras designed for point-and-shoot still image and video
- capture usage, either controlled directly by the CPU, or exposed through
- an internal USB bus as a UVC device.
-
-* External UVC cameras designed for video conferencing usage.
-
-Other types of camera, including analog cameras, depth cameras, thermal
-cameras, external digital picture or movie cameras, are out of scope for
-this project.
-
-A hardware device that includes independent camera sensors, such as front
-and back sensors in a phone, shall be considered as multiple camera devices
-for the purpose of this library.
-
-Independent Camera Devices
---------------------------
-
-When multiple cameras are present in the system and are able to operate
-independently from each other, the library shall expose them as multiple
-camera devices and support parallel operation without any additional usage
-restriction apart from the limitations inherent to the hardware (such as
-memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
-
-Independent processes shall be able to use independent cameras devices
-without interfering with each other. A single camera device shall be
-usable by a single process at a time.
-
-Multiple streams support
-------------------------
-
-The library shall support multiple video streams running in parallel
-for each camera device, within the limits imposed by the system.
-
-Per frame controls
-------------------
-
-The library shall support controlling capture parameters for each stream
-on a per-frame basis, on a best effort basis based on the capabilities of the
-hardware and underlying software stack (including kernel drivers and
-firmware). It shall apply capture parameters to the frame they target, and
-report the value of the parameters that have effectively been used for each
-captured frame.
-
-When a camera device supports multiple streams, the library shall allow both
-control of each stream independently, and control of multiple streams
-together. Streams that are controlled together shall be synchronized. No
-synchronization is required for streams controlled independently.
-
-Capability Enumeration
-----------------------
-
-The library shall expose capabilities of each camera device in a way that
-allows applications to discover those capabilities dynamically. Applications
-shall be allowed to cache capabilities for as long as they are using the
-library. If capabilities can change at runtime, the library shall offer a
-mechanism to notify applications of such changes. Applications shall not
-cache capabilities in long term storage between runs.
-
-Capabilities shall be discovered dynamically at runtime from the device when
-possible, and may come, in part or in full, from platform configuration
-data.
-
-Device Profiles
----------------
-
-The library may define different camera device profiles, each with a minimum
-set of required capabilities. Applications may use those profiles to quickly
-determine the level of features exposed by a device without parsing the full
-list of capabilities. Camera devices may implement additional capabilities
-on top of the minimum required set for the profile they expose.
-
-3A and Image Enhancement Algorithms
------------------------------------
-
-The camera devices shall implement auto exposure, auto gain and auto white
-balance. Camera devices that include a focus lens shall implement auto
-focus. Additional image enhancement algorithms, such as noise reduction or
-video stabilization, may be implemented.
-
-All algorithms may be implemented in hardware or firmware outside of the
-library, or in software in the library. They shall all be controllable by
-applications.
-
-The library shall be architectured to isolate the 3A and image enhancement
-algorithms in a component with a documented API, respectively called the 3A
-component and the 3A API. The 3A API shall be stable, and shall allow both
-open-source and closed-source implementations of the 3A component.
-
-The library may include statically-linked open-source 3A components, and
-shall support dynamically-linked open-source and closed-source 3A
-components.
-
-Closed-source 3A Component Sandboxing
--------------------------------------
-
-For security purposes, it may be desired to run closed-source 3A components
-in a separate process. The 3A API would in such a case be transported over
-IPC. The 3A API shall make it possible to use any IPC mechanism that
-supports passing file descriptors.
-
-The library may implement an IPC mechanism, and shall support third-party
-platform-specific IPC mechanisms through the implementation of a
-platform-specific 3A API wrapper. No modification to the library shall be
-needed to use such third-party IPC mechanisms.
-
-The 3A component shall not directly access any device node on the system.
-Such accesses shall instead be performed through the 3A API. The library
-shall validate all accesses and restrict them to what is absolutely required
-by 3A components.
-
-V4L2 Compatibility Layer
-------------------------
-
-The project shall support traditional V4L2 application through an additional
-libcamera wrapper library. The wrapper library shall trap all accesses to
-camera devices through `LD_PRELOAD`, and route them through libcamera to
-emulate a high-level V4L2 camera device. It shall expose camera device
-features on a best-effort basis, and aim for the level of features
-traditionally available from a UVC camera designed for video conferencing.
-
-Android Camera HAL v3 Compatibility
------------------------------------
-
-The library API shall expose all the features required to implement an
-Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
-omitted as long as they can be implemented separately in the HAL, such as
-JPEG encoding, or YUV reprocessing.
-
-
-Camera Stack
-============
-
-::
-
- a c / +-------------+ +-------------+ +-------------+ +-------------+
- p a | | Native | | Framework | | Native | | Android |
- p t | | V4L2 | | Application | | libcamera | | Camera |
- l i | | Application | | (gstreamer) | | Application | | Framework |
- i o \ +-------------+ +-------------+ +-------------+ +-------------+
- n ^ ^ ^ ^
- | | | |
- l a | | | |
- i d v v | v
- b a / +-------------+ +-------------+ | +-------------+
- c p | | V4L2 | | Camera | | | Android |
- a t | | Compat. | | Framework | | | Camera |
- m a | | | | (gstreamer) | | | HAL |
- e t \ +-------------+ +-------------+ | +-------------+
- r i ^ ^ | ^
- a o | | | |
- n | | | |
- / | ,................................................
- | | ! : Language : !
- l f | | ! : Bindings : !
- i r | | ! : (optional) : !
- b a | | \...............................................'
- c m | | | | |
- a e | | | | |
- m w | v v v v
- e o | +----------------------------------------------------------------+
- r r | | |
- a k | | libcamera |
- | | |
- \ +----------------------------------------------------------------+
- ^ ^ ^
- Userspace | | |
- ------------------------ | ---------------- | ---------------- | ---------------
- Kernel | | |
- v v v
- +-----------+ +-----------+ +-----------+
- | Media | <--> | Video | <--> | V4L2 |
- | Device | | Device | | Subdev |
- +-----------+ +-----------+ +-----------+
-
-The camera stack comprises four software layers. From bottom to top:
-
-* The kernel drivers control the camera hardware and expose a
- low-level interface to userspace through the Linux kernel V4L2
- family of APIs (Media Controller API, V4L2 Video Device API and
- V4L2 Subdev API).
-
-* The libcamera framework is the core part of the stack. It
- handles all control of the camera devices in its core component,
- libcamera, and exposes a native C++ API to upper layers. Optional
- language bindings allow interfacing to libcamera from other
- programming languages.
-
- Those components live in the same source code repository and
- all together constitute the libcamera framework.
-
-* The libcamera adaptation is an umbrella term designating the
- components that interface to libcamera in other frameworks.
- Notable examples are a V4L2 compatibility layer, a gstreamer
- libcamera element, and an Android camera HAL implementation based
- on libcamera.
-
- Those components can live in the libcamera project source code
- in separate repositories, or move to their respective project's
- repository (for instance the gstreamer libcamera element).
-
-* The applications and upper level frameworks are based on the
- libcamera framework or libcamera adaptation, and are outside of
- the scope of the libcamera project.
-
-
-libcamera Architecture
-======================
-
-::
-
- ---------------------------< libcamera Public API >---------------------------
- ^ ^
- | |
- v v
- +-------------+ +-------------------------------------------------+
- | Camera | | Camera Device |
- | Devices | | +---------------------------------------------+ |
- | Manager | | | Device-Agnostic | |
- +-------------+ | | | |
- ^ | | +------------------------+ |
- | | | | ~~~~~~~~~~~~~~~~~~~~~ |
- | | | | { +---------------+ } |
- | | | | } | ////Image//// | { |
- | | | | <-> | /Processing// | } |
- | | | | } | /Algorithms// | { |
- | | | | { +---------------+ } |
- | | | | ~~~~~~~~~~~~~~~~~~~~~ |
- | | | | ======================== |
- | | | | +---------------+ |
- | | | | | //Pipeline/// | |
- | | | | <-> | ///Handler/// | |
- | | | | | ///////////// | |
- | | +--------------------+ +---------------+ |
- | | Device-Specific |
- | +-------------------------------------------------+
- | ^ ^
- | | |
- v v v
- +--------------------------------------------------------------------+
- | Helpers and Support Classes |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
- | | Support | | Allocator | | IPC | | Manager | |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | +-------------+ +-------------+ |
- | | Pipeline | | ... | |
- | | Runner | | | |
- | +-------------+ +-------------+ |
- +--------------------------------------------------------------------+
-
- /// Device-Specific Components
- ~~~ Sandboxing
-
-While offering a unified API towards upper layers, and presenting
-itself as a single library, libcamera isn't monolithic. It exposes
-multiple components through its public API, is built around a set of
-separate helpers internally, uses device-specific components and can
-load dynamic plugins.
-
-Camera Devices Manager
- The Camera Devices Manager provides a view of available cameras
- in the system. It performs cold enumeration and runtime camera
- management, and supports a hotplug notification mechanism in its
- public API.
-
- To avoid the cost associated with cold enumeration of all devices
- at application start, and to arbitrate concurrent access to camera
- devices, the Camera Devices Manager could later be split to a
- separate service, possibly with integration in platform-specific
- device management.
-
-Camera Device
- The Camera Device represents a camera device to upper layers. It
- exposes full control of the device through the public API, and is
- thus the highest level object exposed by libcamera.
-
- Camera Device instances are created by the Camera Devices
- Manager. An optional function to create new instances could be exposed
- through the public API to speed up initialization when the upper
- layer knows how to directly address camera devices present in the
- system.
-
-Pipeline Handler
- The Pipeline Handler manages complex pipelines exposed by the kernel drivers
- through the Media Controller and V4L2 APIs. It abstracts pipeline handling to
- hide device-specific details to the rest of the library, and implements both
- pipeline configuration based on stream configuration, and pipeline runtime
- execution and scheduling when needed by the device.
-
- This component is device-specific and is part of the libcamera code base. As
- such it is covered by the same free software license as the rest of libcamera
- and needs to be contributed upstream by device vendors. The Pipeline Handler
- lives in the same process as the rest of the library, and has access to all
- helpers and kernel camera-related devices.
-
-Image Processing Algorithms
- Together with the hardware image processing and hardware statistics
- collection, the Image Processing Algorithms implement 3A (Auto-Exposure,
- Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
- and interact with the kernel camera devices to control hardware image
- processing based on the parameters supplied by upper layers, closing the
- control loop of the ISP.
-
- This component is device-specific and is loaded as an external plugin. It can
- be part of the libcamera code base, in which case it is covered by the same
- license, or provided externally as an open-source or closed-source component.
-
- The component is sandboxed and can only interact with libcamera through
- internal APIs specifically marked as such. In particular it will have no
- direct access to kernel camera devices, and all its accesses to image and
- metadata will be mediated by dmabuf instances explicitly passed to the
- component. The component must be prepared to run in a process separate from
- the main libcamera process, and to have a very restricted view of the system,
- including no access to networking APIs and limited access to file systems.
-
- The sandboxing mechanism isn't defined by libcamera. One example
- implementation will be provided as part of the project, and platforms vendors
- will be able to provide their own sandboxing mechanism as a plugin.
-
- libcamera should provide a basic implementation of Image Processing
- Algorithms, to serve as a reference for the internal API. Device vendors are
- expected to provide a full-fledged implementation compatible with their
- Pipeline Handler. One goal of the libcamera project is to create an
- environment in which the community will be able to compete with the
- closed-source vendor binaries and develop a high quality open source
- implementation.
-
-Helpers and Support Classes
- While Pipeline Handlers are device-specific, implementations are expected to
- share code due to usage of identical APIs towards the kernel camera drivers
- and the Image Processing Algorithms. This includes without limitation handling
- of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
- discovery, configuration and scheduling. Such code will be factored out to
- helpers when applicable.
-
- Other parts of libcamera will also benefit from factoring code out to
- self-contained support classes, even if such code is present only once in the
- code base, in order to keep the source code clean and easy to read. This
- should be the case for instance for plugin management.
-
-
-V4L2 Compatibility Layer
-------------------------
-
-V4L2 compatibility is achieved through a shared library that traps all
-accesses to camera devices and routes them to libcamera to emulate high-level
-V4L2 camera devices. It is injected in a process address space through
-`LD_PRELOAD` and is completely transparent for applications.
-
-The compatibility layer exposes camera device features on a best-effort basis,
-and aims for the level of features traditionally available from a UVC camera
-designed for video conferencing.
-
-
-Android Camera HAL
-------------------
-
-Camera support for Android is achieved through a generic Android
-camera HAL implementation on top of libcamera. The HAL will implement internally
-features required by Android and missing from libcamera, such as JPEG encoding
-support.
-
-The Android camera HAL implementation will initially target the
-LIMITED hardware level, with support for the FULL level then being gradually
-implemented.
diff --git a/Documentation/documentation-contents.rst b/Documentation/documentation-contents.rst
new file mode 100644
index 00000000..5c111849
--- /dev/null
+++ b/Documentation/documentation-contents.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. container:: documentation-nav
+
+ * **Documentation for Users**
+ * :doc:`Introduction </introduction>`
+ * :doc:`/feature_requirements`
+ * :doc:`/guides/application-developer`
+ * :doc:`/python-bindings`
+ * :doc:`/environment_variables`
+ * :doc:`/api-html/index`
+ * :doc:`/code-of-conduct`
+ * |
+ * **Documentation for Developers**
+ * :doc:`/libcamera_architecture`
+ * :doc:`/guides/pipeline-handler`
+ * :doc:`/guides/ipa`
+ * :doc:`/camera-sensor-model`
+ * :doc:`/guides/tracing`
+ * :doc:`/software-isp-benchmarking`
+ * :doc:`/coding-style`
+ * :doc:`/internal-api-html/index`
+ * |
+ * **Documentation for System Integrators**
+ * :doc:`/lens_driver_requirements`
+ * :doc:`/sensor_driver_requirements`
+
+..
+ The following directive adds the "documentation" class to all of the pages
+ generated by sphinx. This is not relevant in libcamera nor addressed in the
+ theme's CSS, since all of the pages here are documentation. It **is** used
+ to properly format the documentation pages on libcamera.org and so should not
+ be removed.
+
+.. rst-class:: documentation
diff --git a/Documentation/environment_variables.rst b/Documentation/environment_variables.rst
index a9b230bc..6f123558 100644
--- a/Documentation/environment_variables.rst
+++ b/Documentation/environment_variables.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
Environment variables
=====================
@@ -37,11 +39,29 @@ LIBCAMERA_IPA_MODULE_PATH
Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib``
+LIBCAMERA_IPA_PROXY_PATH
+ Define custom full path for a proxy worker for a given executable name.
+
+ Example value: ``${HOME}/.libcamera/proxy/worker:/opt/libcamera/vendor/proxy/worker``
+
+LIBCAMERA_PIPELINES_MATCH_LIST
+ Define an ordered list of pipeline names to be used to match the media
+ devices in the system. The pipeline handler names used to populate the
+ variable are the ones passed to the REGISTER_PIPELINE_HANDLER() macro in the
+ source code.
+
+ Example value: ``rkisp1,simple``
+
LIBCAMERA_RPI_CONFIG_FILE
Define a custom configuration file to use in the Raspberry Pi pipeline handler.
Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml``
+LIBCAMERA_<NAME>_TUNING_FILE
+ Define a custom IPA tuning file to use with the pipeline handler `NAME`.
+
+ Example value: ``/usr/local/share/libcamera/ipa/rpi/vc4/custom_sensor.json``
+
Further details
---------------
diff --git a/Documentation/feature_requirements.rst b/Documentation/feature_requirements.rst
new file mode 100644
index 00000000..e6b74a62
--- /dev/null
+++ b/Documentation/feature_requirements.rst
@@ -0,0 +1,150 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+Feature Requirements
+====================
+
+Device enumeration
+------------------
+
+The library shall support enumerating all camera devices available in the
+system, including both fixed cameras and hotpluggable cameras. It shall
+support cameras plugged and unplugged after the initialization of the
+library, and shall offer a mechanism to notify applications of camera plug
+and unplug.
+
+The following types of cameras shall be supported:
+
+* Internal cameras designed for point-and-shoot still image and video
+ capture usage, either controlled directly by the CPU, or exposed through
+ an internal USB bus as a UVC device.
+
+* External UVC cameras designed for video conferencing usage.
+
+Other types of camera, including analog cameras, depth cameras, thermal
+cameras, external digital picture or movie cameras, are out of scope for
+this project.
+
+A hardware device that includes independent camera sensors, such as front
+and back sensors in a phone, shall be considered as multiple camera devices
+for the purpose of this library.
+
+Independent Camera Devices
+--------------------------
+
+When multiple cameras are present in the system and are able to operate
+independently from each other, the library shall expose them as multiple
+camera devices and support parallel operation without any additional usage
+restriction apart from the limitations inherent to the hardware (such as
+memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
+
+Independent processes shall be able to use independent cameras devices
+without interfering with each other. A single camera device shall be
+usable by a single process at a time.
+
+Multiple streams support
+------------------------
+
+The library shall support multiple video streams running in parallel
+for each camera device, within the limits imposed by the system.
+
+Per frame controls
+------------------
+
+The library shall support controlling capture parameters for each stream
+on a per-frame basis, on a best effort basis based on the capabilities of the
+hardware and underlying software stack (including kernel drivers and
+firmware). It shall apply capture parameters to the frame they target, and
+report the value of the parameters that have effectively been used for each
+captured frame.
+
+When a camera device supports multiple streams, the library shall allow both
+control of each stream independently, and control of multiple streams
+together. Streams that are controlled together shall be synchronized. No
+synchronization is required for streams controlled independently.
+
+Capability Enumeration
+----------------------
+
+The library shall expose capabilities of each camera device in a way that
+allows applications to discover those capabilities dynamically. Applications
+shall be allowed to cache capabilities for as long as they are using the
+library. If capabilities can change at runtime, the library shall offer a
+mechanism to notify applications of such changes. Applications shall not
+cache capabilities in long term storage between runs.
+
+Capabilities shall be discovered dynamically at runtime from the device when
+possible, and may come, in part or in full, from platform configuration
+data.
+
+Device Profiles
+---------------
+
+The library may define different camera device profiles, each with a minimum
+set of required capabilities. Applications may use those profiles to quickly
+determine the level of features exposed by a device without parsing the full
+list of capabilities. Camera devices may implement additional capabilities
+on top of the minimum required set for the profile they expose.
+
+3A and Image Enhancement Algorithms
+-----------------------------------
+
+The library shall provide a basic implementation of Image Processing Algorithms
+to serve as a reference for the internal API. This shall include auto exposure,
+auto gain and auto white balance. Camera devices that include a focus lens shall
+implement auto focus. Additional image enhancement algorithms, such as noise
+reduction or video stabilization, may be implemented. Device vendors are
+expected to provide a fully-fledged implementation compatible with their
+Pipeline Handler. One goal of the libcamera project is to create an environment
+in which the community will be able to compete with the closed-source vendor
+binaries and develop a high quality open source implementation.
+
+All algorithms may be implemented in hardware or firmware outside of the
+library, or in software in the library. They shall all be controllable by
+applications.
+
+The library shall be architectured to isolate the 3A and image enhancement
+algorithms in a component with a documented API, respectively called the 3A
+component and the 3A API. The 3A API shall be stable, and shall allow both
+open-source and closed-source implementations of the 3A component.
+
+The library may include statically-linked open-source 3A components, and
+shall support dynamically-linked open-source and closed-source 3A
+components.
+
+Closed-source 3A Component Sandboxing
+-------------------------------------
+
+For security purposes, it may be desired to run closed-source 3A components
+in a separate process. The 3A API would in such a case be transported over
+IPC. The 3A API shall make it possible to use any IPC mechanism that
+supports passing file descriptors.
+
+The library may implement an IPC mechanism, and shall support third-party
+platform-specific IPC mechanisms through the implementation of a
+platform-specific 3A API wrapper. No modification to the library shall be
+needed to use such third-party IPC mechanisms.
+
+The 3A component shall not directly access any device node on the system.
+Such accesses shall instead be performed through the 3A API. The library
+shall validate all accesses and restrict them to what is absolutely required
+by 3A components.
+
+V4L2 Compatibility Layer
+------------------------
+
+The project shall support traditional V4L2 applications through an additional
+libcamera wrapper library. The wrapper library shall trap all accesses to
+camera devices through `LD_PRELOAD`, and route them through libcamera to
+emulate a high-level V4L2 camera device. It shall expose camera device
+features on a best-effort basis, and aim for the level of features
+traditionally available from a UVC camera designed for video conferencing.
+
+Android Camera HAL v3 Compatibility
+-----------------------------------
+
+The library API shall expose all the features required to implement an
+Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
+omitted as long as they can be implemented separately in the HAL, such as
+JPEG encoding, or YUV reprocessing.
diff --git a/Documentation/gen-doxyfile.py b/Documentation/gen-doxyfile.py
new file mode 100755
index 00000000..c265bc2f
--- /dev/null
+++ b/Documentation/gen-doxyfile.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2024, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Generate Doxyfile from a template
+
+import argparse
+import os
+import string
+import sys
+
+
+def fill_template(template, data):
+
+    # Read the template and substitute its ${...} placeholders with the
+    # values supplied in data.
+    with open(template, 'rb') as f:
+        template = f.read().decode('utf-8')
+
+    return string.Template(template).substitute(data)
+
+
+def main(argv):
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-o', dest='output', metavar='file',
+ type=argparse.FileType('w', encoding='utf-8'),
+ default=sys.stdout,
+ help='Output file name (default: standard output)')
+ parser.add_argument('template', metavar='doxyfile.tmpl', type=str,
+ help='Doxyfile template')
+ parser.add_argument('inputs', type=str, nargs='*',
+ help='Input files')
+
+ args = parser.parse_args(argv[1:])
+
+    inputs = [f'"{os.path.realpath(path)}"' for path in args.inputs]
+ data = fill_template(args.template, {'inputs': (' \\\n' + ' ' * 25).join(inputs)})
+ args.output.write(data)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/Documentation/getting-started.rst b/Documentation/getting-started.rst
index 987f43f7..63b050eb 100644
--- a/Documentation/getting-started.rst
+++ b/Documentation/getting-started.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+
.. Getting started information is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-getting-started
diff --git a/Documentation/guides/application-developer.rst b/Documentation/guides/application-developer.rst
index 9a9905b1..f3798d17 100644
--- a/Documentation/guides/application-developer.rst
+++ b/Documentation/guides/application-developer.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Using libcamera in a C++ application
====================================
@@ -116,19 +118,21 @@ available.
.. code:: cpp
- if (cm->cameras().empty()) {
+ auto cameras = cm->cameras();
+ if (cameras.empty()) {
std::cout << "No cameras were identified on the system."
<< std::endl;
cm->stop();
return EXIT_FAILURE;
}
- std::string cameraId = cm->cameras()[0]->id();
- camera = cm->get(cameraId);
+ std::string cameraId = cameras[0]->id();
+ camera = cm->get(cameraId);
/*
- * Note that is equivalent to:
- * camera = cm->cameras()[0];
+ * Note that `camera` may not compare equal to `cameras[0]`.
+ * In fact, it might simply be a `nullptr`, as the particular
+ * device might have disappeared (and reappeared) in the meantime.
*/
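+
+   /* Hypothetical guard: bail out if the camera has disappeared. */
+   if (!camera) {
+           std::cout << "Camera " << cameraId << " was not found." << std::endl;
+           cm->stop();
+           return EXIT_FAILURE;
+   }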
Once a camera has been selected an application needs to acquire an exclusive
@@ -479,7 +483,7 @@ instance. An example of how to write image data to disk is available in the
`FileSink class`_ which is a part of the ``cam`` utility application in the
libcamera repository.
-.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp
+.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/apps/cam/file_sink.cpp
With the handling of this request completed, it is possible to re-use the
request and the associated buffers and re-queue it to the camera
diff --git a/Documentation/guides/introduction.rst b/Documentation/guides/introduction.rst
deleted file mode 100644
index 700ec2d3..00000000
--- a/Documentation/guides/introduction.rst
+++ /dev/null
@@ -1,319 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
-
-Developers guide to libcamera
-=============================
-
-The Linux kernel handles multimedia devices through the 'Linux media' subsystem
-and provides a set of APIs (application programming interfaces) known
-collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
-which provide an interface to interact and control media devices.
-
-Included in this subsystem are drivers for camera sensors, CSI2 (Camera
-Serial Interface) receivers, and ISPs (Image Signal Processors)
-
-The usage of these drivers to provide a functioning camera stack is a
-responsibility that lies in userspace which is commonly implemented separately
-by vendors without a common architecture or API for application developers.
-
-libcamera provides a complete camera stack for Linux based systems to abstract
-functionality desired by camera application developers and process the
-configuration of hardware and image control algorithms required to obtain
-desirable results from the camera.
-
-.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
-.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
-
-
-In this developers guide, we will explore the `Camera Stack`_ and how it is
-can be visualised at a high level, and explore the internal `Architecture`_ of
-the libcamera library with its components. The current `Platform Support`_ is
-detailed, as well as an overview of the `Licensing`_ requirements of the
-project.
-
-This introduction is followed by a walkthrough tutorial to newcomers wishing to
-support a new platform with the `Pipeline Handler Writers Guide`_ and for those
-looking to make use of the libcamera native API an `Application Writers Guide`_
-provides a tutorial of the key APIs exposed by libcamera.
-
-.. _Pipeline Handler Writers Guide: pipeline-handler.html
-.. _Application Writers Guide: application-developer.html
-
-.. TODO: Correctly link to the other articles of the guide
-
-Camera Stack
-------------
-
-The libcamera library is implemented in userspace, and makes use of underlying
-kernel drivers that directly interact with hardware.
-
-Applications can make use of libcamera through the native `libcamera API`_'s or
-through an adaptation layer integrating libcamera into a larger framework.
-
-.. _libcamera API: https://www.libcamera.org/api-html/index.html
-
-::
-
- Application Layer
- / +--------------+ +--------------+ +--------------+ +--------------+
- | | Native | | Framework | | Native | | Android |
- | | V4L2 | | Application | | libcamera | | Camera |
- | | Application | | (gstreamer) | | Application | | Framework |
- \ +--------------+ +--------------+ +--------------+ +--------------+
-
- ^ ^ ^ ^
- | | | |
- | | | |
- v v | v
- Adaptation Layer |
- / +--------------+ +--------------+ | +--------------+
- | | V4L2 | | gstreamer | | | Android |
- | | Compatibility| | element | | | Camera |
- | | (preload) | |(libcamerasrc)| | | HAL |
- \ +--------------+ +--------------+ | +--------------+
- |
- ^ ^ | ^
- | | | |
- | | | |
- v v v v
- libcamera Framework
- / +--------------------------------------------------------------------+
- | | |
- | | libcamera |
- | | |
- \ +--------------------------------------------------------------------+
-
- ^ ^ ^
- Userspace | | |
- --------------------- | ---------------- | ---------------- | ---------------
- Kernel | | |
- v v v
-
- +-----------+ +-----------+ +-----------+
- | Media | <--> | Video | <--> | V4L2 |
- | Device | | Device | | Subdev |
- +-----------+ +-----------+ +-----------+
-
-The camera stack comprises of four software layers. From bottom to top:
-
-* The kernel drivers control the camera hardware and expose a low-level
- interface to userspace through the Linux kernel V4L2 family of APIs
- (Media Controller API, V4L2 Video Device API and V4L2 Subdev API).
-
-* The libcamera framework is the core part of the stack. It handles all control
- of the camera devices in its core component, libcamera, and exposes a native
- C++ API to upper layers.
-
-* The libcamera adaptation layer is an umbrella term designating the components
- that interface to libcamera in other frameworks. Notable examples are the V4L2
- compatibility layer, the gstreamer libcamera element, and the Android camera
- HAL implementation based on libcamera which are provided as a part of the
- libcamera project.
-
-* The applications and upper level frameworks are based on the libcamera
- framework or libcamera adaptation, and are outside of the scope of the
- libcamera project, however example native applications (cam, qcam) are
- provided for testing.
-
-
-V4L2 Compatibility Layer
- V4L2 compatibility is achieved through a shared library that traps all
- accesses to camera devices and routes them to libcamera to emulate high-level
- V4L2 camera devices. It is injected in a process address space through
- ``LD_PRELOAD`` and is completely transparent for applications.
-
- The compatibility layer exposes camera device features on a best-effort basis,
- and aims for the level of features traditionally available from a UVC camera
- designed for video conferencing.
-
-Android Camera HAL
- Camera support for Android is achieved through a generic Android camera HAL
- implementation on top of libcamera. The HAL implements features required by
- Android and out of scope from libcamera, such as JPEG encoding support.
-
- This component is used to provide support for ChromeOS platforms
-
-GStreamer element (gstlibcamerasrc)
- A `GStreamer element`_ is provided to allow capture from libcamera supported
- devices through GStreamer pipelines, and connect to other elements for further
- processing.
-
- Development of this element is ongoing and is limited to a single stream.
-
-Native libcamera API
- Applications can make use of the libcamera API directly using the C++
- API. An example application and walkthrough using the libcamera API can be
- followed in the `Application Writers Guide`_
-
-.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
-
-Architecture
-------------
-
-While offering a unified API towards upper layers, and presenting itself as a
-single library, libcamera isn't monolithic. It exposes multiple components
-through its public API and is built around a set of separate helpers internally.
-Hardware abstractions are handled through the use of device-specific components
-where required and dynamically loadable plugins are used to separate image
-processing algorithms from the core libcamera codebase.
-
-::
-
- --------------------------< libcamera Public API >---------------------------
- ^ ^
- | |
- v v
- +-------------+ +---------------------------------------------------+
- | Camera | | Camera Device |
- | Manager | | +-----------------------------------------------+ |
- +-------------+ | | Device-Agnostic | |
- ^ | | | |
- | | | +--------------------------+ |
- | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
- | | | | { +-----------------+ } |
- | | | | } | //// Image //// | { |
- | | | | <-> | / Processing // | } |
- | | | | } | / Algorithms // | { |
- | | | | { +-----------------+ } |
- | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
- | | | | ========================== |
- | | | | +-----------------+ |
- | | | | | // Pipeline /// | |
- | | | | <-> | /// Handler /// | |
- | | | | | /////////////// | |
- | | +--------------------+ +-----------------+ |
- | | Device-Specific |
- | +---------------------------------------------------+
- | ^ ^
- | | |
- v v v
- +--------------------------------------------------------------------+
- | Helpers and Support Classes |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
- | | Support | | Allocator | | IPC | | Manager | |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | +-------------+ +-------------+ |
- | | Pipeline | | ... | |
- | | Runner | | | |
- | +-------------+ +-------------+ |
- +--------------------------------------------------------------------+
-
- /// Device-Specific Components
- ~~~ Sandboxing
-
-
-Camera Manager
- The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
- manage each Camera that libcamera supports. The Camera Manager supports
- hotplug detection and notification events when supported by the underlying
- kernel devices.
-
- There is only ever one instance of the Camera Manager running per application.
- Each application's instance of the Camera Manager ensures that only a single
- application can take control of a camera device at once.
-
- Read the `Camera Manager API`_ documentation for more details.
-
-.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
-
-Camera Device
- The Camera class represents a single item of camera hardware that is capable
- of producing one or more image streams, and provides the API to interact with
- the underlying device.
-
- If a system has multiple instances of the same hardware attached, each has its
- own instance of the camera class.
-
- The API exposes full control of the device to upper layers of libcamera through
- the public API, making it the highest level object libcamera exposes, and the
- object that all other API operations interact with from configuration to
- capture.
-
- Read the `Camera API`_ documentation for more details.
-
-.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
-
-Pipeline Handler
- The Pipeline Handler manages the complex pipelines exposed by the kernel
- drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
- handling to hide device-specific details from the rest of the library, and
- implements both pipeline configuration based on stream configuration, and
- pipeline runtime execution and scheduling when needed by the device.
-
- The Pipeline Handler lives in the same process as the rest of the library, and
- has access to all helpers and kernel camera-related devices.
-
- Hardware abstraction is handled by device specific Pipeline Handlers which are
- derived from the Pipeline Handler base class allowing commonality to be shared
- among the implementations.
-
- Derived pipeline handlers create Camera device instances based on the devices
- they detect and support on the running system, and are responsible for
- managing the interactions with a camera device.
-
- More details can be found in the `PipelineHandler API`_ documentation, and the
- `Pipeline Handler Writers Guide`_.
-
-.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
-
-Image Processing Algorithms
- An image processing algorithm (IPA) component is a loadable plugin that
- implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other
- algorithms.
-
- The algorithms run on the CPU and interact with the camera devices through the
- Pipeline Handler to control hardware image processing based on the parameters
- supplied by upper layers, maintaining state and closing the control loop
- of the ISP.
-
- The component is sandboxed and can only interact with libcamera through the
- API provided by the Pipeline Handler and an IPA has no direct access to kernel
- camera devices.
-
- Open source IPA modules built with libcamera can be run in the same process
- space as libcamera, however external IPA modules are run in a separate process
- from the main libcamera process. IPA modules have a restricted view of the
- system, including no access to networking APIs and limited access to file
- systems.
-
- IPA modules are only required for platforms and devices with an ISP controlled
- by the host CPU. Camera sensors which have an integrated ISP are not
- controlled through the IPA module.
-
-Platform Support
-----------------
-
-The library currently supports the following hardware platforms specifically
-with dedicated pipeline handlers:
-
- - Intel IPU3 (ipu3)
- - Rockchip RK3399 (rkisp1)
- - RaspberryPi 3 and 4 (rpi/vc4)
-
-Furthermore, generic platform support is provided for the following:
-
- - USB video device class cameras (uvcvideo)
- - iMX7, Allwinner Sun6i (simple)
- - Virtual media controller driver for test use cases (vimc)
-
-Licensing
----------
-
-The libcamera core, is covered by the `LGPL-2.1-or-later`_ license. Pipeline
-Handlers are a part of the libcamera code base and need to be contributed
-upstream by device vendors. IPA modules included in libcamera are covered by a
-free software license, however third-parties may develop IPA modules outside of
-libcamera and distribute them under a closed-source license, provided they do
-not include source code from the libcamera project.
-
-The libcamera project itself contains multiple libraries, applications and
-utilities. Licenses are expressed through SPDX tags in text-based files that
-support comments, and through the .reuse/dep5 file otherwise. A copy of all
-licenses are stored in the LICENSES directory, and a full summary of the
-licensing used throughout the project can be found in the COPYING.rst document.
-
-Applications which link dynamically against libcamera and use only the public
-API are an independent work of the authors and have no license restrictions
-imposed upon them from libcamera.
-
-.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
diff --git a/Documentation/guides/ipa.rst b/Documentation/guides/ipa.rst
index 25deadef..cd640563 100644
--- a/Documentation/guides/ipa.rst
+++ b/Documentation/guides/ipa.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
IPA Writer's Guide
==================
diff --git a/Documentation/guides/pipeline-handler.rst b/Documentation/guides/pipeline-handler.rst
index 728e9676..69e832a5 100644
--- a/Documentation/guides/pipeline-handler.rst
+++ b/Documentation/guides/pipeline-handler.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Pipeline Handler Writers Guide
==============================
@@ -151,13 +153,14 @@ integrates with the libcamera build system, and a *vivid.cpp* file that matches
the name of the pipeline.
In the *meson.build* file, add the *vivid.cpp* file as a build source for
-libcamera by adding it to the global meson ``libcamera_sources`` variable:
+libcamera by adding it to the global meson ``libcamera_internal_sources``
+variable:
.. code-block:: none
# SPDX-License-Identifier: CC0-1.0
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'vivid.cpp',
])
@@ -258,7 +261,7 @@ implementations for the overridden class members.
return false;
}
- REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid)
+ REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
} /* namespace libcamera */
@@ -266,6 +269,8 @@ Note that you must register the ``PipelineHandler`` subclass with the pipeline
handler factory using the `REGISTER_PIPELINE_HANDLER`_ macro which
registers it and creates a global symbol to reference the class and make it
available to try and match devices.
+String "vivid" is the name assigned to the pipeline, matching the pipeline
+subdirectory name in the source tree.
.. _REGISTER_PIPELINE_HANDLER: https://libcamera.org/api-html/pipeline__handler_8h.html
@@ -1345,7 +1350,7 @@ before being set.
continue;
}
- int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@@ -1409,7 +1414,7 @@ value translation operations:
.. code-block:: cpp
- #include <math.h>
+ #include <cmath>
Frame completion and event handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/guides/tracing.rst b/Documentation/guides/tracing.rst
index ae960d85..537dce50 100644
--- a/Documentation/guides/tracing.rst
+++ b/Documentation/guides/tracing.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Tracing Guide
=============
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 63fac72d..251112fb 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -1,26 +1,31 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
-.. Front page matter is defined in the project README file.
-.. include:: ../README.rst
- :start-after: .. section-begin-libcamera
- :end-before: .. section-end-libcamera
+.. include:: introduction.rst
.. toctree::
:maxdepth: 1
:caption: Contents:
Home <self>
- Docs <docs>
Contribute <contributing>
Getting Started <getting-started>
- Developer Guide <guides/introduction>
Application Writer's Guide <guides/application-developer>
- Pipeline Handler Writer's Guide <guides/pipeline-handler>
- IPA Writer's guide <guides/ipa>
- Tracing guide <guides/tracing>
+ Camera Sensor Model <camera-sensor-model>
Environment variables <environment_variables>
- Sensor driver requirements <sensor_driver_requirements>
+ Feature Requirements <feature_requirements>
+ IPA Writer's guide <guides/ipa>
Lens driver requirements <lens_driver_requirements>
+ libcamera Architecture <libcamera_architecture>
+ Pipeline Handler Writer's Guide <guides/pipeline-handler>
Python Bindings <python-bindings>
- Camera Sensor Model <camera-sensor-model>
+ Sensor driver requirements <sensor_driver_requirements>
+ SoftwareISP Benchmarking <software-isp-benchmarking>
+ Tracing guide <guides/tracing>
+
+ Design document: AE <design/ae>
+
+.. toctree::
+ :hidden:
+
+ introduction
diff --git a/Documentation/internal-api-html/index.rst b/Documentation/internal-api-html/index.rst
new file mode 100644
index 00000000..43768648
--- /dev/null
+++ b/Documentation/internal-api-html/index.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _internal-api:
+
+Internal API Reference
+======================
+
+:: Placeholder for Doxygen documentation
diff --git a/Documentation/introduction.rst b/Documentation/introduction.rst
new file mode 100644
index 00000000..82aa11a3
--- /dev/null
+++ b/Documentation/introduction.rst
@@ -0,0 +1,224 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+************
+Introduction
+************
+
+.. toctree::
+ :hidden:
+
+ API <api-html/index>
+ Internal API <internal-api-html/index>
+
+What is libcamera?
+==================
+
+libcamera is an open source complex camera support library for Linux, Android
+and ChromeOS. The library interfaces with Linux kernel device drivers and
+provides an intuitive API to developers in order to reduce the complexity
+involved in capturing images from complex cameras on Linux systems.
+
+What is a "complex camera"?
+===========================
+
+A modern "camera" tends to infact be several different pieces of hardware which
+must all be controlled together in order to produce and capture images of
+appropriate quality. A hardware pipeline typically consists of a camera sensor
+that captures raw frames and transmits them on a bus, a receiver that decodes
+the bus signals, and an image signal processor that processes raw frames to
+produce usable images in a standard format. The Linux kernel handles these
+multimedia devices through the 'Linux media' subsystem and provides a set of
+application programming interfaces known collectively as the
+V4L2 (`Video for Linux 2`_) and the `Media Controller`_ APIs, which provide an
+interface to interact with and control media devices.
+
+.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
+.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
+
+Included in this subsystem are drivers for camera sensors, CSI2 (Camera
+Serial Interface) receivers, and ISPs (Image Signal Processors).
+
+The usage of these drivers to provide a functioning camera stack is a
+responsibility that lies in userspace, and is commonly implemented separately
+by vendors without a common architecture or API for application developers. This
+adds a lot of complexity to the task, particularly when considering that the
+differences in hardware pipelines and their representation in the kernel's APIs
+often necessitate bespoke handling.
+
+What is libcamera for?
+======================
+
+libcamera provides a complete camera stack for Linux-based systems to abstract
+the configuration of hardware and image control algorithms required to obtain
+desirable results from the camera through the kernel's APIs, reducing those
+operations to a simple and consistent method for developers. In short, instead of
+having to deal with this:
+
+.. graphviz:: mali-c55.dot
+
+you can instead simply deal with:
+
+.. code-block:: python
+
+ >>> import libcamera as lc
+ >>> camera_manager = lc.CameraManager.singleton()
+ [0:15:59.582029920] [504] INFO Camera camera_manager.cpp:313 libcamera v0.3.0+182-01e57380
+ >>> for camera in camera_manager.cameras:
+ ... print(f' - {camera.id}')
+ ...
+ - mali-c55 tpg
+ - imx415 1-001a
+
+The library handles the rest for you. These documentation pages give more
+information on the internal workings of libcamera (and the kernel camera stack
+that lies behind it), as well as guidance on using libcamera in an application
+or extending the library with support for your hardware (through the pipeline
+handler and IPA module writer's guides).
+
+How should I use it?
+====================
+
+There are a few ways you might want to use libcamera, depending on your
+application. It's always possible to use the library directly, and you can find
+detailed information on how to do so in the
+:doc:`application writer's guide <guides/application-developer>`.
+
+It is often more appropriate to use one of the frameworks with libcamera
+support. For example, an application powering an embedded media device
+incorporating capture, encoding and streaming of both video and audio would
+benefit from using `GStreamer`_, for which libcamera provides a plugin.
+Similarly, an application for user-facing devices like a laptop would likely
+benefit from accessing cameras through the XDG camera portal and `pipewire`_, which
+brings the advantages of resource sharing (multiple applications accessing the
+stream at the same time) and access control.
+
+.. _GStreamer: https://gstreamer.freedesktop.org/
+.. _pipewire: https://pipewire.org/
+
+Camera Stack
+============
+
+::
+
+ a c / +-------------+ +-------------+ +-------------+ +-------------+
+ p a | | Native | | Framework | | Native | | Android |
+ p t | | V4L2 | | Application | | libcamera | | Camera |
+ l i | | Application | | (gstreamer) | | Application | | Framework |
+ i o \ +-------------+ +-------------+ +-------------+ +-------------+
+ n ^ ^ ^ ^
+ | | | |
+ l a | | | |
+ i d v v | v
+ b a / +-------------+ +-------------+ | +-------------+
+ c p | | V4L2 | | Camera | | | Android |
+ a t | | Compat. | | Framework | | | Camera |
+ m a | | | | (gstreamer) | | | HAL |
+ e t \ +-------------+ +-------------+ | +-------------+
+ r i ^ ^ | ^
+ a o | | | |
+ n | | | |
+ / | ,................................................
+ | | ! : Language : !
+ l f | | ! : Bindings : !
+ i r | | ! : (optional) : !
+ b a | | \...............................................'
+ c m | | | | |
+ a e | | | | |
+ m w | v v v v
+ e o | +----------------------------------------------------------------+
+ r r | | |
+ a k | | libcamera |
+ | | |
+ \ +----------------------------------------------------------------+
+ ^ ^ ^
+ Userspace | | |
+ ------------------------ | ---------------- | ---------------- | ---------------
+ Kernel | | |
+ v v v
+ +-----------+ +-----------+ +-----------+
+ | Media | <--> | Video | <--> | V4L2 |
+ | Device | | Device | | Subdev |
+ +-----------+ +-----------+ +-----------+
+
+The camera stack comprises four software layers. From bottom to top:
+
+* The kernel drivers control the camera hardware and expose a
+ low-level interface to userspace through the Linux kernel V4L2
+ family of APIs (Media Controller API, V4L2 Video Device API and
+ V4L2 Subdev API).
+
+* The libcamera framework is the core part of the stack. It
+ handles all control of the camera devices in its core component,
+ libcamera, and exposes a native C++ API to upper layers. Optional
+ language bindings allow interfacing to libcamera from other
+ programming languages.
+
+ Those components live in the same source code repository and
+ all together constitute the libcamera framework.
+
+* The libcamera adaptation is an umbrella term designating the
+ components that interface to libcamera in other frameworks.
+ Notable examples are a V4L2 compatibility layer, a gstreamer
+ libcamera element, and an Android camera HAL implementation based
+ on libcamera.
+
+ Those components can live in the libcamera project source code, in
+ separate repositories, or move to their respective project's
+ repository (for instance the gstreamer libcamera element).
+
+* The applications and upper level frameworks are based on the
+ libcamera framework or libcamera adaptation, and are outside of
+ the scope of the libcamera project.
+
+V4L2 Compatibility Layer
+ V4L2 compatibility is achieved through a shared library that traps all
+ accesses to camera devices and routes them to libcamera to emulate high-level
+ V4L2 camera devices. It is injected in a process address space through
+ ``LD_PRELOAD`` and is completely transparent for applications.
+
+ The compatibility layer exposes camera device features on a best-effort basis,
+ and aims for the level of features traditionally available from a UVC camera
+ designed for video conferencing.
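+
+ As a sketch of typical usage (the installed path of the compatibility
+ library is an assumption and varies between distributions), an unmodified
+ V4L2 application can be pointed at a libcamera-managed camera like this:
+
+ .. code-block:: shell
+
+    LD_PRELOAD=/usr/lib/libcamera/v4l2-compat.so v4l2-ctl --list-formats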
+
+Android Camera HAL
+ Camera support for Android is achieved through a generic Android camera HAL
+ implementation on top of libcamera. The HAL implements features required by
+ Android and out of scope from libcamera, such as JPEG encoding support.
+
+ This component is used to provide support for ChromeOS platforms.
+
+GStreamer element (gstlibcamerasrc)
+ A `GStreamer element`_ is provided to allow capture from libcamera supported
+ devices through GStreamer pipelines, and connect to other elements for further
+ processing.
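+
+ As a minimal sketch, a supported camera can be displayed on screen with a
+ pipeline such as:
+
+ .. code-block:: shell
+
+    gst-launch-1.0 libcamerasrc ! videoconvert ! autovideosink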
+
+Native libcamera API
+ Applications can make use of the libcamera API directly using the C++
+ API. An example application and walkthrough using the libcamera API can be
+ followed in the :doc:`Application writer's guide </guides/application-developer>`.
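+
+ As a brief sketch of the native API (error handling omitted for clarity),
+ enumerating the available cameras looks roughly like this:
+
+ .. code-block:: cpp
+
+    #include <iostream>
+    #include <memory>
+
+    #include <libcamera/libcamera.h>
+
+    int main()
+    {
+        auto cm = std::make_unique<libcamera::CameraManager>();
+        cm->start();
+
+        /* Print the identifier of every camera found on the system. */
+        for (const auto &camera : cm->cameras())
+            std::cout << camera->id() << std::endl;
+
+        cm->stop();
+        return 0;
+    }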
+
+.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
+
+Licensing
+=========
+
+The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
+Handlers are a part of the libcamera code base and need to be contributed
+upstream by device vendors. IPA modules included in libcamera are covered by a
+free software license, however third-parties may develop IPA modules outside of
+libcamera and distribute them under a closed-source license, provided they do
+not include source code from the libcamera project.
+
+The libcamera project itself contains multiple libraries, applications and
+utilities. Licenses are expressed through SPDX tags in text-based files that
+support comments, and through the .reuse/dep5 file otherwise. A copy of all
+licenses are stored in the LICENSES directory, and a full summary of the
+licensing used throughout the project can be found in the COPYING.rst document.
+
+Applications which link dynamically against libcamera and use only the public
+API are an independent work of the authors and have no license restrictions
+imposed upon them from libcamera.
+
+.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
diff --git a/Documentation/lens_driver_requirements.rst b/Documentation/lens_driver_requirements.rst
index b96e502d..85fef76f 100644
--- a/Documentation/lens_driver_requirements.rst
+++ b/Documentation/lens_driver_requirements.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _lens-driver-requirements:
Lens Driver Requirements
diff --git a/Documentation/libcamera_architecture.rst b/Documentation/libcamera_architecture.rst
new file mode 100644
index 00000000..abbb0d17
--- /dev/null
+++ b/Documentation/libcamera_architecture.rst
@@ -0,0 +1,168 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+libcamera Architecture
+======================
+
+While offering a unified API towards upper layers, and presenting itself as a
+single library, libcamera isn't monolithic. It exposes multiple components
+through its public API and is built around a set of separate helpers internally.
+Hardware abstractions are handled through the use of device-specific components
+where required and dynamically loadable plugins are used to separate image
+processing algorithms from the core libcamera codebase.
+
+::
+
+ --------------------------< libcamera Public API >---------------------------
+ ^ ^
+ | |
+ v v
+ +-------------+ +---------------------------------------------------+
+ | Camera | | Camera Device |
+ | Manager | | +-----------------------------------------------+ |
+ +-------------+ | | Device-Agnostic | |
+ ^ | | | |
+ | | | +--------------------------+ |
+ | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | | | { +-----------------+ } |
+ | | | | } | //// Image //// | { |
+ | | | | <-> | / Processing // | } |
+ | | | | } | / Algorithms // | { |
+ | | | | { +-----------------+ } |
+ | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | | | ========================== |
+ | | | | +-----------------+ |
+ | | | | | // Pipeline /// | |
+ | | | | <-> | /// Handler /// | |
+ | | | | | /////////////// | |
+ | | +--------------------+ +-----------------+ |
+ | | Device-Specific |
+ | +---------------------------------------------------+
+ | ^ ^
+ | | |
+ v v v
+ +--------------------------------------------------------------------+
+ | Helpers and Support Classes |
+ | +-------------+ +-------------+ +-------------+ +-------------+ |
+ | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
+ | | Support | | Allocator | | IPC | | Manager | |
+ | +-------------+ +-------------+ +-------------+ +-------------+ |
+ | +-------------+ +-------------+ |
+ | | Pipeline | | ... | |
+ | | Runner | | | |
+ | +-------------+ +-------------+ |
+ +--------------------------------------------------------------------+
+
+ /// Device-Specific Components
+ ~~~ Sandboxing
+
+
+Camera Manager
+ The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
+ manage each Camera that libcamera supports. The Camera Manager supports
+ hotplug detection and notification events when supported by the underlying
+ kernel devices.
+
+ There is only ever one instance of the Camera Manager running per application.
+ Each application's instance of the Camera Manager ensures that only a single
+ application can take control of a camera device at once.
+
+ Read the `Camera Manager API`_ documentation for more details.
+
+.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
+
+Camera Device
+ The Camera class represents a single item of camera hardware that is capable
+ of producing one or more image streams, and provides the API to interact with
+ the underlying device.
+
+ If a system has multiple instances of the same hardware attached, each has its
+ own instance of the camera class.
+
+ The API exposes full control of the device to upper layers of libcamera through
+ the public API, making it the highest level object libcamera exposes, and the
+ object that all other API operations interact with from configuration to
+ capture.
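+
+ As an illustrative sketch (error handling omitted; ``cm`` is assumed to be a
+ started CameraManager instance and the camera identifier is an example):
+
+ .. code-block:: cpp
+
+    std::shared_ptr<libcamera::Camera> camera = cm->get("imx415 1-001a");
+    camera->acquire();
+
+    /* Generate and apply a configuration for a viewfinder stream. */
+    auto config = camera->generateConfiguration({ libcamera::StreamRole::Viewfinder });
+    camera->configure(config.get());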
+
+ Read the `Camera API`_ documentation for more details.
+
+.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
+
+Pipeline Handler
+ The Pipeline Handler manages the complex pipelines exposed by the kernel
+ drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
+ handling to hide device-specific details from the rest of the library, and
+ implements both pipeline configuration based on stream configuration, and
+ pipeline runtime execution and scheduling when needed by the device.
+
+ The Pipeline Handler lives in the same process as the rest of the library, and
+ has access to all helpers and kernel camera-related devices.
+
+ Hardware abstraction is handled by device-specific Pipeline Handlers which are
+ derived from the Pipeline Handler base class allowing commonality to be shared
+ among the implementations.
+
+ Derived pipeline handlers create Camera device instances based on the devices
+ they detect and support on the running system, and are responsible for
+ managing the interactions with a camera device.
+
+ More details can be found in the `PipelineHandler API`_ documentation, and the
+ :doc:`Pipeline Handler Writers Guide <guides/pipeline-handler>`.
+
+.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
+
+Image Processing Algorithms
+ Together with the hardware image processing and hardware statistics
+ collection, the Image Processing Algorithms (IPA) implement 3A (Auto-Exposure,
+ Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
+ and control hardware image processing based on the parameters supplied by
+ upper layers, closing the control loop of the ISP.
+
+ IPAs are loaded as external plugins named IPA Modules. IPA Modules can be part
+ of the libcamera code base or provided externally by camera vendors as
+ open-source or closed-source components.
+
+ Open source IPA Modules built with libcamera are run in the same process space
+ as libcamera. External IPA Modules are run in a separate sandboxed process. In
+ either case, they can only interact with libcamera through the API provided by
+ the Pipeline Handler. They have a restricted view of the system, with no direct
+ access to kernel camera devices, no access to networking APIs, and limited
+ access to file systems. All their accesses to image and metadata are mediated
+ by dmabuf instances explicitly passed by the Pipeline Handler to the IPA
+ Module.
+
+ IPA Modules are only required for platforms and devices with an ISP controlled
+ by the host CPU. Camera sensors which have an integrated ISP are not
+ controlled through the IPA Module.
+
+Helpers and Support Classes
+ While Pipeline Handlers are device-specific, implementations are expected to
+ share code due to usage of identical APIs towards the kernel camera drivers
+ and the Image Processing Algorithms. This includes without limitation handling
+ of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
+ discovery, configuration and scheduling. Such code will be factored out to
+ helpers when applicable.
+
+ Other parts of libcamera will also benefit from factoring code out to
+ self-contained support classes, even if such code is present only once in the
+ code base, in order to keep the source code clean and easy to read. This
+ should be the case for instance for plugin management.
+
+Platform Support
+----------------
+
+The library currently supports the following hardware platforms specifically
+with dedicated pipeline handlers:
+
+ - Arm Mali-C55
+ - Intel IPU3 (ipu3)
+ - NXP i.MX8MP (imx8-isi and rkisp1)
+ - Raspberry Pi 3, 4 and Zero (rpi/vc4)
+ - Rockchip RK3399 (rkisp1)
+
+Furthermore, generic platform support is provided for the following:
+
+ - USB video device class cameras (uvcvideo)
+ - iMX7, IPU6, Allwinner Sun6i (simple)
+ - Virtual media controller driver for test use cases (vimc)
diff --git a/Documentation/mainpage.dox b/Documentation/mainpage.dox
new file mode 100644
index 00000000..cbee9bab
--- /dev/null
+++ b/Documentation/mainpage.dox
@@ -0,0 +1,33 @@
+/**
+\mainpage libcamera API reference
+
+Welcome to the API reference for <a href="https://libcamera.org/">libcamera</a>,
+a complex camera support library for Linux, Android and ChromeOS. These pages
+are automatically generated from the libcamera source code and describe the API
+in detail - if this is your first interaction with libcamera then you may find
+it useful to visit the [documentation](../introduction.html) in
+the first instance, which can provide a more generic introduction to the
+library's concepts.
+
+\if internal
+
+As a follow-on to the developer's guide, to assist you in adding support for
+your platform, the [pipeline handler writer's guide](../guides/pipeline-handler.html)
+and the [ipa module writer's guide](../guides/ipa.html) should be helpful.
+
+The full libcamera API is documented here. If you wish to see only the public
+part of the API you can use [these pages](../api-html/index.html) instead.
+
+\else
+
+As a follow-on to the developer's guide, to assist you in using libcamera within
+your project, the [application developer's guide](../guides/application-developer.html)
+gives an overview of how to achieve that.
+
+Only the public part of the libcamera API is documented here; if you are a
+developer seeking to add support for your hardware to the library or make other
+improvements, you should switch to the internal API
+[reference pages](../internal-api-html/index.html) instead.
+
+\endif
+*/
diff --git a/Documentation/mali-c55.dot b/Documentation/mali-c55.dot
new file mode 100644
index 00000000..7bfc44c0
--- /dev/null
+++ b/Documentation/mali-c55.dot
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: CC-BY-SA-4.0 */
+
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{} | mali-c55 tpg\n/dev/v4l-subdev0 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port0 -> n00000003:port0 [style=dashed]
+ n00000003 [label="{{<port0> 0 | <port4> 4} | mali-c55 isp\n/dev/v4l-subdev1 | {<port1> 1 | <port2> 2 | <port3> 3}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000003:port1 -> n00000009:port0 [style=bold]
+ n00000003:port2 -> n00000009:port2 [style=bold]
+ n00000003:port1 -> n0000000d:port0 [style=bold]
+ n00000003:port3 -> n0000001c
+ n00000009 [label="{{<port0> 0 | <port2> 2} | mali-c55 resizer fr\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000009:port1 -> n00000010
+ n0000000d [label="{{<port0> 0} | mali-c55 resizer ds\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000d:port1 -> n00000014
+ n00000010 [label="mali-c55 fr\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n00000014 [label="mali-c55 ds\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n00000018 [label="mali-c55 3a params\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n00000018 -> n00000003:port4
+ n0000001c [label="mali-c55 3a stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n00000030 [label="{{<port0> 0} | lte-csi2-rx\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000030:port1 -> n00000003:port0
+ n00000035 [label="{{} | imx415 1-001a\n/dev/v4l-subdev5 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000035:port0 -> n00000030:port0 [style=bold]
+}
diff --git a/Documentation/meson.build b/Documentation/meson.build
index 7a58fec8..6158320e 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -15,6 +15,7 @@ if doxygen.found() and dot.found()
cdata.set('TOP_SRCDIR', meson.project_source_root())
cdata.set('TOP_BUILDDIR', meson.project_build_root())
cdata.set('OUTPUT_DIR', meson.current_build_dir())
+ cdata.set('WARN_AS_ERROR', get_option('doc_werror') ? 'YES' : 'NO')
doxygen_predefined = []
foreach key : config_h.keys()
@@ -23,34 +24,92 @@ if doxygen.found() and dot.found()
cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
- doxyfile = configure_file(input : 'Doxyfile.in',
- output : 'Doxyfile',
- configuration : cdata)
+ doxyfile_common = configure_file(input : 'Doxyfile-common.in',
+ output : 'Doxyfile-common',
+ configuration : cdata)
+
+ doxygen_public_input = [
+ libcamera_base_public_headers,
+ libcamera_base_public_sources,
+ libcamera_public_headers,
+ libcamera_public_sources,
+ ]
- doxygen_input = [
- doxyfile,
- libcamera_base_headers,
- libcamera_base_sources,
+ doxygen_internal_input = [
+ libcamera_base_private_headers,
+ libcamera_base_internal_sources,
libcamera_internal_headers,
+ libcamera_internal_sources,
libcamera_ipa_headers,
libcamera_ipa_interfaces,
- libcamera_public_headers,
- libcamera_sources,
libipa_headers,
libipa_sources,
]
if is_variable('ipu3_ipa_sources')
- doxygen_input += [ipu3_ipa_sources]
+ doxygen_internal_input += [ipu3_ipa_sources]
endif
- custom_target('doxygen',
- input : doxygen_input,
+ # We run doxygen twice - the first run excludes internal API objects as it
+ # is intended to document the public API only. A second run covers all of
+ # the library's objects for libcamera developers. Common configuration is
+ # set in an initially generated Doxyfile, which is then included by the two
+ # final Doxyfiles.
+
+ # This is the "public" run of doxygen generating an abridged version of the
+ # API's documentation.
+
+ doxyfile_tmpl = configure_file(input : 'Doxyfile-public.in',
+ output : 'Doxyfile-public.tmpl',
+ configuration : cdata)
+
+ # The set of public input files stored in the doxygen_public_input array
+ # needs to be set in Doxyfile-public. We can't pass them through cdata, as
+ # some of the array members are custom_tgt instances, which
+ # configuration_data.set() doesn't support. A separate script invoked
+ # through custom_target() is used instead, as it supports custom_tgt
+ # instances as inputs.
+
+ doxyfile = custom_target('doxyfile-public',
+ input : [
+ doxygen_public_input,
+ ],
+ output : 'Doxyfile-public',
+ command : [
+ 'gen-doxyfile.py',
+ '-o', '@OUTPUT@',
+ doxyfile_tmpl,
+ '@INPUT@',
+ ])
+
+ custom_target('doxygen-public',
+ input : [
+ doxyfile,
+ doxyfile_common,
+ ],
output : 'api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc')
+
+ # This is the internal documentation, which hard-codes a list of directories
+ # to parse in its doxyfile.
+
+ doxyfile = configure_file(input : 'Doxyfile-internal.in',
+ output : 'Doxyfile-internal',
+ configuration : cdata)
+
+ custom_target('doxygen-internal',
+ input : [
+ doxyfile,
+ doxyfile_common,
+ doxygen_internal_input,
+ ],
+ output : 'internal-api-html',
+ command : [doxygen, doxyfile],
+ install : true,
+ install_dir : doc_install_dir,
+ install_tag : 'doc-internal')
endif
#
@@ -69,17 +128,22 @@ if sphinx.found()
'coding-style.rst',
'conf.py',
'contributing.rst',
- 'docs.rst',
+ 'design/ae.rst',
+ 'documentation-contents.rst',
'environment_variables.rst',
+ 'feature_requirements.rst',
'guides/application-developer.rst',
- 'guides/introduction.rst',
'guides/ipa.rst',
'guides/pipeline-handler.rst',
'guides/tracing.rst',
'index.rst',
+ 'introduction.rst',
'lens_driver_requirements.rst',
+ 'libcamera_architecture.rst',
+ 'mali-c55.dot',
'python-bindings.rst',
'sensor_driver_requirements.rst',
+ 'software-isp-benchmarking.rst',
'../README.rst',
]
diff --git a/Documentation/python-bindings.rst b/Documentation/python-bindings.rst
index ed9f686b..94712238 100644
--- a/Documentation/python-bindings.rst
+++ b/Documentation/python-bindings.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _python-bindings:
Python Bindings for libcamera
diff --git a/Documentation/sensor_driver_requirements.rst b/Documentation/sensor_driver_requirements.rst
index 0e516b34..fb4269d0 100644
--- a/Documentation/sensor_driver_requirements.rst
+++ b/Documentation/sensor_driver_requirements.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _sensor-driver-requirements:
Sensor Driver Requirements
diff --git a/Documentation/software-isp-benchmarking.rst b/Documentation/software-isp-benchmarking.rst
new file mode 100644
index 00000000..9c2a409b
--- /dev/null
+++ b/Documentation/software-isp-benchmarking.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+.. _software-isp-benchmarking:
+
+Software ISP benchmarking
+=========================
+
+The Software ISP is particularly sensitive to performance regressions, so it
+is a good idea to always benchmark it before and after making changes to
+ensure that none have been introduced.
+
+DebayerCpu class builtin benchmark
+----------------------------------
+
+The DebayerCpu class has a builtin benchmark. This benchmark measures the time
+spent on processing (collecting statistics and debayering) only; it does not
+measure the time spent on capturing or outputting the frames.
+
+The builtin benchmark always runs, so it can be used by simply running "cam"
+or "qcam" with a pipeline using the Software ISP.
+
+When it runs it will skip measuring the first 30 frames to allow the caches
+and the CPU temperature (turbo-ing) to warm up, and then it measures 30 frames
+and shows the total and per-frame processing time using an info level log
+message:
+
+.. code-block:: text
+
+ INFO Debayer debayer_cpu.cpp:907 Processed 30 frames in 244317us, 8143 us/frame
+
+To get stable measurements it is advised to stop any other processes which may
+cause significant CPU usage (e.g. disable wifi, bluetooth and browsers). When
+possible it is also advisable to disable CPU turbo-ing and frequency-scaling.
+
+For example, when benchmarking on a Lenovo ThinkPad X1 Yoga Gen 8, with the
+charger plugged in, the CPU can be fixed to run at 2 GHz using:
+
+.. code-block:: shell
+
+ sudo x86_energy_perf_policy --turbo-enable 0
+ sudo cpupower frequency-set -d 2GHz -u 2GHz
+
+With these settings the builtin benchmark reports a processing time of
+~7.8 ms/frame on this laptop for FHD SGRBG10 (unpacked) Bayer data.
+
+Measuring power consumption
+---------------------------
+
+Since the Software ISP is often used on mobile devices it is also important to
+measure power consumption and ensure that it does not regress.
+
+For example, to measure power consumption on a Lenovo ThinkPad X1 Yoga Gen 8 it
+needs to be running on battery and it should be configured with its
+platform-profile (/sys/firmware/acpi/platform_profile) set to balanced and with
+its default turbo and frequency-scaling behavior to match real world usage.
+
+Then start qcam to capture an FHD picture at 30 fps and position the qcam
+window so that it is fully visible. After this, run the following command to
+monitor the power consumption:
+
+.. code-block:: shell
+
+ watch -n 10 cat /sys/class/power_supply/BAT0/power_now /sys/class/hwmon/hwmon6/fan?_input
+
+Note that this not only measures the power consumption in µW, it also monitors
+the speed of this laptop's 2 fans. This is important because, depending on the
+ambient temperature, the 2 fans may spin up while testing and cause an
+additional power consumption of approx. 0.5 W, skewing the measurement.
+
+After starting qcam and the watch command, let the laptop sit unused for
+2 minutes to allow the readings to stabilize. Then check that the fans have
+not turned on, manually take a couple of consecutive power readings, and
+average these.
+
+On the example Lenovo ThinkPad X1 Yoga Gen 8 laptop this results in a measured
+power consumption of approx. 13 W while running qcam versus approx. 4-5 W while
+sitting idle with its OLED panel on.
diff --git a/Documentation/theme/static/css/theme.css b/Documentation/theme/static/css/theme.css
index d4274ea6..a6d43195 100644
--- a/Documentation/theme/static/css/theme.css
+++ b/Documentation/theme/static/css/theme.css
@@ -283,9 +283,13 @@ div#signature {
font-size: 12px;
}
-#libcamera div.toctree-wrapper {
+#licensing div.toctree-wrapper {
height: 0px;
margin: 0px;
padding: 0px;
visibility: hidden;
}
+
+.documentation-nav {
+ display: none;
+}
diff --git a/Documentation/thread-safety.dox b/Documentation/thread-safety.dox
new file mode 100644
index 00000000..df4c457c
--- /dev/null
+++ b/Documentation/thread-safety.dox
@@ -0,0 +1,44 @@
+/**
+ * \page thread-safety Reentrancy and Thread-Safety
+ *
+ * Through the documentation, several terms are used to define how classes and
+ * their member functions can be used from multiple threads.
+ *
+ * - A **reentrant** function may be called simultaneously from multiple
+ * threads if and only if each invocation uses a different instance of the
+ * class. This is the default for all member functions not explicitly marked
+ * otherwise.
+ *
+ * - \anchor thread-safe A **thread-safe** function may be called
+ * simultaneously from multiple threads on the same instance of a class. A
+ * thread-safe function is thus reentrant. Thread-safe functions may also be
+ * called simultaneously with any other reentrant function of the same class
+ * on the same instance.
+ *
+ * \internal
+ * - \anchor thread-bound A **thread-bound** function may be called only from
+ * the thread that the class instance lives in (see section \ref
+ * thread-objects). For instances of classes that do not derive from the
+ * Object class, this is the thread in which the instance was created. A
+ * thread-bound function is not thread-safe, and may or may not be reentrant.
+ * \endinternal
+ *
+ * Neither reentrancy nor thread-safety, in this context, means that a function
+ * may be called simultaneously from the same thread, for instance from a
+ * callback invoked by the function. This may deadlock and isn't allowed unless
+ * separately documented.
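+ *
+ * As an illustrative sketch (Worker is a hypothetical class whose process()
+ * function is documented as reentrant but not thread-safe):
+ * \code
+ * Worker w1, w2;
+ * std::thread t1([&]() { w1.process(); });  // OK: t1 has its own instance
+ * std::thread t2([&]() { w2.process(); });  // OK: different instance
+ * std::thread t3([&]() { w1.process(); });  // Not allowed: w1 is already in
+ *                                           // use by t1 and process() is not
+ *                                           // thread-safe
+ * \endcode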
+ *
+ * \if internal
+ * A class is defined as reentrant, thread-safe or thread-bound if all its
+ * member functions are reentrant, thread-safe or thread-bound respectively.
+ * \else
+ * A class is defined as reentrant or thread-safe if all its member functions
+ * are reentrant or thread-safe respectively.
+ * \endif
+ * Some member functions may also be documented as having additional
+ * thread-related attributes.
+ *
+ * Most classes are reentrant but not thread-safe, as making them fully
+ * thread-safe would incur locking costs considered prohibitive for the
+ * expected use cases.
+ */
diff --git a/README.rst b/README.rst
index 1da7a3d6..ae5126e2 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
-.. section-begin-libcamera
-
===========
libcamera
===========
@@ -22,7 +20,6 @@ open-source-friendly while still protecting vendor core IP. libcamera was born
out of that collaboration and will offer modern camera support to Linux-based
systems, including traditional Linux distributions, ChromeOS and Android.
-.. section-end-libcamera
.. section-begin-getting-started
Getting Started
@@ -88,7 +85,7 @@ for cam: [optional]
- libsdl2-dev: Enables the SDL sink
for qcam: [optional]
- libtiff-dev qtbase5-dev qttools5-dev-tools
+ libtiff-dev qt6-base-dev qt6-tools-dev-tools
for tracing with lttng: [optional]
liblttng-ust-dev python3-jinja2 lttng-tools
@@ -96,9 +93,6 @@ for tracing with lttng: [optional]
for android: [optional]
libexif-dev libjpeg-dev
-for Python bindings: [optional]
- pybind11-dev
-
for lc-compliance: [optional]
libevent-dev libgtest-dev
@@ -178,6 +172,22 @@ Which can be received on another device over the network with:
gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \
multipartdemux ! jpegdec ! autovideosink
+The GStreamer element also supports multiple streams. This is achieved by
+requesting additional source pads. Downstream caps filters can be used
+to choose specific parameters like resolution and pixel format. The pad
+property ``stream-role`` can be used to select a role.
+
+The following example displays a 640x480 viewfinder while streaming JPEG
+encoded 800x600 video. You can use the receiver pipeline above to view the
+remote stream from another device.
+
+.. code::
+
+ gst-launch-1.0 libcamerasrc name=cs src::stream-role=view-finder src_0::stream-role=video-recording \
+ cs.src ! queue ! video/x-raw,width=640,height=480 ! videoconvert ! autovideosink \
+ cs.src_0 ! queue ! video/x-raw,width=800,height=600 ! videoconvert ! \
+ jpegenc ! multipartmux ! tcpserversink host=0.0.0.0 port=5000
+
.. section-end-getting-started
Troubleshooting
diff --git a/include/libcamera/base/backtrace.h b/include/libcamera/base/backtrace.h
index 752034d1..699ddd9e 100644
--- a/include/libcamera/base/backtrace.h
+++ b/include/libcamera/base/backtrace.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#pragma once
diff --git a/include/libcamera/base/bound_method.h b/include/libcamera/base/bound_method.h
index c0275249..dd3488ee 100644
--- a/include/libcamera/base/bound_method.h
+++ b/include/libcamera/base/bound_method.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.h - Method bind and invocation
+ * Method bind and invocation
*/
#pragma once
diff --git a/include/libcamera/base/class.h b/include/libcamera/base/class.h
index 571eecf4..a808422e 100644
--- a/include/libcamera/base/class.h
+++ b/include/libcamera/base/class.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.h - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#pragma once
diff --git a/include/libcamera/base/compiler.h b/include/libcamera/base/compiler.h
deleted file mode 100644
index 02564f2f..00000000
--- a/include/libcamera/base/compiler.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2021, Google Inc.
- *
- * compiler.h - Compiler support
- */
-
-#pragma once
-
-#if __cplusplus >= 201703L
-#define __nodiscard [[nodiscard]]
-#else
-#define __nodiscard
-#endif
diff --git a/include/libcamera/base/event_dispatcher.h b/include/libcamera/base/event_dispatcher.h
index 184f1b12..408f8da6 100644
--- a/include/libcamera/base/event_dispatcher.h
+++ b/include/libcamera/base/event_dispatcher.h
@@ -2,13 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.h - Event dispatcher
+ * Event dispatcher
*/
#pragma once
-#include <vector>
-
#include <libcamera/base/private.h>
namespace libcamera {
diff --git a/include/libcamera/base/event_dispatcher_poll.h b/include/libcamera/base/event_dispatcher_poll.h
index b7840309..1f7e05cf 100644
--- a/include/libcamera/base/event_dispatcher_poll.h
+++ b/include/libcamera/base/event_dispatcher_poll.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.h - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#pragma once
diff --git a/include/libcamera/base/event_notifier.h b/include/libcamera/base/event_notifier.h
index e5c0594d..158f2d44 100644
--- a/include/libcamera/base/event_notifier.h
+++ b/include/libcamera/base/event_notifier.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.h - File descriptor event notifier
+ * File descriptor event notifier
*/
#pragma once
diff --git a/include/libcamera/base/file.h b/include/libcamera/base/file.h
index 0cdc2ed0..6d3f106d 100644
--- a/include/libcamera/base/file.h
+++ b/include/libcamera/base/file.h
@@ -2,15 +2,15 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.h - File I/O operations
+ * File I/O operations
*/
#pragma once
-#include <sys/types.h>
-
#include <map>
+#include <stdint.h>
#include <string>
+#include <sys/types.h>
#include <libcamera/base/private.h>
diff --git a/include/libcamera/base/flags.h b/include/libcamera/base/flags.h
index a1b404bd..af4f6e35 100644
--- a/include/libcamera/base/flags.h
+++ b/include/libcamera/base/flags.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.h - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#pragma once
diff --git a/include/libcamera/base/log.h b/include/libcamera/base/log.h
index dcaacbe0..8af74b59 100644
--- a/include/libcamera/base/log.h
+++ b/include/libcamera/base/log.h
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.h - Logging infrastructure
+ * Logging infrastructure
*/
#pragma once
-#include <chrono>
+#include <atomic>
#include <sstream>
+#include <string_view>
#include <libcamera/base/private.h>
@@ -29,25 +30,29 @@ enum LogSeverity {
class LogCategory
{
public:
- static LogCategory *create(const char *name);
+ static LogCategory *create(std::string_view name);
const std::string &name() const { return name_; }
- LogSeverity severity() const { return severity_; }
- void setSeverity(LogSeverity severity);
+ LogSeverity severity() const { return severity_.load(std::memory_order_relaxed); }
+ void setSeverity(LogSeverity severity) { severity_.store(severity, std::memory_order_relaxed); }
static const LogCategory &defaultCategory();
private:
- explicit LogCategory(const char *name);
+ friend class Logger;
+ explicit LogCategory(std::string_view name);
const std::string name_;
- LogSeverity severity_;
+
+ std::atomic<LogSeverity> severity_;
+ static_assert(decltype(severity_)::is_always_lock_free);
};
#define LOG_DECLARE_CATEGORY(name) \
extern const LogCategory &_LOG_CATEGORY(name)();
#define LOG_DEFINE_CATEGORY(name) \
+LOG_DECLARE_CATEGORY(name) \
const LogCategory &_LOG_CATEGORY(name)() \
{ \
/* The instance will be deleted by the Logger destructor. */ \
@@ -60,9 +65,7 @@ class LogMessage
public:
LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity,
- const std::string &prefix = std::string());
-
- LogMessage(LogMessage &&);
+ std::string prefix = {});
~LogMessage();
std::ostream &stream() { return msgStream_; }
@@ -75,9 +78,7 @@ public:
const std::string msg() const { return msgStream_.str(); }
private:
- LIBCAMERA_DISABLE_COPY(LogMessage)
-
- void init(const char *fileName, unsigned int line);
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(LogMessage)
std::ostringstream msgStream_;
const LogCategory &category_;
diff --git a/include/libcamera/base/memfd.h b/include/libcamera/base/memfd.h
new file mode 100644
index 00000000..705d9929
--- /dev/null
+++ b/include/libcamera/base/memfd.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Anonymous file creation
+ */
+
+#pragma once
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/unique_fd.h>
+
+namespace libcamera {
+
+class MemFd
+{
+public:
+ enum class Seal {
+ None = 0,
+ Shrink = (1 << 0),
+ Grow = (1 << 1),
+ };
+
+ using Seals = Flags<Seal>;
+
+ static UniqueFD create(const char *name, std::size_t size,
+ Seals seals = Seal::None);
+};
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(MemFd::Seal)
+
+} /* namespace libcamera */
diff --git a/include/libcamera/base/meson.build b/include/libcamera/base/meson.build
index bace25d5..f28ae4d4 100644
--- a/include/libcamera/base/meson.build
+++ b/include/libcamera/base/meson.build
@@ -5,7 +5,6 @@ libcamera_base_include_dir = libcamera_include_dir / 'base'
libcamera_base_public_headers = files([
'bound_method.h',
'class.h',
- 'compiler.h',
'flags.h',
'object.h',
'shared_fd.h',
@@ -21,6 +20,7 @@ libcamera_base_private_headers = files([
'event_notifier.h',
'file.h',
'log.h',
+ 'memfd.h',
'message.h',
'mutex.h',
'private.h',
diff --git a/include/libcamera/base/message.h b/include/libcamera/base/message.h
index b939af6f..4b232031 100644
--- a/include/libcamera/base/message.h
+++ b/include/libcamera/base/message.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.h - Message queue support
+ * Message queue support
*/
#pragma once
diff --git a/include/libcamera/base/mutex.h b/include/libcamera/base/mutex.h
index 52441c55..fa9a8d0d 100644
--- a/include/libcamera/base/mutex.h
+++ b/include/libcamera/base/mutex.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.h - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#pragma once
diff --git a/include/libcamera/base/object.h b/include/libcamera/base/object.h
index cb7e0a13..6cb935a0 100644
--- a/include/libcamera/base/object.h
+++ b/include/libcamera/base/object.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.h - Base object
+ * Base object
*/
#pragma once
@@ -12,6 +12,7 @@
#include <vector>
#include <libcamera/base/bound_method.h>
+#include <libcamera/base/class.h>
namespace libcamera {
@@ -52,6 +53,8 @@ protected:
bool assertThreadBound(const char *message);
private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(Object)
+
friend class SignalBase;
friend class Thread;
diff --git a/include/libcamera/base/private.h b/include/libcamera/base/private.h
index 163012bf..8670c40b 100644
--- a/include/libcamera/base/private.h
+++ b/include/libcamera/base/private.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * private.h - Private Header Validation
+ * Private Header Validation
*
* A selection of internal libcamera headers are installed as part
* of the libcamera package to allow sharing of a select subset of
diff --git a/include/libcamera/base/semaphore.h b/include/libcamera/base/semaphore.h
index f1052317..59d4aa44 100644
--- a/include/libcamera/base/semaphore.h
+++ b/include/libcamera/base/semaphore.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.h - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#pragma once
diff --git a/include/libcamera/base/shared_fd.h b/include/libcamera/base/shared_fd.h
index e53a8b88..61fe11c1 100644
--- a/include/libcamera/base/shared_fd.h
+++ b/include/libcamera/base/shared_fd.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.h - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#pragma once
diff --git a/include/libcamera/base/signal.h b/include/libcamera/base/signal.h
index 444997b4..ed1d81ea 100644
--- a/include/libcamera/base/signal.h
+++ b/include/libcamera/base/signal.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.h - Signal & slot implementation
+ * Signal & slot implementation
*/
#pragma once
@@ -10,7 +10,6 @@
#include <functional>
#include <list>
#include <type_traits>
-#include <vector>
#include <libcamera/base/bound_method.h>
@@ -64,11 +63,8 @@ public:
#ifndef __DOXYGEN__
template<typename T, typename Func,
- std::enable_if_t<std::is_base_of<Object, T>::value
-#if __cplusplus >= 201703L
- && std::is_invocable_v<Func, Args...>
-#endif
- > * = nullptr>
+ std::enable_if_t<std::is_base_of<Object, T>::value &&
+ std::is_invocable_v<Func, Args...>> * = nullptr>
void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto)
{
Object *object = static_cast<Object *>(obj);
@@ -76,11 +72,8 @@ public:
}
template<typename T, typename Func,
- std::enable_if_t<!std::is_base_of<Object, T>::value
-#if __cplusplus >= 201703L
- && std::is_invocable_v<Func, Args...>
-#endif
- > * = nullptr>
+ std::enable_if_t<!std::is_base_of<Object, T>::value &&
+ std::is_invocable_v<Func, Args...>> * = nullptr>
#else
template<typename T, typename Func>
#endif
diff --git a/include/libcamera/base/span.h b/include/libcamera/base/span.h
index 88d2e3de..92cce4f0 100644
--- a/include/libcamera/base/span.h
+++ b/include/libcamera/base/span.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * span.h - C++20 std::span<> implementation for C++11
+ * C++20 std::span<> implementation for C++11
*/
#pragma once
@@ -10,7 +10,6 @@
#include <array>
#include <iterator>
#include <limits>
-#include <stddef.h>
#include <type_traits>
namespace libcamera {
diff --git a/include/libcamera/base/thread.h b/include/libcamera/base/thread.h
index 9d00f102..b9284c2c 100644
--- a/include/libcamera/base/thread.h
+++ b/include/libcamera/base/thread.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.h - Thread support
+ * Thread support
*/
#pragma once
@@ -13,8 +13,10 @@
#include <libcamera/base/private.h>
+#include <libcamera/base/class.h>
#include <libcamera/base/message.h>
#include <libcamera/base/signal.h>
+#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
namespace libcamera {
@@ -35,6 +37,8 @@ public:
void exit(int code = 0);
bool wait(utils::duration duration = utils::duration::max());
+ int setThreadAffinity(const Span<const unsigned int> &cpus);
+
bool isRunning();
Signal<> finished;
@@ -44,16 +48,21 @@ public:
EventDispatcher *eventDispatcher();
- void dispatchMessages(Message::Type type = Message::Type::None);
+ void dispatchMessages(Message::Type type = Message::Type::None,
+ Object *receiver = nullptr);
protected:
int exec();
virtual void run();
private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(Thread)
+
void startThread();
void finishThread();
+ void setThreadAffinityInternal();
+
void postMessage(std::unique_ptr<Message> msg, Object *receiver);
void removeMessages(Object *receiver);
diff --git a/include/libcamera/base/thread_annotations.h b/include/libcamera/base/thread_annotations.h
index 25b3c7b6..81930f08 100644
--- a/include/libcamera/base/thread_annotations.h
+++ b/include/libcamera/base/thread_annotations.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * thread_annotation.h - Macro of Clang thread safety analysis
+ * Macros for Clang thread safety analysis
*/
#pragma once
diff --git a/include/libcamera/base/timer.h b/include/libcamera/base/timer.h
index 759b68ad..9646a0fe 100644
--- a/include/libcamera/base/timer.h
+++ b/include/libcamera/base/timer.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.h - Generic timer
+ * Generic timer
*/
#pragma once
#include <chrono>
-#include <stdint.h>
#include <libcamera/base/private.h>
diff --git a/include/libcamera/base/unique_fd.h b/include/libcamera/base/unique_fd.h
index ae4d96b7..3066bf28 100644
--- a/include/libcamera/base/unique_fd.h
+++ b/include/libcamera/base/unique_fd.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.h - File descriptor wrapper that owns a file descriptor.
+ * File descriptor wrapper that owns a file descriptor.
*/
#pragma once
@@ -10,7 +10,6 @@
#include <utility>
#include <libcamera/base/class.h>
-#include <libcamera/base/compiler.h>
namespace libcamera {
@@ -43,7 +42,7 @@ public:
return *this;
}
- __nodiscard int release()
+ [[nodiscard]] int release()
{
int fd = fd_;
fd_ = -1;
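[Note, not part of the patch: replacing the __nodiscard compatibility macro with the standard attribute means a caller that ignores the return value of release() now gets a compiler diagnostic, since discarding it would leak the descriptor. Sketch:]

    #include <fcntl.h>
    #include <unistd.h>

    #include <libcamera/base/unique_fd.h>

    using namespace libcamera;

    int main()
    {
        UniqueFD fd(open("/dev/null", O_RDONLY));

        /* The caller takes ownership back and must close it. */
        int raw = fd.release();
        if (raw >= 0)
            close(raw);
        return 0;
    }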
diff --git a/include/libcamera/base/utils.h b/include/libcamera/base/utils.h
index 922e4dfa..8d5c3578 100644
--- a/include/libcamera/base/utils.h
+++ b/include/libcamera/base/utils.h
@@ -2,19 +2,20 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * utils.h - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#pragma once
#include <algorithm>
#include <chrono>
+#include <functional>
#include <iterator>
-#include <memory>
#include <ostream>
#include <sstream>
-#include <string>
+#include <stdint.h>
#include <string.h>
+#include <string>
#include <sys/time.h>
#include <type_traits>
#include <utility>
@@ -91,6 +92,30 @@ _hex hex(T value, unsigned int width = 0);
#ifndef __DOXYGEN__
template<>
+inline _hex hex<int8_t>(int8_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 2 };
+}
+
+template<>
+inline _hex hex<uint8_t>(uint8_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 2 };
+}
+
+template<>
+inline _hex hex<int16_t>(int16_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 4 };
+}
+
+template<>
+inline _hex hex<uint16_t>(uint16_t value, unsigned int width)
+{
+ return { static_cast<uint64_t>(value), width ? width : 4 };
+}
+
+template<>
inline _hex hex<int32_t>(int32_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 8 };
@@ -180,7 +205,16 @@ public:
iterator &operator++();
std::string operator*() const;
- bool operator!=(const iterator &other) const;
+
+ bool operator==(const iterator &other) const
+ {
+ return pos_ == other.pos_;
+ }
+
+ bool operator!=(const iterator &other) const
+ {
+ return !(*this == other);
+ }
private:
const StringSplitter *ss_;
@@ -188,8 +222,15 @@ public:
std::string::size_type next_;
};
- iterator begin() const;
- iterator end() const;
+ iterator begin() const
+ {
+ return { this, 0 };
+ }
+
+ iterator end() const
+ {
+ return { this, std::string::npos };
+ }
private:
std::string str_;
@@ -375,6 +416,18 @@ constexpr std::underlying_type_t<Enum> to_underlying(Enum e) noexcept
return static_cast<std::underlying_type_t<Enum>>(e);
}
+class ScopeExitActions
+{
+public:
+ ~ScopeExitActions();
+
+ void operator+=(std::function<void()> &&action);
+ void release();
+
+private:
+ std::vector<std::function<void()>> actions_;
+};
+
} /* namespace utils */
#ifndef __DOXYGEN__
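[Usage sketch, not part of the patch: the new 8- and 16-bit hex() specializations default to widths of 2 and 4 digits, and ScopeExitActions collects cleanup callbacks that run on destruction unless release() is called on the success path. The openDeviceA()/closeDeviceA()/openDeviceB() helpers below are hypothetical:]

    #include <libcamera/base/utils.h>

    using namespace libcamera;

    int openDeviceA();    /* hypothetical */
    void closeDeviceA();  /* hypothetical */
    int openDeviceB();    /* hypothetical */

    int acquire()
    {
        utils::ScopeExitActions actions;

        int ret = openDeviceA();
        if (ret)
            return ret;
        actions += [&]() { closeDeviceA(); };

        ret = openDeviceB();
        if (ret)
            return ret;       /* closeDeviceA() runs here */

        actions.release();    /* success: keep everything open */
        return 0;
    }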
diff --git a/include/libcamera/camera.h b/include/libcamera/camera.h
index ae35792d..94cee7bd 100644
--- a/include/libcamera/camera.h
+++ b/include/libcamera/camera.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.h - Camera object interface
+ * Camera object interface
*/
#pragma once
diff --git a/include/libcamera/camera_manager.h b/include/libcamera/camera_manager.h
index 1a891cac..b50df782 100644
--- a/include/libcamera/camera_manager.h
+++ b/include/libcamera/camera_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
#pragma once
diff --git a/include/libcamera/color_space.h b/include/libcamera/color_space.h
index 6d6c2829..7b483cd1 100644
--- a/include/libcamera/color_space.h
+++ b/include/libcamera/color_space.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.h - color space definitions
+ * color space definitions
*/
#pragma once
diff --git a/include/libcamera/control_ids.h.in b/include/libcamera/control_ids.h.in
index d53b1cf7..5d0594c6 100644
--- a/include/libcamera/control_ids.h.in
+++ b/include/libcamera/control_ids.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.h - Control ID list
+ * {{mode|capitalize}} ID list
*
* This file is auto-generated. Do not edit.
*/
@@ -10,24 +10,52 @@
#pragma once
#include <array>
+#include <map>
#include <stdint.h>
+#include <string>
#include <libcamera/controls.h>
namespace libcamera {
-namespace controls {
+namespace {{mode}} {
-enum {
-${ids}
-};
+extern const ControlIdMap {{mode}};
-${controls}
+{%- for vendor, ctrls in controls -%}
-extern const ControlIdMap controls;
+{% if vendor != 'libcamera' %}
+namespace {{vendor}} {
-${vendor_controls}
+#define LIBCAMERA_HAS_{{vendor|upper}}_VENDOR_{{mode|upper}}
+{%- endif %}
-} /* namespace controls */
+{% if ctrls %}
+enum {
+{%- for ctrl in ctrls %}
+ {{ctrl.name|snake_case|upper}} = {{ctrl.id}},
+{%- endfor %}
+};
+{% endif %}
+
+{% for ctrl in ctrls -%}
+{% if ctrl.is_enum -%}
+enum {{ctrl.name}}Enum {
+{%- for enum in ctrl.enum_values %}
+ {{enum.name}} = {{enum.value}},
+{%- endfor %}
+};
+extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values;
+extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap;
+{% endif -%}
+extern const Control<{{ctrl.type}}> {{ctrl.name}};
+{% endfor -%}
+
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{% endfor %}
+} /* namespace {{mode}} */
} /* namespace libcamera */
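[Illustrative rendering, not part of the patch: the template is now instantiated once per mode ("controls" and "properties") and emits vendor sub-namespaces plus name/value maps for enumerated controls. Hypothetical output for a vendor "acme" might look like:]

    namespace libcamera {

    namespace controls {

    extern const ControlIdMap controls;

    namespace acme {

    #define LIBCAMERA_HAS_ACME_VENDOR_CONTROLS

    enum {
        WIDGET_MODE = 0x10001,
    };

    enum WidgetModeEnum {
        WidgetModeOff = 0,
        WidgetModeOn = 1,
    };
    extern const std::array<const ControlValue, 2> WidgetModeValues;
    extern const std::map<std::string, int32_t> WidgetModeNameValueMap;
    extern const Control<int32_t> WidgetMode;

    } /* namespace acme */

    } /* namespace controls */

    } /* namespace libcamera */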
diff --git a/include/libcamera/controls.h b/include/libcamera/controls.h
index 82b95599..7c7828ae 100644
--- a/include/libcamera/controls.h
+++ b/include/libcamera/controls.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.h - Control handling
+ * Control handling
*/
#pragma once
#include <assert.h>
+#include <map>
#include <optional>
#include <set>
#include <stdint.h>
@@ -16,6 +17,7 @@
#include <vector>
#include <libcamera/base/class.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/span.h>
#include <libcamera/geometry.h>
@@ -28,67 +30,102 @@ enum ControlType {
ControlTypeNone,
ControlTypeBool,
ControlTypeByte,
+ ControlTypeUnsigned16,
+ ControlTypeUnsigned32,
ControlTypeInteger32,
ControlTypeInteger64,
ControlTypeFloat,
ControlTypeString,
ControlTypeRectangle,
ControlTypeSize,
+ ControlTypePoint,
};
namespace details {
-template<typename T>
+template<typename T, typename = std::void_t<>>
struct control_type {
};
template<>
struct control_type<void> {
static constexpr ControlType value = ControlTypeNone;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<bool> {
static constexpr ControlType value = ControlTypeBool;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint8_t> {
static constexpr ControlType value = ControlTypeByte;
+ static constexpr std::size_t size = 0;
+};
+
+template<>
+struct control_type<uint16_t> {
+ static constexpr ControlType value = ControlTypeUnsigned16;
+ static constexpr std::size_t size = 0;
+};
+
+template<>
+struct control_type<uint32_t> {
+ static constexpr ControlType value = ControlTypeUnsigned32;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<int32_t> {
static constexpr ControlType value = ControlTypeInteger32;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<int64_t> {
static constexpr ControlType value = ControlTypeInteger64;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<float> {
static constexpr ControlType value = ControlTypeFloat;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<std::string> {
static constexpr ControlType value = ControlTypeString;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<Rectangle> {
static constexpr ControlType value = ControlTypeRectangle;
+ static constexpr std::size_t size = 0;
};
template<>
struct control_type<Size> {
static constexpr ControlType value = ControlTypeSize;
+ static constexpr std::size_t size = 0;
+};
+
+template<>
+struct control_type<Point> {
+ static constexpr ControlType value = ControlTypePoint;
+ static constexpr std::size_t size = 0;
};
template<typename T, std::size_t N>
struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
+ static constexpr std::size_t size = N;
+};
+
+template<typename T>
+struct control_type<T, std::enable_if_t<std::is_enum_v<T>>> : public control_type<int32_t> {
};
} /* namespace details */
@@ -213,23 +250,44 @@ private:
class ControlId
{
public:
- ControlId(unsigned int id, const std::string &name, ControlType type)
- : id_(id), name_(name), type_(type)
- {
- }
+ enum class Direction {
+ In = (1 << 0),
+ Out = (1 << 1),
+ };
+
+ using DirectionFlags = Flags<Direction>;
+
+ ControlId(unsigned int id, const std::string &name, const std::string &vendor,
+ ControlType type, DirectionFlags direction,
+ std::size_t size = 0,
+ const std::map<std::string, int32_t> &enumStrMap = {});
unsigned int id() const { return id_; }
const std::string &name() const { return name_; }
+ const std::string &vendor() const { return vendor_; }
ControlType type() const { return type_; }
+ DirectionFlags direction() const { return direction_; }
+ bool isInput() const { return !!(direction_ & Direction::In); }
+ bool isOutput() const { return !!(direction_ & Direction::Out); }
+ bool isArray() const { return size_ > 0; }
+ std::size_t size() const { return size_; }
+ const std::map<int32_t, std::string> &enumerators() const { return reverseMap_; }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId)
unsigned int id_;
std::string name_;
+ std::string vendor_;
ControlType type_;
+ DirectionFlags direction_;
+ std::size_t size_;
+ std::map<std::string, int32_t> enumStrMap_;
+ std::map<int32_t, std::string> reverseMap_;
};
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(ControlId::Direction)
+
static inline bool operator==(unsigned int lhs, const ControlId &rhs)
{
return lhs == rhs.id();
@@ -256,8 +314,11 @@ class Control : public ControlId
public:
using type = T;
- Control(unsigned int id, const char *name)
- : ControlId(id, name, details::control_type<std::remove_cv_t<T>>::value)
+ Control(unsigned int id, const char *name, const char *vendor,
+ ControlId::DirectionFlags direction,
+ const std::map<std::string, int32_t> &enumStrMap = {})
+ : ControlId(id, name, vendor, details::control_type<std::remove_cv_t<T>>::value,
+ direction, details::control_type<std::remove_cv_t<T>>::size, enumStrMap)
{
}
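[Usage sketch, not part of the patch: ControlId now carries vendor, direction and array-size metadata, and Control<T> forwards them. The control below is hypothetical; real definitions are generated from the YAML control descriptions.]

    #include <iostream>

    #include <libcamera/controls.h>

    using namespace libcamera;

    int main()
    {
        const Control<int32_t> exampleGain(0x1234, "ExampleGain", "acme",
                                           ControlId::Direction::In |
                                           ControlId::Direction::Out);

        std::cout << exampleGain.vendor() << std::endl;  /* "acme" */
        std::cout << exampleGain.isInput() << exampleGain.isOutput()
                  << std::endl;                          /* "11" */
        std::cout << exampleGain.isArray() << std::endl; /* "0": scalar */
        return 0;
    }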
diff --git a/include/libcamera/fence.h b/include/libcamera/fence.h
index c0c916c2..598336cb 100644
--- a/include/libcamera/fence.h
+++ b/include/libcamera/fence.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * internal/fence.h - Synchronization fence
+ * Synchronization fence
*/
#pragma once
diff --git a/include/libcamera/formats.h.in b/include/libcamera/formats.h.in
index ead5287d..6ae7634f 100644
--- a/include/libcamera/formats.h.in
+++ b/include/libcamera/formats.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * formats.h - Formats
+ * Formats
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/libcamera/framebuffer.h b/include/libcamera/framebuffer.h
index 61244829..ff839243 100644
--- a/include/libcamera/framebuffer.h
+++ b/include/libcamera/framebuffer.h
@@ -2,12 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.h - Frame buffer handling
+ * Frame buffer handling
*/
#pragma once
-#include <assert.h>
#include <limits>
#include <memory>
#include <stdint.h>
diff --git a/include/libcamera/framebuffer_allocator.h b/include/libcamera/framebuffer_allocator.h
index 45ff232b..f3896bf2 100644
--- a/include/libcamera/framebuffer_allocator.h
+++ b/include/libcamera/framebuffer_allocator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.h - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#pragma once
diff --git a/include/libcamera/geometry.h b/include/libcamera/geometry.h
index d7fdbe70..f322e3d5 100644
--- a/include/libcamera/geometry.h
+++ b/include/libcamera/geometry.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.h - Geometry-related classes
+ * Geometry-related classes
*/
#pragma once
@@ -11,8 +11,6 @@
#include <ostream>
#include <string>
-#include <libcamera/base/compiler.h>
-
namespace libcamera {
class Rectangle;
@@ -110,8 +108,8 @@ public:
return *this;
}
- __nodiscard constexpr Size alignedDownTo(unsigned int hAlignment,
- unsigned int vAlignment) const
+ [[nodiscard]] constexpr Size alignedDownTo(unsigned int hAlignment,
+ unsigned int vAlignment) const
{
return {
width / hAlignment * hAlignment,
@@ -119,8 +117,8 @@ public:
};
}
- __nodiscard constexpr Size alignedUpTo(unsigned int hAlignment,
- unsigned int vAlignment) const
+ [[nodiscard]] constexpr Size alignedUpTo(unsigned int hAlignment,
+ unsigned int vAlignment) const
{
return {
(width + hAlignment - 1) / hAlignment * hAlignment,
@@ -128,7 +126,7 @@ public:
};
}
- __nodiscard constexpr Size boundedTo(const Size &bound) const
+ [[nodiscard]] constexpr Size boundedTo(const Size &bound) const
{
return {
std::min(width, bound.width),
@@ -136,7 +134,7 @@ public:
};
}
- __nodiscard constexpr Size expandedTo(const Size &expand) const
+ [[nodiscard]] constexpr Size expandedTo(const Size &expand) const
{
return {
std::max(width, expand.width),
@@ -144,7 +142,7 @@ public:
};
}
- __nodiscard constexpr Size grownBy(const Size &margins) const
+ [[nodiscard]] constexpr Size grownBy(const Size &margins) const
{
return {
width + margins.width,
@@ -152,7 +150,7 @@ public:
};
}
- __nodiscard constexpr Size shrunkBy(const Size &margins) const
+ [[nodiscard]] constexpr Size shrunkBy(const Size &margins) const
{
return {
width > margins.width ? width - margins.width : 0,
@@ -160,10 +158,10 @@ public:
};
}
- __nodiscard Size boundedToAspectRatio(const Size &ratio) const;
- __nodiscard Size expandedToAspectRatio(const Size &ratio) const;
+ [[nodiscard]] Size boundedToAspectRatio(const Size &ratio) const;
+ [[nodiscard]] Size expandedToAspectRatio(const Size &ratio) const;
- __nodiscard Rectangle centeredTo(const Point &center) const;
+ [[nodiscard]] Rectangle centeredTo(const Point &center) const;
Size operator*(float factor) const;
Size operator/(float factor) const;
@@ -262,6 +260,15 @@ public:
{
}
+ constexpr Rectangle(const Point &point1, const Point &point2)
+ : Rectangle(std::min(point1.x, point2.x), std::min(point1.y, point2.y),
+ static_cast<unsigned int>(std::max(point1.x, point2.x)) -
+ static_cast<unsigned int>(std::min(point1.x, point2.x)),
+ static_cast<unsigned int>(std::max(point1.y, point2.y)) -
+ static_cast<unsigned int>(std::min(point1.y, point2.y)))
+ {
+ }
+
int x;
int y;
unsigned int width;
@@ -285,11 +292,14 @@ public:
Rectangle &scaleBy(const Size &numerator, const Size &denominator);
Rectangle &translateBy(const Point &point);
- __nodiscard Rectangle boundedTo(const Rectangle &bound) const;
- __nodiscard Rectangle enclosedIn(const Rectangle &boundary) const;
- __nodiscard Rectangle scaledBy(const Size &numerator,
- const Size &denominator) const;
- __nodiscard Rectangle translatedBy(const Point &point) const;
+ [[nodiscard]] Rectangle boundedTo(const Rectangle &bound) const;
+ [[nodiscard]] Rectangle enclosedIn(const Rectangle &boundary) const;
+ [[nodiscard]] Rectangle scaledBy(const Size &numerator,
+ const Size &denominator) const;
+ [[nodiscard]] Rectangle translatedBy(const Point &point) const;
+
+ Rectangle transformedBetween(const Rectangle &source,
+ const Rectangle &target) const;
};
bool operator==(const Rectangle &lhs, const Rectangle &rhs);
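[Usage sketch, not part of the patch: the two-point constructor normalizes the corner order, and transformedBetween() remaps a rectangle expressed relative to one frame into another, per its documentation.]

    #include <iostream>

    #include <libcamera/geometry.h>

    using namespace libcamera;

    int main()
    {
        /* Corners in any order: yields x=20, y=50, size 80x150. */
        Rectangle r(Point(100, 50), Point(20, 200));
        std::cout << r << std::endl;

        /* Map a crop from full-sensor coordinates to a scaled output. */
        Rectangle sensor(0, 0, 4000, 3000);
        Rectangle output(0, 0, 1000, 750);
        std::cout << r.transformedBetween(sensor, output) << std::endl;
        return 0;
    }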
diff --git a/include/libcamera/internal/bayer_format.h b/include/libcamera/internal/bayer_format.h
index 78ba3969..5c14bb5f 100644
--- a/include/libcamera/internal/bayer_format.h
+++ b/include/libcamera/internal/bayer_format.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.h - Bayer Pixel Format
+ * Bayer Pixel Format
*/
#pragma once
@@ -34,6 +34,8 @@ public:
None = 0,
CSI2 = 1,
IPU3 = 2,
+ PISP1 = 3,
+ PISP2 = 4,
};
constexpr BayerFormat()
diff --git a/include/libcamera/internal/byte_stream_buffer.h b/include/libcamera/internal/byte_stream_buffer.h
index 0f4fce6f..5b1c10ab 100644
--- a/include/libcamera/internal/byte_stream_buffer.h
+++ b/include/libcamera/internal/byte_stream_buffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.h - Byte stream buffer
+ * Byte stream buffer
*/
#pragma once
diff --git a/include/libcamera/internal/camera.h b/include/libcamera/internal/camera.h
index 38dd94ff..18f5c32a 100644
--- a/include/libcamera/internal/camera.h
+++ b/include/libcamera/internal/camera.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera.h - Camera private data
+ * Camera private data
*/
#pragma once
@@ -11,6 +11,7 @@
#include <list>
#include <memory>
#include <set>
+#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
@@ -32,6 +33,7 @@ public:
~Private();
PipelineHandler *pipe() { return pipe_.get(); }
+ const PipelineHandler *pipe() const { return pipe_.get(); }
std::list<Request *> queuedRequests_;
ControlInfoMap controlInfo_;
diff --git a/include/libcamera/internal/camera_controls.h b/include/libcamera/internal/camera_controls.h
index ee6d382f..4a5a3ebc 100644
--- a/include/libcamera/internal/camera_controls.h
+++ b/include/libcamera/internal/camera_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.h - Camera controls
+ * Camera controls
*/
#pragma once
diff --git a/include/libcamera/internal/camera_lens.h b/include/libcamera/internal/camera_lens.h
index 277417da..f347c5e0 100644
--- a/include/libcamera/internal/camera_lens.h
+++ b/include/libcamera/internal/camera_lens.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.h - A camera lens controller
+ * A camera lens controller
*/
#pragma once
#include <memory>
+#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
diff --git a/include/libcamera/internal/camera_manager.h b/include/libcamera/internal/camera_manager.h
index 33ebe069..0150ca61 100644
--- a/include/libcamera/internal/camera_manager.h
+++ b/include/libcamera/internal/camera_manager.h
@@ -2,14 +2,13 @@
/*
* Copyright (C) 2023, Ideas on Board Oy.
*
- * camera_manager.h - Camera manager private data
+ * Camera manager private data
*/
#pragma once
#include <libcamera/camera_manager.h>
-#include <map>
#include <memory>
#include <sys/types.h>
#include <vector>
@@ -19,13 +18,14 @@
#include <libcamera/base/thread.h>
#include <libcamera/base/thread_annotations.h>
-#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/process.h"
namespace libcamera {
class Camera;
class DeviceEnumerator;
+class IPAManager;
+class PipelineHandlerFactoryBase;
class CameraManager::Private : public Extensible::Private, public Thread
{
@@ -38,12 +38,15 @@ public:
void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
+ IPAManager *ipaManager() const { return ipaManager_.get(); }
+
protected:
void run() override;
private:
int init();
void createPipelineHandlers();
+ void pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory);
void cleanup() LIBCAMERA_TSA_EXCLUDES(mutex_);
/*
@@ -61,7 +64,7 @@ private:
std::unique_ptr<DeviceEnumerator> enumerator_;
- IPAManager ipaManager_;
+ std::unique_ptr<IPAManager> ipaManager_;
ProcessManager processManager_;
};
diff --git a/include/libcamera/internal/camera_sensor.h b/include/libcamera/internal/camera_sensor.h
index d05f48eb..13048f32 100644
--- a/include/libcamera/internal/camera_sensor.h
+++ b/include/libcamera/internal/camera_sensor.h
@@ -2,17 +2,18 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.h - A camera sensor
+ * A camera sensor
*/
#pragma once
#include <memory>
+#include <stdint.h>
#include <string>
+#include <variant>
#include <vector>
#include <libcamera/base/class.h>
-#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
@@ -20,10 +21,8 @@
#include <libcamera/orientation.h>
#include <libcamera/transform.h>
-#include <libcamera/ipa/core_ipa_interface.h>
-
#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/formats.h"
+#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
@@ -32,95 +31,101 @@ class CameraLens;
class MediaEntity;
class SensorConfiguration;
-struct CameraSensorProperties;
-
enum class Orientation;
-class CameraSensor : protected Loggable
+struct IPACameraSensorInfo;
+
+class CameraSensor
{
public:
- explicit CameraSensor(const MediaEntity *entity);
- ~CameraSensor();
+ virtual ~CameraSensor();
+
+ virtual const std::string &model() const = 0;
+ virtual const std::string &id() const = 0;
+
+ virtual const MediaEntity *entity() const = 0;
+ virtual V4L2Subdevice *device() = 0;
+
+ virtual CameraLens *focusLens() = 0;
- int init();
+ virtual const std::vector<unsigned int> &mbusCodes() const = 0;
+ virtual std::vector<Size> sizes(unsigned int mbusCode) const = 0;
+ virtual Size resolution() const = 0;
- const std::string &model() const { return model_; }
- const std::string &id() const { return id_; }
+ virtual V4L2SubdeviceFormat
+ getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, const Size maxSize = Size()) const = 0;
+ virtual int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) = 0;
+ virtual int tryFormat(V4L2SubdeviceFormat *format) const = 0;
- const MediaEntity *entity() const { return entity_; }
- V4L2Subdevice *device() { return subdev_.get(); }
+ virtual int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) = 0;
- CameraLens *focusLens() { return focusLens_.get(); }
+ virtual V4L2Subdevice::Stream imageStream() const;
+ virtual std::optional<V4L2Subdevice::Stream> embeddedDataStream() const;
+ virtual V4L2SubdeviceFormat embeddedDataFormat() const;
+ virtual int setEmbeddedDataEnabled(bool enable);
- const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
- std::vector<Size> sizes(unsigned int mbusCode) const;
- Size resolution() const;
+ virtual const ControlList &properties() const = 0;
+ virtual int sensorInfo(IPACameraSensorInfo *info) const = 0;
+ virtual Transform computeTransform(Orientation *orientation) const = 0;
+ virtual BayerFormat::Order bayerOrder(Transform t) const = 0;
- V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const;
- int setFormat(V4L2SubdeviceFormat *format,
- Transform transform = Transform::Identity);
- int tryFormat(V4L2SubdeviceFormat *format) const;
+ virtual const ControlInfoMap &controls() const = 0;
+ virtual ControlList getControls(const std::vector<uint32_t> &ids) = 0;
+ virtual int setControls(ControlList *ctrls) = 0;
- int applyConfiguration(const SensorConfiguration &config,
- Transform transform = Transform::Identity,
- V4L2SubdeviceFormat *sensorFormat = nullptr);
+ virtual const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const = 0;
+ virtual int setTestPatternMode(controls::draft::TestPatternModeEnum mode) = 0;
+ virtual const CameraSensorProperties::SensorDelays &sensorDelays() = 0;
+};
+
+class CameraSensorFactoryBase
+{
+public:
+ CameraSensorFactoryBase(const char *name, int priority);
+ virtual ~CameraSensorFactoryBase() = default;
+
+ static std::unique_ptr<CameraSensor> create(MediaEntity *entity);
- const ControlList &properties() const { return properties_; }
- int sensorInfo(IPACameraSensorInfo *info) const;
- Transform computeTransform(Orientation *orientation) const;
- BayerFormat::Order bayerOrder(Transform t) const;
+ const std::string &name() const { return name_; }
+ int priority() const { return priority_; }
- const ControlInfoMap &controls() const;
- ControlList getControls(const std::vector<uint32_t> &ids);
- int setControls(ControlList *ctrls);
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorFactoryBase)
- const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
+ static std::vector<CameraSensorFactoryBase *> &factories();
+
+ static void registerFactory(CameraSensorFactoryBase *factory);
+
+ virtual std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity) const = 0;
+
+ std::string name_;
+ int priority_;
+};
+
+template<typename _CameraSensor>
+class CameraSensorFactory final : public CameraSensorFactoryBase
+{
+public:
+ CameraSensorFactory(const char *name, int priority)
+ : CameraSensorFactoryBase(name, priority)
{
- return testPatternModes_;
}
- int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
-
-protected:
- std::string logPrefix() const override;
private:
- LIBCAMERA_DISABLE_COPY(CameraSensor)
-
- int generateId();
- int validateSensorDriver();
- void initVimcDefaultProperties();
- void initStaticProperties();
- void initTestPatternModes();
- int initProperties();
- int discoverAncillaryDevices();
- int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
-
- const MediaEntity *entity_;
- std::unique_ptr<V4L2Subdevice> subdev_;
- unsigned int pad_;
-
- const CameraSensorProperties *staticProps_;
-
- std::string model_;
- std::string id_;
-
- V4L2Subdevice::Formats formats_;
- std::vector<unsigned int> mbusCodes_;
- std::vector<Size> sizes_;
- std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
- controls::draft::TestPatternModeEnum testPatternMode_;
-
- Size pixelArraySize_;
- Rectangle activeArea_;
- const BayerFormat *bayerFormat_;
- bool supportFlips_;
- bool flipsAlterBayerOrder_;
- Orientation mountingOrientation_;
-
- ControlList properties_;
-
- std::unique_ptr<CameraLens> focusLens_;
+ std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity) const override
+ {
+ return _CameraSensor::match(entity);
+ }
};
+#define REGISTER_CAMERA_SENSOR(sensor, priority) \
+static CameraSensorFactory<sensor> global_##sensor##Factory{ #sensor, priority };
+
} /* namespace libcamera */
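[Registration sketch, not part of the patch: CameraSensor is now a pure interface and concrete implementations self-register through CameraSensorFactory, with factories tried in priority order. match() returns either a constructed sensor or a negative error code in the variant. MySensor is hypothetical and its interface overrides are elided.]

    class MySensor : public CameraSensor
    {
    public:
        static std::variant<std::unique_ptr<CameraSensor>, int>
        match(MediaEntity *entity)
        {
            if (entity->name() != "my-sensor")
                return { -ENODEV };

            /* ...probe the entity and construct the sensor... */
            return { std::make_unique<MySensor>() };
        }

        /* ...overrides of the pure virtual CameraSensor API... */
    };

    REGISTER_CAMERA_SENSOR(MySensor, 0)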
diff --git a/include/libcamera/internal/camera_sensor_properties.h b/include/libcamera/internal/camera_sensor_properties.h
index 1ee3cb99..d7d4dab6 100644
--- a/include/libcamera/internal/camera_sensor_properties.h
+++ b/include/libcamera/internal/camera_sensor_properties.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_properties.h - Database of camera sensor properties
+ * Database of camera sensor properties
*/
#pragma once
#include <map>
+#include <stdint.h>
#include <string>
#include <libcamera/control_ids.h>
@@ -16,10 +17,18 @@
namespace libcamera {
struct CameraSensorProperties {
+ struct SensorDelays {
+ uint8_t exposureDelay;
+ uint8_t gainDelay;
+ uint8_t vblankDelay;
+ uint8_t hblankDelay;
+ };
+
static const CameraSensorProperties *get(const std::string &sensor);
Size unitCellSize;
std::map<controls::draft::TestPatternModeEnum, int32_t> testPatternModes;
+ SensorDelays sensorDelays;
};
} /* namespace libcamera */
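[Illustrative entry, not part of the patch: SensorDelays expresses, in frames, how long each control takes to reach the pixel array, for use with DelayedControls. The sensor name and values below are hypothetical; real entries live in camera_sensor_properties.cpp.]

    { "example-sensor", {
        .unitCellSize = { 1120, 1120 },
        .testPatternModes = {},
        .sensorDelays = {
            .exposureDelay = 2,
            .gainDelay = 1,
            .vblankDelay = 2,
            .hblankDelay = 2,
        },
    } },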
diff --git a/include/libcamera/internal/control_serializer.h b/include/libcamera/internal/control_serializer.h
index a38ca6b0..8a63ae44 100644
--- a/include/libcamera/internal/control_serializer.h
+++ b/include/libcamera/internal/control_serializer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.h - Control (de)serializer
+ * Control (de)serializer
*/
#pragma once
diff --git a/include/libcamera/internal/control_validator.h b/include/libcamera/internal/control_validator.h
index 26412d8b..260602f2 100644
--- a/include/libcamera/internal/control_validator.h
+++ b/include/libcamera/internal/control_validator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.h - Control validator
+ * Control validator
*/
#pragma once
diff --git a/include/libcamera/internal/converter.h b/include/libcamera/internal/converter.h
index 834ec5ab..644ec429 100644
--- a/include/libcamera/internal/converter.h
+++ b/include/libcamera/internal/converter.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
- * converter.h - Generic format converter interface
+ * Generic format converter interface
*/
#pragma once
@@ -14,9 +14,11 @@
#include <memory>
#include <string>
#include <tuple>
+#include <utility>
#include <vector>
#include <libcamera/base/class.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/geometry.h>
@@ -26,12 +28,25 @@ namespace libcamera {
class FrameBuffer;
class MediaDevice;
class PixelFormat;
+class Stream;
struct StreamConfiguration;
class Converter
{
public:
- Converter(MediaDevice *media);
+ enum class Feature {
+ None = 0,
+ InputCrop = (1 << 0),
+ };
+
+ using Features = Flags<Feature>;
+
+ enum class Alignment {
+ Down = 0,
+ Up,
+ };
+
+ Converter(MediaDevice *media, Features features = Feature::None);
virtual ~Converter();
virtual int loadConfiguration(const std::string &filename) = 0;
@@ -41,25 +56,45 @@ public:
virtual std::vector<PixelFormat> formats(PixelFormat input) = 0;
virtual SizeRange sizes(const Size &input) = 0;
+ virtual Size adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size,
+ Alignment align = Alignment::Down) = 0;
+ virtual Size adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size,
+ Alignment align = Alignment::Down) = 0;
+
virtual std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0;
+ virtual int validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align = Alignment::Down) = 0;
+
virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
- virtual int exportBuffers(unsigned int output, unsigned int count,
+ virtual bool isConfigured(const Stream *stream) const = 0;
+ virtual int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
virtual int start() = 0;
virtual void stop() = 0;
virtual int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
+ const std::map<const Stream *, FrameBuffer *> &outputs) = 0;
+
+ virtual int setInputCrop(const Stream *stream, Rectangle *rect) = 0;
+ virtual std::pair<Rectangle, Rectangle> inputCropBounds() = 0;
+ virtual std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) = 0;
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
const std::string &deviceNode() const { return deviceNode_; }
+ Features features() const { return features_; }
+
+protected:
+ Features features_;
+
private:
std::string deviceNode_;
};
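[Usage sketch, not part of the patch: converter operations are now keyed on Stream pointers, and optional capabilities are advertised through Feature flags that callers should query before use.]

    void applyCrop(Converter *converter, const Stream *stream, Rectangle crop)
    {
        if (!(converter->features() & Converter::Feature::InputCrop))
            return;

        /* inputCropBounds(stream) gives the min/max achievable windows;
         * setInputCrop() may adjust the rectangle to device limits. */
        converter->setInputCrop(stream, &crop);
    }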
diff --git a/include/libcamera/internal/converter/converter_v4l2_m2m.h b/include/libcamera/internal/converter/converter_v4l2_m2m.h
index 815916d0..0ad7bf7f 100644
--- a/include/libcamera/internal/converter/converter_v4l2_m2m.h
+++ b/include/libcamera/internal/converter/converter_v4l2_m2m.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
- * converter_v4l2_m2m.h - V4l2 M2M Format converter interface
+ * V4L2 M2M Format converter interface
*/
#pragma once
@@ -28,7 +28,9 @@ class FrameBuffer;
class MediaDevice;
class Size;
class SizeRange;
+class Stream;
struct StreamConfiguration;
+class Rectangle;
class V4L2M2MDevice;
class V4L2M2MConverter : public Converter
@@ -36,31 +38,45 @@ class V4L2M2MConverter : public Converter
public:
V4L2M2MConverter(MediaDevice *media);
- int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
- bool isValid() const { return m2m_ != nullptr; }
+ int loadConfiguration([[maybe_unused]] const std::string &filename) override { return 0; }
+ bool isValid() const override { return m2m_ != nullptr; }
- std::vector<PixelFormat> formats(PixelFormat input);
- SizeRange sizes(const Size &input);
+ std::vector<PixelFormat> formats(PixelFormat input) override;
+ SizeRange sizes(const Size &input) override;
std::tuple<unsigned int, unsigned int>
- strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
+ strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) override;
+
+ Size adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align = Alignment::Down) override;
+ Size adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align = Alignment::Down) override;
int configure(const StreamConfiguration &inputCfg,
- const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
- int exportBuffers(unsigned int ouput, unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const std::vector<std::reference_wrapper<StreamConfiguration>>
+ &outputCfg) override;
+ bool isConfigured(const Stream *stream) const override;
+ int exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start() override;
+ void stop() override;
- int start();
- void stop();
+ int validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align = Alignment::Down) override;
int queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs);
+ const std::map<const Stream *, FrameBuffer *> &outputs) override;
+
+ int setInputCrop(const Stream *stream, Rectangle *rect) override;
+ std::pair<Rectangle, Rectangle> inputCropBounds() override { return inputCropBounds_; }
+ std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) override;
private:
- class Stream : protected Loggable
+ class V4L2M2MStream : protected Loggable
{
public:
- Stream(V4L2M2MConverter *converter, unsigned int index);
+ V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream);
bool isValid() const { return m2m_ != nullptr; }
@@ -74,6 +90,11 @@ private:
int queueBuffers(FrameBuffer *input, FrameBuffer *output);
+ int setInputSelection(unsigned int target, Rectangle *rect);
+ int getInputSelection(unsigned int target, Rectangle *rect);
+
+ std::pair<Rectangle, Rectangle> inputCropBounds();
+
protected:
std::string logPrefix() const override;
@@ -82,17 +103,23 @@ private:
void outputBufferReady(FrameBuffer *buffer);
V4L2M2MConverter *converter_;
- unsigned int index_;
+ const Stream *stream_;
std::unique_ptr<V4L2M2MDevice> m2m_;
unsigned int inputBufferCount_;
unsigned int outputBufferCount_;
+
+ std::pair<Rectangle, Rectangle> inputCropBounds_;
};
+ Size adjustSizes(const Size &size, const std::vector<SizeRange> &ranges,
+ Alignment align);
+
std::unique_ptr<V4L2M2MDevice> m2m_;
- std::vector<Stream> streams_;
+ std::map<const Stream *, std::unique_ptr<V4L2M2MStream>> streams_;
std::map<FrameBuffer *, unsigned int> queue_;
+ std::pair<Rectangle, Rectangle> inputCropBounds_;
};
} /* namespace libcamera */
diff --git a/include/libcamera/internal/debug_controls.h b/include/libcamera/internal/debug_controls.h
new file mode 100644
index 00000000..0b049f48
--- /dev/null
+++ b/include/libcamera/internal/debug_controls.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Debug metadata helpers
+ */
+
+#pragma once
+
+#include <libcamera/control_ids.h>
+
+namespace libcamera {
+
+class DebugMetadata
+{
+public:
+ DebugMetadata() = default;
+
+ void enableByControl(const ControlList &controls);
+ void enable(bool enable = true);
+ void setParent(DebugMetadata *parent);
+ void moveEntries(ControlList &list);
+
+ template<typename T, typename V>
+ void set(const Control<T> &ctrl, const V &value)
+ {
+ if (parent_) {
+ parent_->set(ctrl, value);
+ return;
+ }
+
+ if (!enabled_)
+ return;
+
+ cache_.set(ctrl, value);
+ }
+
+ void set(unsigned int id, const ControlValue &value);
+
+private:
+ bool enabled_ = false;
+ DebugMetadata *parent_ = nullptr;
+ ControlList cache_;
+};
+
+} /* namespace libcamera */
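[Usage sketch, not part of the patch: DebugMetadata caches debug control values while enabled, optionally forwarding them to a parent instance, and hands them over to the request metadata on completion. The control name used with set() is hypothetical.]

    void processRequest(DebugMetadata &debug, const ControlList &requestControls,
                        ControlList &metadata)
    {
        /* Enable or disable based on a control in the request. */
        debug.enableByControl(requestControls);

        /* Cached only while enabled. controls::debug::StageValue is a
         * hypothetical generated debug control. */
        debug.set(controls::debug::StageValue, 1.0f);

        /* Transfer the accumulated entries into the metadata. */
        debug.moveEntries(metadata);
    }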
diff --git a/include/libcamera/internal/delayed_controls.h b/include/libcamera/internal/delayed_controls.h
index aef37077..e8d3014d 100644
--- a/include/libcamera/internal/delayed_controls.h
+++ b/include/libcamera/internal/delayed_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#pragma once
diff --git a/include/libcamera/internal/device_enumerator.h b/include/libcamera/internal/device_enumerator.h
index 72ec9a60..db3532a9 100644
--- a/include/libcamera/internal/device_enumerator.h
+++ b/include/libcamera/internal/device_enumerator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.h - API to enumerate and find media devices
+ * API to enumerate and find media devices
*/
#pragma once
diff --git a/include/libcamera/internal/device_enumerator_sysfs.h b/include/libcamera/internal/device_enumerator_sysfs.h
index 3e84b83f..4ccc9845 100644
--- a/include/libcamera/internal/device_enumerator_sysfs.h
+++ b/include/libcamera/internal/device_enumerator_sysfs.h
@@ -2,12 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.h - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#pragma once
-#include <memory>
#include <string>
#include "libcamera/internal/device_enumerator.h"
diff --git a/include/libcamera/internal/device_enumerator_udev.h b/include/libcamera/internal/device_enumerator_udev.h
index 1b3360df..1378c190 100644
--- a/include/libcamera/internal/device_enumerator_udev.h
+++ b/include/libcamera/internal/device_enumerator_udev.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.h - udev-based device enumerator
+ * udev-based device enumerator
*/
#pragma once
diff --git a/include/libcamera/internal/dma_buf_allocator.h b/include/libcamera/internal/dma_buf_allocator.h
new file mode 100644
index 00000000..13600915
--- /dev/null
+++ b/include/libcamera/internal/dma_buf_allocator.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-buf allocations.
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/unique_fd.h>
+
+namespace libcamera {
+
+class FrameBuffer;
+
+class DmaBufAllocator
+{
+public:
+ enum class DmaBufAllocatorFlag {
+ CmaHeap = 1 << 0,
+ SystemHeap = 1 << 1,
+ UDmaBuf = 1 << 2,
+ };
+
+ using DmaBufAllocatorFlags = Flags<DmaBufAllocatorFlag>;
+
+ DmaBufAllocator(DmaBufAllocatorFlags flags = DmaBufAllocatorFlag::CmaHeap);
+ ~DmaBufAllocator();
+ bool isValid() const { return providerHandle_.isValid(); }
+ UniqueFD alloc(const char *name, std::size_t size);
+
+ int exportBuffers(unsigned int count,
+ const std::vector<unsigned int> &planeSizes,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+private:
+ std::unique_ptr<FrameBuffer> createBuffer(
+ std::string name, const std::vector<unsigned int> &planeSizes);
+
+ UniqueFD allocFromHeap(const char *name, std::size_t size);
+ UniqueFD allocFromUDmaBuf(const char *name, std::size_t size);
+ UniqueFD providerHandle_;
+ DmaBufAllocatorFlag type_;
+};
+
+class DmaSyncer final
+{
+public:
+ enum class SyncType {
+ Read = 0,
+ Write,
+ ReadWrite,
+ };
+
+ explicit DmaSyncer(SharedFD fd, SyncType type = SyncType::ReadWrite);
+
+ DmaSyncer(DmaSyncer &&other) = default;
+ DmaSyncer &operator=(DmaSyncer &&other) = default;
+
+ ~DmaSyncer();
+
+private:
+ LIBCAMERA_DISABLE_COPY(DmaSyncer)
+
+ void sync(uint64_t step);
+
+ SharedFD fd_;
+ uint64_t flags_ = 0;
+};
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaBufAllocator::DmaBufAllocatorFlag)
+
+} /* namespace libcamera */
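[Usage sketch, not part of the patch: the allocator probes the requested providers (CMA or system dma-heap, udmabuf), and DmaSyncer brackets CPU access to a buffer for cache coherency, signalling sync start on construction and sync end on destruction.]

    void writeScratchBuffer()
    {
        DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
                                  DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf);
        if (!allocator.isValid())
            return;

        UniqueFD fd = allocator.alloc("scratch", 4096);
        if (!fd.isValid())
            return;

        DmaSyncer sync(SharedFD(std::move(fd)), DmaSyncer::SyncType::Write);
        /* ...CPU writes through an mmap() of the buffer... */
    }   /* sync end signalled when 'sync' goes out of scope */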
diff --git a/include/libcamera/internal/formats.h b/include/libcamera/internal/formats.h
index 5b16c0a8..6a3e9c16 100644
--- a/include/libcamera/internal/formats.h
+++ b/include/libcamera/internal/formats.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.h - libcamera image formats
+ * libcamera image formats
*/
#pragma once
#include <array>
-#include <map>
#include <vector>
#include <libcamera/geometry.h>
diff --git a/include/libcamera/internal/framebuffer.h b/include/libcamera/internal/framebuffer.h
index 1f42a4fc..97b49d42 100644
--- a/include/libcamera/internal/framebuffer.h
+++ b/include/libcamera/internal/framebuffer.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * framebuffer.h - Internal frame buffer handling
+ * Internal frame buffer handling
*/
#pragma once
#include <memory>
+#include <stdint.h>
#include <utility>
#include <libcamera/base/class.h>
diff --git a/include/libcamera/internal/ipa_data_serializer.h b/include/libcamera/internal/ipa_data_serializer.h
index 085f1fed..b4614f21 100644
--- a/include/libcamera/internal/ipa_data_serializer.h
+++ b/include/libcamera/internal/ipa_data_serializer.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.h - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#pragma once
-#include <deque>
-#include <iostream>
+#include <stdint.h>
#include <string.h>
#include <tuple>
#include <type_traits>
@@ -20,10 +19,9 @@
#include <libcamera/control_ids.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
+
#include <libcamera/ipa/ipa_interface.h>
-#include "libcamera/internal/byte_stream_buffer.h"
-#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/control_serializer.h"
namespace libcamera {
@@ -165,7 +163,7 @@ public:
std::vector<SharedFD>::const_iterator fdIter = fdsBegin;
for (uint32_t i = 0; i < vecLen; i++) {
uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
- uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
+ uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8;
ret[i] = IPADataSerializer<V>::deserialize(dataIter,
@@ -272,7 +270,7 @@ public:
std::vector<SharedFD>::const_iterator fdIter = fdsBegin;
for (uint32_t i = 0; i < mapLen; i++) {
uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
- uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
+ uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8;
K key = IPADataSerializer<K>::deserialize(dataIter,
@@ -284,7 +282,7 @@ public:
dataIter += sizeofData;
fdIter += sizeofFds;
sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
- sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
+ sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8;
const V value = IPADataSerializer<V>::deserialize(dataIter,
diff --git a/include/libcamera/internal/ipa_manager.h b/include/libcamera/internal/ipa_manager.h
index bf823563..16dede0c 100644
--- a/include/libcamera/internal/ipa_manager.h
+++ b/include/libcamera/internal/ipa_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.h - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#pragma once
@@ -15,6 +15,7 @@
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
+#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/ipa_module.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/pub_key.h"
@@ -34,11 +35,13 @@ public:
uint32_t minVersion,
uint32_t maxVersion)
{
- IPAModule *m = self_->module(pipe, minVersion, maxVersion);
+ CameraManager *cm = pipe->cameraManager();
+ IPAManager *self = cm->_d()->ipaManager();
+ IPAModule *m = self->module(pipe, minVersion, maxVersion);
if (!m)
return nullptr;
- std::unique_ptr<T> proxy = std::make_unique<T>(m, !self_->isSignatureValid(m));
+ std::unique_ptr<T> proxy = std::make_unique<T>(m, !self->isSignatureValid(m));
if (!proxy->isValid()) {
LOG(IPAManager, Error) << "Failed to load proxy";
return nullptr;
@@ -55,8 +58,6 @@ public:
#endif
private:
- static IPAManager *self_;
-
void parseDir(const char *libDir, unsigned int maxDepth,
std::vector<std::string> &files);
unsigned int addDir(const char *libDir, unsigned int maxDepth = 0);
diff --git a/include/libcamera/internal/ipa_module.h b/include/libcamera/internal/ipa_module.h
index 8038bdee..7c49d3f3 100644
--- a/include/libcamera/internal/ipa_module.h
+++ b/include/libcamera/internal/ipa_module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.h - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#pragma once
diff --git a/include/libcamera/internal/ipa_proxy.h b/include/libcamera/internal/ipa_proxy.h
index 781c8b62..983bcc5f 100644
--- a/include/libcamera/internal/ipa_proxy.h
+++ b/include/libcamera/internal/ipa_proxy.h
@@ -2,14 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.h - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#pragma once
-#include <memory>
#include <string>
-#include <vector>
#include <libcamera/ipa/ipa_interface.h>
@@ -31,7 +29,8 @@ public:
bool isValid() const { return valid_; }
- std::string configurationFile(const std::string &file) const;
+ std::string configurationFile(const std::string &name,
+ const std::string &fallbackName = std::string()) const;
protected:
std::string resolvePath(const std::string &file) const;
diff --git a/include/libcamera/internal/ipc_pipe.h b/include/libcamera/internal/ipc_pipe.h
index ab5dd67c..418c4622 100644
--- a/include/libcamera/internal/ipc_pipe.h
+++ b/include/libcamera/internal/ipc_pipe.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.h - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#pragma once
+#include <stdint.h>
#include <vector>
#include <libcamera/base/shared_fd.h>
diff --git a/include/libcamera/internal/ipc_pipe_unixsocket.h b/include/libcamera/internal/ipc_pipe_unixsocket.h
index 004d9539..84512809 100644
--- a/include/libcamera/internal/ipc_pipe_unixsocket.h
+++ b/include/libcamera/internal/ipc_pipe_unixsocket.h
@@ -2,14 +2,14 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.h - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#pragma once
#include <map>
#include <memory>
-#include <vector>
+#include <stdint.h>
#include "libcamera/internal/ipc_pipe.h"
#include "libcamera/internal/ipc_unixsocket.h"
diff --git a/include/libcamera/internal/ipc_unixsocket.h b/include/libcamera/internal/ipc_unixsocket.h
index 3963d182..48bb7a94 100644
--- a/include/libcamera/internal/ipc_unixsocket.h
+++ b/include/libcamera/internal/ipc_unixsocket.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.h - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#pragma once
diff --git a/include/libcamera/internal/mapped_framebuffer.h b/include/libcamera/internal/mapped_framebuffer.h
index fb39adbf..6aaabf50 100644
--- a/include/libcamera/internal/mapped_framebuffer.h
+++ b/include/libcamera/internal/mapped_framebuffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.h - Frame buffer memory mapping support
+ * Frame buffer memory mapping support
*/
#pragma once
diff --git a/include/libcamera/internal/matrix.h b/include/libcamera/internal/matrix.h
new file mode 100644
index 00000000..a055e692
--- /dev/null
+++ b/include/libcamera/internal/matrix.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Matrix and related operations
+ */
+#pragma once
+
+#include <algorithm>
+#include <sstream>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Matrix)
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows, unsigned int Cols,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, unsigned int Rows, unsigned int Cols>
+#endif /* __DOXYGEN__ */
+class Matrix
+{
+public:
+ Matrix()
+ {
+ data_.fill(static_cast<T>(0));
+ }
+
+ Matrix(const std::array<T, Rows * Cols> &data)
+ {
+ std::copy(data.begin(), data.end(), data_.begin());
+ }
+
+ static Matrix identity()
+ {
+ Matrix ret;
+ for (size_t i = 0; i < std::min(Rows, Cols); i++)
+ ret[i][i] = static_cast<T>(1);
+ return ret;
+ }
+
+ ~Matrix() = default;
+
+ const std::string toString() const
+ {
+ std::stringstream out;
+
+ out << "Matrix { ";
+ for (unsigned int i = 0; i < Rows; i++) {
+ out << "[ ";
+ for (unsigned int j = 0; j < Cols; j++) {
+ out << (*this)[i][j];
+ out << ((j + 1 < Cols) ? ", " : " ");
+ }
+ out << ((i + 1 < Rows) ? "], " : "]");
+ }
+ out << " }";
+
+ return out.str();
+ }
+
+ Span<const T, Rows * Cols> data() const { return data_; }
+
+ Span<const T, Cols> operator[](size_t i) const
+ {
+ return Span<const T, Cols>{ &data_.data()[i * Cols], Cols };
+ }
+
+ Span<T, Cols> operator[](size_t i)
+ {
+ return Span<T, Cols>{ &data_.data()[i * Cols], Cols };
+ }
+
+#ifndef __DOXYGEN__
+ template<typename U, std::enable_if_t<std::is_arithmetic_v<U>>>
+#else
+ template<typename U>
+#endif /* __DOXYGEN__ */
+ Matrix<T, Rows, Cols> &operator*=(U d)
+ {
+ for (unsigned int i = 0; i < Rows * Cols; i++)
+ data_[i] *= d;
+ return *this;
+ }
+
+private:
+ std::array<T, Rows * Cols> data_;
+};
+
+#ifndef __DOXYGEN__
+template<typename T, typename U, unsigned int Rows, unsigned int Cols,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, typename U, unsigned int Rows, unsigned int Cols>
+#endif /* __DOXYGEN__ */
+Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
+{
+ Matrix<U, Rows, Cols> result;
+
+ for (unsigned int i = 0; i < Rows; i++) {
+ for (unsigned int j = 0; j < Cols; j++)
+ result[i][j] = d * m[i][j];
+ }
+
+ return result;
+}
+
+#ifndef __DOXYGEN__
+template<typename T, typename U, unsigned int Rows, unsigned int Cols,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, typename U, unsigned int Rows, unsigned int Cols>
+#endif /* __DOXYGEN__ */
+Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
+{
+ return d * m;
+}
+
+#ifndef __DOXYGEN__
+template<typename T,
+ unsigned int R1, unsigned int C1,
+ unsigned int R2, unsigned int C2,
+ std::enable_if_t<C1 == R2> * = nullptr>
+#else
+template<typename T, unsigned int R1, unsigned int C1, unsigned int R2, unsigned int C2>
+#endif /* __DOXYGEN__ */
+Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
+{
+ Matrix<T, R1, C2> result;
+
+ for (unsigned int i = 0; i < R1; i++) {
+ for (unsigned int j = 0; j < C2; j++) {
+ T sum = 0;
+
+ for (unsigned int k = 0; k < C1; k++)
+ sum += m1[i][k] * m2[k][j];
+
+ result[i][j] = sum;
+ }
+ }
+
+ return result;
+}
+
+template<typename T, unsigned int Rows, unsigned int Cols>
+Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
+{
+ Matrix<T, Rows, Cols> result;
+
+ for (unsigned int i = 0; i < Rows; i++) {
+ for (unsigned int j = 0; j < Cols; j++)
+ result[i][j] = m1[i][j] + m2[i][j];
+ }
+
+ return result;
+}
+
+#ifndef __DOXYGEN__
+bool matrixValidateYaml(const YamlObject &obj, unsigned int size);
+#endif /* __DOXYGEN__ */
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows, unsigned int Cols>
+std::ostream &operator<<(std::ostream &out, const Matrix<T, Rows, Cols> &m)
+{
+ out << m.toString();
+ return out;
+}
+
+template<typename T, unsigned int Rows, unsigned int Cols>
+struct YamlObject::Getter<Matrix<T, Rows, Cols>> {
+ std::optional<Matrix<T, Rows, Cols>> get(const YamlObject &obj) const
+ {
+ if (!matrixValidateYaml(obj, Rows * Cols))
+ return std::nullopt;
+
+ Matrix<T, Rows, Cols> matrix;
+ T *data = &matrix[0][0];
+
+ unsigned int i = 0;
+ for (const YamlObject &entry : obj.asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+
+ data[i++] = *value;
+ }
+
+ return matrix;
+ }
+};
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
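[Usage sketch, not part of the patch: Matrix is a fixed-size arithmetic matrix with compile-time checked dimensions, intended for uses such as colour correction matrices in the IPAs.]

    #include <iostream>

    #include "libcamera/internal/matrix.h"

    using namespace libcamera;

    int main()
    {
        Matrix<float, 3, 3> ccm = Matrix<float, 3, 3>::identity();
        ccm[0][0] = 1.5f;

        /* Scalar product, then a 3x3 * 3x3 matrix product; mismatched
         * inner dimensions would fail to compile. */
        Matrix<float, 3, 3> scaled = 2.0f * ccm;
        Matrix<float, 3, 3> product = ccm * scaled;

        std::cout << product << std::endl;
        return 0;
    }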
diff --git a/include/libcamera/internal/media_device.h b/include/libcamera/internal/media_device.h
index eb8cfde4..e412d3a0 100644
--- a/include/libcamera/internal/media_device.h
+++ b/include/libcamera/internal/media_device.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.h - Media device handler
+ * Media device handler
*/
#pragma once
#include <map>
-#include <sstream>
#include <string>
#include <vector>
diff --git a/include/libcamera/internal/media_object.h b/include/libcamera/internal/media_object.h
index b1572968..9356d204 100644
--- a/include/libcamera/internal/media_object.h
+++ b/include/libcamera/internal/media_object.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.h - Media Device objects: entities, pads and links.
+ * Media Device objects: entities, pads and links.
*/
#pragma once
@@ -48,6 +48,8 @@ public:
unsigned int flags() const { return flags_; }
int setEnabled(bool enable);
+ std::string toString() const;
+
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaLink)
@@ -61,6 +63,8 @@ private:
unsigned int flags_;
};
+std::ostream &operator<<(std::ostream &out, const MediaLink &link);
+
class MediaPad : public MediaObject
{
public:
@@ -71,6 +75,8 @@ public:
void addLink(MediaLink *link);
+ std::string toString() const;
+
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaPad)
@@ -85,6 +91,8 @@ private:
std::vector<MediaLink *> links_;
};
+std::ostream &operator<<(std::ostream &out, const MediaPad &pad);
+
class MediaEntity : public MediaObject
{
public:
diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build
index 7f1f3440..45408b31 100644
--- a/include/libcamera/internal/meson.build
+++ b/include/libcamera/internal/meson.build
@@ -2,13 +2,6 @@
subdir('tracepoints')
-libcamera_tracepoint_header = custom_target(
- 'tp_header',
- input : ['tracepoints.h.in', tracepoint_files],
- output : 'tracepoints.h',
- command : [gen_tracepoints_header, include_build_dir, '@OUTPUT@', '@INPUT@'],
-)
-
libcamera_internal_headers = files([
'bayer_format.h',
'byte_stream_buffer.h',
@@ -21,30 +14,47 @@ libcamera_internal_headers = files([
'control_serializer.h',
'control_validator.h',
'converter.h',
+ 'debug_controls.h',
'delayed_controls.h',
'device_enumerator.h',
'device_enumerator_sysfs.h',
'device_enumerator_udev.h',
+ 'dma_buf_allocator.h',
'formats.h',
'framebuffer.h',
+ 'ipa_data_serializer.h',
'ipa_manager.h',
'ipa_module.h',
'ipa_proxy.h',
+ 'ipc_pipe.h',
'ipc_unixsocket.h',
'mapped_framebuffer.h',
+ 'matrix.h',
'media_device.h',
'media_object.h',
'pipeline_handler.h',
'process.h',
'pub_key.h',
'request.h',
+ 'shared_mem_object.h',
'source_paths.h',
'sysfs.h',
'v4l2_device.h',
'v4l2_pixelformat.h',
'v4l2_subdevice.h',
'v4l2_videodevice.h',
+ 'vector.h',
'yaml_parser.h',
])
+tracepoints_h = custom_target(
+ 'tp_header',
+ input : ['tracepoints.h.in', tracepoint_files],
+ output : 'tracepoints.h',
+ command : [gen_tracepoints, include_build_dir, '@OUTPUT@', '@INPUT@'],
+)
+
+libcamera_internal_headers += tracepoints_h
+
subdir('converter')
+subdir('software_isp')
diff --git a/include/libcamera/internal/pipeline_handler.h b/include/libcamera/internal/pipeline_handler.h
index c96944f4..972a2fa6 100644
--- a/include/libcamera/internal/pipeline_handler.h
+++ b/include/libcamera/internal/pipeline_handler.h
@@ -2,26 +2,22 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.h - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#pragma once
#include <memory>
#include <queue>
-#include <set>
#include <string>
#include <sys/types.h>
#include <vector>
-#include <libcamera/base/mutex.h>
#include <libcamera/base/object.h>
#include <libcamera/controls.h>
#include <libcamera/stream.h>
-#include "libcamera/internal/ipa_proxy.h"
-
namespace libcamera {
class Camera;
@@ -45,7 +41,7 @@ public:
MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
const DeviceMatch &dm);
- bool acquire();
+ bool acquire(Camera *camera);
void release(Camera *camera);
virtual std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
@@ -64,12 +60,16 @@ public:
bool completeBuffer(Request *request, FrameBuffer *buffer);
void completeRequest(Request *request);
+ void cancelRequest(Request *request);
std::string configurationFile(const std::string &subdir,
- const std::string &name) const;
+ const std::string &name,
+ bool silent = false) const;
const char *name() const { return name_; }
+ CameraManager *cameraManager() const { return manager_; }
+
protected:
void registerCamera(std::shared_ptr<Camera> camera);
void hotplugMediaDevice(MediaDevice *media);
@@ -77,6 +77,7 @@ protected:
virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
virtual void stopDevice(Camera *camera) = 0;
+ virtual bool acquireDevice(Camera *camera);
virtual void releaseDevice(Camera *camera);
CameraManager *manager_;
@@ -96,9 +97,7 @@ private:
std::queue<Request *> waitingRequests_;
const char *name_;
-
- Mutex lock_;
- unsigned int useCount_ LIBCAMERA_TSA_GUARDED_BY(lock_);
+ unsigned int useCount_;
friend class PipelineHandlerFactoryBase;
};
@@ -114,6 +113,7 @@ public:
const std::string &name() const { return name_; }
static std::vector<PipelineHandlerFactoryBase *> &factories();
+ static const PipelineHandlerFactoryBase *getFactoryByName(const std::string &name);
private:
static void registerType(PipelineHandlerFactoryBase *factory);
@@ -140,7 +140,7 @@ public:
}
};
-#define REGISTER_PIPELINE_HANDLER(handler) \
-static PipelineHandlerFactory<handler> global_##handler##Factory(#handler);
+#define REGISTER_PIPELINE_HANDLER(handler, name) \
+ static PipelineHandlerFactory<handler> global_##handler##Factory(name);
} /* namespace libcamera */
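With the factory name now an explicit string (see getFactoryByName() above), registration is decoupled from the C++ class identifier. A minimal sketch of the new two-argument macro, using a hypothetical PipelineHandlerExample class whose required overrides are elided, so the sketch is illustrative rather than compilable:

    #include "libcamera/internal/pipeline_handler.h"

    using namespace libcamera;

    class PipelineHandlerExample : public PipelineHandler
    {
    public:
        PipelineHandlerExample(CameraManager *manager)
            : PipelineHandler(manager)
        {
        }

        /* Pure-virtual overrides (generateConfiguration(), configure(),
         * queueRequestDevice(), ...) elided for brevity. */
    };

    /* The registered name no longer needs to match the class name. */
    REGISTER_PIPELINE_HANDLER(PipelineHandlerExample, "example")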
diff --git a/include/libcamera/internal/process.h b/include/libcamera/internal/process.h
index 95e67e10..b1d07a5a 100644
--- a/include/libcamera/internal/process.h
+++ b/include/libcamera/internal/process.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.h - Process object
+ * Process object
*/
#pragma once
diff --git a/include/libcamera/internal/pub_key.h b/include/libcamera/internal/pub_key.h
index 8653a912..c8cc04cb 100644
--- a/include/libcamera/internal/pub_key.h
+++ b/include/libcamera/internal/pub_key.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.h - Public key signature verification
+ * Public key signature verification
*/
#pragma once
diff --git a/include/libcamera/internal/request.h b/include/libcamera/internal/request.h
index 3454cf5a..73e9bb5c 100644
--- a/include/libcamera/internal/request.h
+++ b/include/libcamera/internal/request.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.h - Request class private data
+ * Request class private data
*/
#pragma once
@@ -10,6 +10,8 @@
#include <chrono>
#include <map>
#include <memory>
+#include <stdint.h>
+#include <unordered_set>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/timer.h>
diff --git a/include/libcamera/internal/shared_mem_object.h b/include/libcamera/internal/shared_mem_object.h
new file mode 100644
index 00000000..e9f1dacd
--- /dev/null
+++ b/include/libcamera/internal/shared_mem_object.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ *
+ * Helpers for shared memory allocations
+ */
+#pragma once
+
+#include <stdint.h>
+#include <string>
+#include <sys/mman.h>
+#include <type_traits>
+#include <utility>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+class SharedMem
+{
+public:
+ SharedMem();
+
+ SharedMem(const std::string &name, std::size_t size);
+ SharedMem(SharedMem &&rhs);
+
+ virtual ~SharedMem();
+
+ SharedMem &operator=(SharedMem &&rhs);
+
+ const SharedFD &fd() const
+ {
+ return fd_;
+ }
+
+ Span<uint8_t> mem() const
+ {
+ return mem_;
+ }
+
+ explicit operator bool() const
+ {
+ return !mem_.empty();
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMem)
+
+ SharedFD fd_;
+
+ Span<uint8_t> mem_;
+};
+
+template<class T, typename = std::enable_if_t<std::is_standard_layout<T>::value>>
+class SharedMemObject : public SharedMem
+{
+public:
+ static constexpr std::size_t kSize = sizeof(T);
+
+ SharedMemObject()
+ : SharedMem(), obj_(nullptr)
+ {
+ }
+
+ template<class... Args>
+ SharedMemObject(const std::string &name, Args &&...args)
+ : SharedMem(name, kSize), obj_(nullptr)
+ {
+ if (mem().empty())
+ return;
+
+ obj_ = new (mem().data()) T(std::forward<Args>(args)...);
+ }
+
+ SharedMemObject(SharedMemObject<T> &&rhs)
+ : SharedMem(std::move(rhs))
+ {
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ }
+
+ ~SharedMemObject()
+ {
+ if (obj_)
+ obj_->~T();
+ }
+
+ SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
+ {
+ SharedMem::operator=(std::move(rhs));
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ return *this;
+ }
+
+ T *operator->()
+ {
+ return obj_;
+ }
+
+ const T *operator->() const
+ {
+ return obj_;
+ }
+
+ T &operator*()
+ {
+ return *obj_;
+ }
+
+ const T &operator*() const
+ {
+ return *obj_;
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMemObject)
+
+ T *obj_;
+};
+
+} /* namespace libcamera */
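SharedMemObject placement-constructs a standard-layout T in memfd-backed shared memory, so the same object can be shared with another process through the file descriptor. A usage sketch with a hypothetical parameter struct:

    #include <stdint.h>

    #include "libcamera/internal/shared_mem_object.h"

    using namespace libcamera;

    struct IspParams {
        uint32_t gain;
        uint32_t exposure;
    };

    void shareParams()
    {
        /* Allocates kSize bytes of shared memory and constructs the
         * object in place. */
        SharedMemObject<IspParams> params("isp-params");
        if (!params)
            return; /* allocation or mapping failed */

        params->gain = 256;

        /* fd() can be passed across a process boundary (e.g. to an
         * IPA) so both sides access the same object. */
        const SharedFD &fd = params.fd();
        (void)fd;
    }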
diff --git a/include/libcamera/internal/software_isp/debayer_params.h b/include/libcamera/internal/software_isp/debayer_params.h
new file mode 100644
index 00000000..7d8fdd48
--- /dev/null
+++ b/include/libcamera/internal/software_isp/debayer_params.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * DebayerParams header
+ */
+
+#pragma once
+
+#include <array>
+#include <stdint.h>
+
+namespace libcamera {
+
+struct DebayerParams {
+ static constexpr unsigned int kRGBLookupSize = 256;
+
+ using ColorLookupTable = std::array<uint8_t, kRGBLookupSize>;
+
+ ColorLookupTable red;
+ ColorLookupTable green;
+ ColorLookupTable blue;
+};
+
+} /* namespace libcamera */
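Each lookup table maps an 8-bit raw sample to an output value for one colour channel, so per-channel gain and gamma collapse into a single table lookup in the debayering inner loop. A sketch building identity tables (the helper name is hypothetical):

    #include "libcamera/internal/software_isp/debayer_params.h"

    using namespace libcamera;

    DebayerParams makeIdentityParams()
    {
        DebayerParams params{};

        /* Identity mapping: no gain or gamma applied. */
        for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
            params.red[i] = i;
            params.green[i] = i;
            params.blue[i] = i;
        }

        return params;
    }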
diff --git a/include/libcamera/internal/software_isp/meson.build b/include/libcamera/internal/software_isp/meson.build
new file mode 100644
index 00000000..508ddddc
--- /dev/null
+++ b/include/libcamera/internal/software_isp/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_headers += files([
+ 'debayer_params.h',
+ 'software_isp.h',
+ 'swisp_stats.h',
+])
diff --git a/include/libcamera/internal/software_isp/software_isp.h b/include/libcamera/internal/software_isp/software_isp.h
new file mode 100644
index 00000000..133b545c
--- /dev/null
+++ b/include/libcamera/internal/software_isp/software_isp.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#pragma once
+
+#include <deque>
+#include <functional>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <stdint.h>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/object.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+#include <libcamera/ipa/soft_ipa_proxy.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class DebayerCpu;
+class FrameBuffer;
+class PixelFormat;
+class Stream;
+struct StreamConfiguration;
+
+LOG_DECLARE_CATEGORY(SoftwareIsp)
+
+class SoftwareIsp : public Object
+{
+public:
+ SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
+ ControlInfoMap *ipaControls);
+ ~SoftwareIsp();
+
+ int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
+
+ bool isValid() const;
+
+ std::vector<PixelFormat> formats(PixelFormat input);
+
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ipa::soft::IPAConfigInfo &configInfo);
+
+ int exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ void processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls);
+
+ int start();
+ void stop();
+
+ void queueRequest(const uint32_t frame, const ControlList &controls);
+ int queueBuffers(uint32_t frame, FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs);
+
+ void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output);
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+ Signal<uint32_t, uint32_t> ispStatsReady;
+ Signal<const ControlList &> setSensorControls;
+
+private:
+ void saveIspParams();
+ void setSensorCtrls(const ControlList &sensorControls);
+ void statsReady(uint32_t frame, uint32_t bufferId);
+ void inputReady(FrameBuffer *input);
+ void outputReady(FrameBuffer *output);
+
+ std::unique_ptr<DebayerCpu> debayer_;
+ Thread ispWorkerThread_;
+ SharedMemObject<DebayerParams> sharedParams_;
+ DebayerParams debayerParams_;
+ DmaBufAllocator dmaHeap_;
+
+ std::unique_ptr<ipa::soft::IPAProxySoft> ipa_;
+ std::deque<FrameBuffer *> queuedInputBuffers_;
+ std::deque<FrameBuffer *> queuedOutputBuffers_;
+};
+
+} /* namespace libcamera */
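SoftwareIsp reports progress through the four signals declared above; a pipeline handler connects its slots before calling start(). A hedged wiring sketch with a hypothetical Object-derived receiver:

    #include <stdint.h>

    #include <libcamera/base/object.h>

    #include "libcamera/internal/software_isp/software_isp.h"

    using namespace libcamera;

    class IspClient : public Object
    {
    public:
        void bayerDone([[maybe_unused]] FrameBuffer *input) { /* recycle the raw buffer */ }
        void frameDone([[maybe_unused]] FrameBuffer *output) { /* complete the request */ }
        void statsDone([[maybe_unused]] uint32_t frame,
                       [[maybe_unused]] uint32_t bufferId) { /* forward to the IPA */ }
        void sensorCtrls([[maybe_unused]] const ControlList &ctrls) { /* apply to the sensor */ }
    };

    void connectIsp(SoftwareIsp *isp, IspClient *client)
    {
        /* Slots on an Object run in that object's thread context. */
        isp->inputBufferReady.connect(client, &IspClient::bayerDone);
        isp->outputBufferReady.connect(client, &IspClient::frameDone);
        isp->ispStatsReady.connect(client, &IspClient::statsDone);
        isp->setSensorControls.connect(client, &IspClient::sensorCtrls);
    }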
diff --git a/include/libcamera/internal/software_isp/swisp_stats.h b/include/libcamera/internal/software_isp/swisp_stats.h
new file mode 100644
index 00000000..ae11f112
--- /dev/null
+++ b/include/libcamera/internal/software_isp/swisp_stats.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Statistics data format used by the software ISP and software IPA
+ */
+
+#pragma once
+
+#include <array>
+#include <stdint.h>
+
+namespace libcamera {
+
+/**
+ * \brief Struct that holds the statistics for the Software ISP
+ *
+ * The value types in this struct are sized so that they do not overflow.
+ * Should they overflow nonetheless, no check is performed and the values
+ * wrap around.
+ */
+struct SwIspStats {
+ /**
+ * \brief Holds the sum of all sampled red pixels
+ */
+ uint64_t sumR_;
+ /**
+ * \brief Holds the sum of all sampled green pixels
+ */
+ uint64_t sumG_;
+ /**
+ * \brief Holds the sum of all sampled blue pixels
+ */
+ uint64_t sumB_;
+ /**
+ * \brief Number of bins in the yHistogram
+ */
+ static constexpr unsigned int kYHistogramSize = 64;
+ /**
+ * \brief Type of the histogram.
+ */
+ using Histogram = std::array<uint32_t, kYHistogramSize>;
+ /**
+ * \brief A histogram of luminance values
+ */
+ Histogram yHistogram;
+};
+
+} /* namespace libcamera */
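As an example of consuming these statistics, the pixel-count-weighted mean histogram bin gives a coarse brightness estimate (a sketch, not part of the header):

    #include <stdint.h>

    #include "libcamera/internal/software_isp/swisp_stats.h"

    using namespace libcamera;

    double meanHistogramBin(const SwIspStats &stats)
    {
        uint64_t pixels = 0;
        uint64_t weighted = 0;

        for (unsigned int i = 0; i < SwIspStats::kYHistogramSize; i++) {
            pixels += stats.yHistogram[i];
            weighted += static_cast<uint64_t>(stats.yHistogram[i]) * i;
        }

        /* Mean bin index in [0, kYHistogramSize); 0 for an empty
         * histogram. */
        return pixels ? static_cast<double>(weighted) / pixels : 0.0;
    }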
diff --git a/include/libcamera/internal/source_paths.h b/include/libcamera/internal/source_paths.h
index be6f153b..14e64717 100644
--- a/include/libcamera/internal/source_paths.h
+++ b/include/libcamera/internal/source_paths.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.h - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#pragma once
diff --git a/include/libcamera/internal/sysfs.h b/include/libcamera/internal/sysfs.h
index 917457be..aca60fb6 100644
--- a/include/libcamera/internal/sysfs.h
+++ b/include/libcamera/internal/sysfs.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.h - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#pragma once
diff --git a/include/libcamera/internal/tracepoints.h.in b/include/libcamera/internal/tracepoints.h.in
index d0fc1365..385f9f54 100644
--- a/include/libcamera/internal/tracepoints.h.in
+++ b/include/libcamera/internal/tracepoints.h.in
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) {{year}}, Google Inc.
+ * Copyright (C) 2020, Google Inc.
*
- * tracepoints.h - Tracepoints with lttng
+ * Tracepoints with lttng
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/libcamera/internal/tracepoints/request.tp b/include/libcamera/internal/tracepoints/request.tp
index 4f367e91..42c59685 100644
--- a/include/libcamera/internal/tracepoints/request.tp
+++ b/include/libcamera/internal/tracepoints/request.tp
@@ -5,6 +5,8 @@
* request.tp - Tracepoints for the request object
*/
+#include <stdint.h>
+
#include <libcamera/framebuffer.h>
#include "libcamera/internal/request.h"
diff --git a/include/libcamera/internal/v4l2_device.h b/include/libcamera/internal/v4l2_device.h
index 50d4adbc..affe52c2 100644
--- a/include/libcamera/internal/v4l2_device.h
+++ b/include/libcamera/internal/v4l2_device.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.h - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#pragma once
@@ -10,6 +10,7 @@
#include <map>
#include <memory>
#include <optional>
+#include <stdint.h>
#include <vector>
#include <linux/videodev2.h>
diff --git a/include/libcamera/internal/v4l2_pixelformat.h b/include/libcamera/internal/v4l2_pixelformat.h
index 44439fff..543eb21b 100644
--- a/include/libcamera/internal/v4l2_pixelformat.h
+++ b/include/libcamera/internal/v4l2_pixelformat.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Google Inc.
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.h - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#pragma once
@@ -49,6 +49,8 @@ public:
static const std::vector<V4L2PixelFormat> &
fromPixelFormat(const PixelFormat &pixelFormat);
+ bool isGenericLineBasedMetadata() const;
+
private:
uint32_t fourcc_;
};
diff --git a/include/libcamera/internal/v4l2_subdevice.h b/include/libcamera/internal/v4l2_subdevice.h
index 01ed4c2f..fa2a4a21 100644
--- a/include/libcamera/internal/v4l2_subdevice.h
+++ b/include/libcamera/internal/v4l2_subdevice.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.h - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#pragma once
@@ -10,6 +10,7 @@
#include <memory>
#include <optional>
#include <ostream>
+#include <stdint.h>
#include <string>
#include <vector>
@@ -176,6 +177,9 @@ private:
std::vector<SizeRange> enumPadSizes(const Stream &stream,
unsigned int code);
+ int getRoutingLegacy(Routing *routing, Whence whence);
+ int setRoutingLegacy(Routing *routing, Whence whence);
+
const MediaEntity *entity_;
std::string model_;
diff --git a/include/libcamera/internal/v4l2_videodevice.h b/include/libcamera/internal/v4l2_videodevice.h
index d157a447..f021c2a0 100644
--- a/include/libcamera/internal/v4l2_videodevice.h
+++ b/include/libcamera/internal/v4l2_videodevice.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.h - V4L2 Video Device
+ * V4L2 Video Device
*/
#pragma once
@@ -208,6 +208,7 @@ public:
int setFormat(V4L2DeviceFormat *format);
Formats formats(uint32_t code = 0);
+ int getSelection(unsigned int target, Rectangle *rect);
int setSelection(unsigned int target, Rectangle *rect);
int allocateBuffers(unsigned int count,
diff --git a/include/libcamera/internal/vector.h b/include/libcamera/internal/vector.h
new file mode 100644
index 00000000..a67a0947
--- /dev/null
+++ b/include/libcamera/internal/vector.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <functional>
+#include <numeric>
+#include <optional>
+#include <ostream>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/matrix.h"
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Vector)
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, unsigned int Rows>
+#endif /* __DOXYGEN__ */
+class Vector
+{
+public:
+ constexpr Vector() = default;
+
+ constexpr explicit Vector(T scalar)
+ {
+ data_.fill(scalar);
+ }
+
+ constexpr Vector(const std::array<T, Rows> &data)
+ {
+ for (unsigned int i = 0; i < Rows; i++)
+ data_[i] = data[i];
+ }
+
+ const T &operator[](size_t i) const
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ T &operator[](size_t i)
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ constexpr Vector<T, Rows> operator-() const
+ {
+ Vector<T, Rows> ret;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret[i] = -data_[i];
+ return ret;
+ }
+
+ constexpr Vector operator+(const Vector &other) const
+ {
+ return apply(*this, other, std::plus<>{});
+ }
+
+ constexpr Vector operator+(T scalar) const
+ {
+ return apply(*this, scalar, std::plus<>{});
+ }
+
+ constexpr Vector operator-(const Vector &other) const
+ {
+ return apply(*this, other, std::minus<>{});
+ }
+
+ constexpr Vector operator-(T scalar) const
+ {
+ return apply(*this, scalar, std::minus<>{});
+ }
+
+ constexpr Vector operator*(const Vector &other) const
+ {
+ return apply(*this, other, std::multiplies<>{});
+ }
+
+ constexpr Vector operator*(T scalar) const
+ {
+ return apply(*this, scalar, std::multiplies<>{});
+ }
+
+ constexpr Vector operator/(const Vector &other) const
+ {
+ return apply(*this, other, std::divides<>{});
+ }
+
+ constexpr Vector operator/(T scalar) const
+ {
+ return apply(*this, scalar, std::divides<>{});
+ }
+
+ Vector &operator+=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator+=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator-=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator-=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator*=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator*=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator/=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a / b; });
+ }
+
+ Vector &operator/=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a / b; });
+ }
+
+ constexpr Vector min(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector min(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector max(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::max(a, b); });
+ }
+
+ constexpr Vector max(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) -> T { return std::max(a, b); });
+ }
+
+ constexpr T dot(const Vector<T, Rows> &other) const
+ {
+ T ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * other[i];
+ return ret;
+ }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &x() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &y() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &z() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &x() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &y() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &z() { return data_[2]; }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &r() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &g() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &b() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &r() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &g() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &b() { return data_[2]; }
+
+ constexpr double length2() const
+ {
+ double ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * data_[i];
+ return ret;
+ }
+
+ constexpr double length() const
+ {
+ return std::sqrt(length2());
+ }
+
+ template<typename R = T>
+ constexpr R sum() const
+ {
+ return std::accumulate(data_.begin(), data_.end(), R{});
+ }
+
+private:
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, const Vector &rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ rhs.data_.begin(), result.data_.begin(),
+ op);
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, T rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ result.data_.begin(),
+ [&op, rhs](T v) { return op(v, rhs); });
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(const Vector &other, BinaryOp op)
+ {
+ auto itOther = other.data_.begin();
+ std::for_each(data_.begin(), data_.end(),
+ [&op, &itOther](T &v) { v = op(v, *itOther++); });
+
+ return *this;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(T scalar, BinaryOp op)
+ {
+ std::for_each(data_.begin(), data_.end(),
+ [&op, scalar](T &v) { v = op(v, scalar); });
+
+ return *this;
+ }
+
+ std::array<T, Rows> data_;
+};
+
+template<typename T>
+using RGB = Vector<T, 3>;
+
+template<typename T, unsigned int Rows, unsigned int Cols>
+Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+{
+ Vector<T, Rows> result;
+
+ for (unsigned int i = 0; i < Rows; i++) {
+ T sum = 0;
+ for (unsigned int j = 0; j < Cols; j++)
+ sum += m[i][j] * v[j];
+ result[i] = sum;
+ }
+
+ return result;
+}
+
+template<typename T, unsigned int Rows>
+bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ for (unsigned int i = 0; i < Rows; i++) {
+ if (lhs[i] != rhs[i])
+ return false;
+ }
+
+ return true;
+}
+
+template<typename T, unsigned int Rows>
+bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ return !(lhs == rhs);
+}
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
+#endif /* __DOXYGEN__ */
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows>
+std::ostream &operator<<(std::ostream &out, const Vector<T, Rows> &v)
+{
+ out << "Vector { ";
+ for (unsigned int i = 0; i < Rows; i++) {
+ out << v[i];
+ out << ((i + 1 < Rows) ? ", " : " ");
+ }
+ out << " }";
+
+ return out;
+}
+
+template<typename T, unsigned int Rows>
+struct YamlObject::Getter<Vector<T, Rows>> {
+ std::optional<Vector<T, Rows>> get(const YamlObject &obj) const
+ {
+ if (!vectorValidateYaml(obj, Rows))
+ return std::nullopt;
+
+ Vector<T, Rows> vector;
+
+ unsigned int i = 0;
+ for (const YamlObject &entry : obj.asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ vector[i++] = *value;
+ }
+
+ return vector;
+ }
+};
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
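A short usage sketch of the arithmetic above; the Matrix std::array constructor from matrix.h is assumed:

    #include "libcamera/internal/matrix.h"
    #include "libcamera/internal/vector.h"

    using namespace libcamera;

    void vectorExample()
    {
        RGB<double> pixel({ 0.8, 0.5, 0.2 });

        /* Element-wise and scalar operators return new vectors. */
        RGB<double> gained = pixel * 1.5;
        double luma = pixel.dot(RGB<double>({ 0.299, 0.587, 0.114 }));

        /* Matrix-vector product from the operator defined above; an
         * identity CCM leaves the vector unchanged. */
        Matrix<double, 3, 3> ccm({ 1.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0,
                                   0.0, 0.0, 1.0 });
        RGB<double> corrected = ccm * gained;

        (void)luma;
        (void)corrected;
    }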
diff --git a/include/libcamera/internal/yaml_parser.h b/include/libcamera/internal/yaml_parser.h
index 8ca71df8..8c791656 100644
--- a/include/libcamera/internal/yaml_parser.h
+++ b/include/libcamera/internal/yaml_parser.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml_parser.h - libcamera YAML parsing helper
+ * libcamera YAML parsing helper
*/
#pragma once
@@ -10,7 +10,9 @@
#include <iterator>
#include <map>
#include <optional>
+#include <stdint.h>
#include <string>
+#include <string_view>
#include <vector>
#include <libcamera/base/class.h>
@@ -158,37 +160,34 @@ public:
{
return type_ == Type::Dictionary;
}
+ bool isEmpty() const
+ {
+ return type_ == Type::Empty;
+ }
+ explicit operator bool() const
+ {
+ return type_ != Type::Empty;
+ }
std::size_t size() const;
-#ifndef __DOXYGEN__
- template<typename T,
- std::enable_if_t<
- std::is_same_v<bool, T> ||
- std::is_same_v<double, T> ||
- std::is_same_v<int8_t, T> ||
- std::is_same_v<uint8_t, T> ||
- std::is_same_v<int16_t, T> ||
- std::is_same_v<uint16_t, T> ||
- std::is_same_v<int32_t, T> ||
- std::is_same_v<uint32_t, T> ||
- std::is_same_v<std::string, T> ||
- std::is_same_v<Size, T>> * = nullptr>
-#else
template<typename T>
-#endif
- std::optional<T> get() const;
+ std::optional<T> get() const
+ {
+ return Getter<T>{}.get(*this);
+ }
- template<typename T>
- T get(const T &defaultValue) const
+ template<typename T, typename U>
+ T get(U &&defaultValue) const
{
- return get<T>().value_or(defaultValue);
+ return get<T>().value_or(std::forward<U>(defaultValue));
}
#ifndef __DOXYGEN__
template<typename T,
std::enable_if_t<
std::is_same_v<bool, T> ||
+ std::is_same_v<float, T> ||
std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> ||
@@ -208,25 +207,33 @@ public:
const YamlObject &operator[](std::size_t index) const;
- bool contains(const std::string &key) const;
- const YamlObject &operator[](const std::string &key) const;
+ bool contains(std::string_view key) const;
+ const YamlObject &operator[](std::string_view key) const;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(YamlObject)
+ template<typename T>
+ friend struct Getter;
friend class YamlParserContext;
enum class Type {
Dictionary,
List,
Value,
+ Empty,
+ };
+
+ template<typename T, typename Enable = void>
+ struct Getter {
+ std::optional<T> get(const YamlObject &obj) const;
};
Type type_;
std::string value_;
Container list_;
- std::map<std::string, YamlObject *> dictionary_;
+ std::map<std::string, YamlObject *, std::less<>> dictionary_;
};
class YamlParser final
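get<T>() now dispatches through the Getter template, letting internal headers (vector.h above is one example) extend parsing to new types, and empty nodes convert to false. A minimal usage sketch against an already-parsed document:

    #include <optional>
    #include <stdint.h>
    #include <string>

    #include "libcamera/internal/yaml_parser.h"

    using namespace libcamera;

    void parseExample(const YamlObject &root)
    {
        /* Missing keys return an empty node, which tests false. */
        const YamlObject &node = root["exposure"];
        if (!node)
            return;

        /* get<T>() returns std::optional<T>; the two-argument form
         * substitutes a default value. */
        std::optional<uint32_t> exposure = node.get<uint32_t>();
        std::string mode = root["mode"].get<std::string>("auto");

        (void)exposure;
        (void)mode;
    }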
diff --git a/include/libcamera/ipa/ipa_controls.h b/include/libcamera/ipa/ipa_controls.h
index e5da1946..980668c8 100644
--- a/include/libcamera/ipa/ipa_controls.h
+++ b/include/libcamera/ipa/ipa_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.h - IPA Control handling
+ * IPA Control handling
*/
#pragma once
@@ -46,7 +46,8 @@ struct ipa_control_info_entry {
uint32_t id;
uint32_t type;
uint32_t offset;
- uint32_t padding[1];
+ uint8_t direction;
+ uint8_t padding[3];
};
#ifdef __cplusplus
diff --git a/include/libcamera/ipa/ipa_interface.h b/include/libcamera/ipa/ipa_interface.h
index 8884f0ed..dce9637a 100644
--- a/include/libcamera/ipa/ipa_interface.h
+++ b/include/libcamera/ipa/ipa_interface.h
@@ -2,24 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.h - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#pragma once
-#include <stddef.h>
-#include <stdint.h>
-
-#include <map>
-#include <vector>
-
-#include <libcamera/base/flags.h>
-#include <libcamera/base/signal.h>
-
-#include <libcamera/controls.h>
-#include <libcamera/framebuffer.h>
-#include <libcamera/geometry.h>
-
namespace libcamera {
/*
@@ -33,8 +20,8 @@ public:
virtual ~IPAInterface() = default;
};
-} /* namespace libcamera */
-
extern "C" {
libcamera::IPAInterface *ipaCreate();
}
+
+} /* namespace libcamera */
diff --git a/include/libcamera/ipa/ipa_module_info.h b/include/libcamera/ipa/ipa_module_info.h
index b19b00f7..3507a6d7 100644
--- a/include/libcamera/ipa/ipa_module_info.h
+++ b/include/libcamera/ipa/ipa_module_info.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module_info.h - Image Processing Algorithm module information
+ * Image Processing Algorithm module information
*/
#pragma once
diff --git a/include/libcamera/ipa/ipu3.mojom b/include/libcamera/ipa/ipu3.mojom
index d1b1c6b8..d9a50b01 100644
--- a/include/libcamera/ipa/ipu3.mojom
+++ b/include/libcamera/ipa/ipu3.mojom
@@ -31,14 +31,14 @@ interface IPAIPU3Interface {
unmapBuffers(array<uint32> ids);
[async] queueRequest(uint32 frame, libcamera.ControlList controls);
- [async] fillParamsBuffer(uint32 frame, uint32 bufferId);
- [async] processStatsBuffer(uint32 frame, int64 frameTimestamp,
- uint32 bufferId, libcamera.ControlList sensorControls);
+ [async] computeParams(uint32 frame, uint32 bufferId);
+ [async] processStats(uint32 frame, int64 frameTimestamp,
+ uint32 bufferId, libcamera.ControlList sensorControls);
};
interface IPAIPU3EventInterface {
setSensorControls(uint32 frame, libcamera.ControlList sensorControls,
libcamera.ControlList lensControls);
- paramsBufferReady(uint32 frame);
+ paramsComputed(uint32 frame);
metadataReady(uint32 frame, libcamera.ControlList metadata);
};
diff --git a/include/libcamera/ipa/mali-c55.mojom b/include/libcamera/ipa/mali-c55.mojom
new file mode 100644
index 00000000..5d7eb4ee
--- /dev/null
+++ b/include/libcamera/ipa/mali-c55.mojom
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+module ipa.mali_c55;
+
+import "include/libcamera/ipa/core.mojom";
+
+struct IPAConfigInfo {
+ libcamera.IPACameraSensorInfo sensorInfo;
+ libcamera.ControlInfoMap sensorControls;
+};
+
+interface IPAMaliC55Interface {
+ init(libcamera.IPASettings settings, IPAConfigInfo configInfo)
+ => (int32 ret, libcamera.ControlInfoMap ipaControls);
+ start() => (int32 ret);
+ stop();
+
+ configure(IPAConfigInfo configInfo, uint8 bayerOrder)
+ => (int32 ret, libcamera.ControlInfoMap ipaControls);
+
+ mapBuffers(array<libcamera.IPABuffer> buffers, bool readOnly);
+ unmapBuffers(array<libcamera.IPABuffer> buffers);
+
+ [async] queueRequest(uint32 request, libcamera.ControlList reqControls);
+ [async] fillParams(uint32 request, uint32 bufferId);
+ [async] processStats(uint32 request, uint32 bufferId,
+ libcamera.ControlList sensorControls);
+};
+
+interface IPAMaliC55EventInterface {
+ paramsComputed(uint32 request);
+ statsProcessed(uint32 request, libcamera.ControlList metadata);
+ setSensorControls(libcamera.ControlList sensorControls);
+};
diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
index f3b4881c..3129f119 100644
--- a/include/libcamera/ipa/meson.build
+++ b/include/libcamera/ipa/meson.build
@@ -11,8 +11,6 @@ libcamera_ipa_headers = files([
install_headers(libcamera_ipa_headers,
subdir : libcamera_ipa_include_dir)
-libcamera_generated_ipa_headers = []
-
ipa_headers_install_dir = get_option('includedir') / libcamera_ipa_include_dir
#
@@ -28,10 +26,11 @@ ipa_mojom_core = custom_target(core_mojom_file.split('.')[0] + '_mojom_module',
'--output-root', meson.project_build_root(),
'--input-root', meson.project_source_root(),
'--mojoms', '@INPUT@'
- ])
+ ],
+ env : py_build_env)
# core_ipa_interface.h
-libcamera_generated_ipa_headers += custom_target('core_ipa_interface_h',
+libcamera_ipa_headers += custom_target('core_ipa_interface_h',
input : ipa_mojom_core,
output : 'core_ipa_interface.h',
depends : mojom_templates,
@@ -44,10 +43,11 @@ libcamera_generated_ipa_headers += custom_target('core_ipa_interface_h',
'--libcamera_generate_core_header',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
# core_ipa_serializer.h
-libcamera_generated_ipa_headers += custom_target('core_ipa_serializer_h',
+libcamera_ipa_headers += custom_target('core_ipa_serializer_h',
input : ipa_mojom_core,
output : 'core_ipa_serializer.h',
depends : mojom_templates,
@@ -58,13 +58,16 @@ libcamera_generated_ipa_headers += custom_target('core_ipa_serializer_h',
'--libcamera_generate_core_serializer',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
# Mapping from pipeline handler name to mojom file
pipeline_ipa_mojom_mapping = {
'ipu3': 'ipu3.mojom',
+ 'mali-c55': 'mali-c55.mojom',
'rkisp1': 'rkisp1.mojom',
'rpi/vc4': 'raspberrypi.mojom',
+ 'simple': 'soft.mojom',
'vimc': 'vimc.mojom',
}
@@ -100,7 +103,8 @@ foreach pipeline, file : pipeline_ipa_mojom_mapping
'--output-root', meson.project_build_root(),
'--input-root', meson.project_source_root(),
'--mojoms', '@INPUT@'
- ])
+ ],
+ env : py_build_env)
# {interface}_ipa_interface.h
header = custom_target(name + '_ipa_interface_h',
@@ -116,7 +120,8 @@ foreach pipeline, file : pipeline_ipa_mojom_mapping
'--libcamera_generate_header',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
# {interface}_ipa_serializer.h
serializer = custom_target(name + '_ipa_serializer_h',
@@ -130,7 +135,8 @@ foreach pipeline, file : pipeline_ipa_mojom_mapping
'--libcamera_generate_serializer',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
# {interface}_ipa_proxy.h
proxy_header = custom_target(name + '_proxy_h',
@@ -144,14 +150,15 @@ foreach pipeline, file : pipeline_ipa_mojom_mapping
'--libcamera_generate_proxy_h',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
ipa_mojoms += {
'name': name,
'mojom': mojom,
}
- libcamera_generated_ipa_headers += [header, serializer, proxy_header]
+ libcamera_ipa_headers += [header, serializer, proxy_header]
endforeach
ipa_mojom_files = []
diff --git a/include/libcamera/ipa/raspberrypi.mojom b/include/libcamera/ipa/raspberrypi.mojom
index 5986c436..e30c70bd 100644
--- a/include/libcamera/ipa/raspberrypi.mojom
+++ b/include/libcamera/ipa/raspberrypi.mojom
@@ -12,10 +12,6 @@ import "include/libcamera/ipa/core.mojom";
const uint32 MaxLsGridSize = 0x8000;
struct SensorConfig {
- uint32 gainDelay;
- uint32 exposureDelay;
- uint32 vblankDelay;
- uint32 hblankDelay;
uint32 sensorMetadata;
};
@@ -272,7 +268,7 @@ interface IPARPiEventInterface {
* \param[in] delayContext IPA context index used for this request
*
* This asynchronous event is signalled to the pipeline handler when
- * the IPA requires sensor specific controls (e.g. shutter speed, gain,
+ * the IPA requires sensor specific controls (e.g. exposure time, gain,
* blanking) to be applied.
*/
setDelayedControls(libcamera.ControlList controls, uint32 delayContext);
diff --git a/include/libcamera/ipa/rkisp1.mojom b/include/libcamera/ipa/rkisp1.mojom
index 1009e970..043ad27e 100644
--- a/include/libcamera/ipa/rkisp1.mojom
+++ b/include/libcamera/ipa/rkisp1.mojom
@@ -11,6 +11,7 @@ import "include/libcamera/ipa/core.mojom";
struct IPAConfigInfo {
libcamera.IPACameraSensorInfo sensorInfo;
libcamera.ControlInfoMap sensorControls;
+ uint32 paramFormat;
};
interface IPARkISP1Interface {
@@ -30,13 +31,13 @@ interface IPARkISP1Interface {
unmapBuffers(array<uint32> ids);
[async] queueRequest(uint32 frame, libcamera.ControlList reqControls);
- [async] fillParamsBuffer(uint32 frame, uint32 bufferId);
- [async] processStatsBuffer(uint32 frame, uint32 bufferId,
- libcamera.ControlList sensorControls);
+ [async] computeParams(uint32 frame, uint32 bufferId);
+ [async] processStats(uint32 frame, uint32 bufferId,
+ libcamera.ControlList sensorControls);
};
interface IPARkISP1EventInterface {
- paramsBufferReady(uint32 frame);
+ paramsComputed(uint32 frame, uint32 bytesused);
setSensorControls(uint32 frame, libcamera.ControlList sensorControls);
metadataReady(uint32 frame, libcamera.ControlList metadata);
};
diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
new file mode 100644
index 00000000..d52e6f1a
--- /dev/null
+++ b/include/libcamera/ipa/soft.mojom
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+/*
+ * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+ */
+
+module ipa.soft;
+
+import "include/libcamera/ipa/core.mojom";
+
+struct IPAConfigInfo {
+ libcamera.ControlInfoMap sensorControls;
+};
+
+interface IPASoftInterface {
+ init(libcamera.IPASettings settings,
+ libcamera.SharedFD fdStats,
+ libcamera.SharedFD fdParams,
+ libcamera.ControlInfoMap sensorCtrlInfoMap)
+ => (int32 ret, libcamera.ControlInfoMap ipaControls);
+ start() => (int32 ret);
+ stop();
+ configure(IPAConfigInfo configInfo)
+ => (int32 ret);
+
+ [async] queueRequest(uint32 frame, libcamera.ControlList sensorControls);
+ [async] computeParams(uint32 frame);
+ [async] processStats(uint32 frame,
+ uint32 bufferId,
+ libcamera.ControlList sensorControls);
+};
+
+interface IPASoftEventInterface {
+ setSensorControls(libcamera.ControlList sensorControls);
+ setIspParams();
+};
diff --git a/include/libcamera/ipa/vimc.mojom b/include/libcamera/ipa/vimc.mojom
index dd991f7e..c5c5fe83 100644
--- a/include/libcamera/ipa/vimc.mojom
+++ b/include/libcamera/ipa/vimc.mojom
@@ -47,9 +47,9 @@ interface IPAVimcInterface {
* interface functions that mimic how other pipeline handlers typically
* handle parameters at runtime.
*/
- [async] fillParamsBuffer(uint32 frame, uint32 bufferId);
+ [async] computeParams(uint32 frame, uint32 bufferId);
};
interface IPAVimcEventInterface {
- paramsBufferReady(uint32 bufferId, [flags] TestFlag flags);
+ paramsComputed(uint32 bufferId, [flags] TestFlag flags);
};
diff --git a/include/libcamera/logging.h b/include/libcamera/logging.h
index cd842f67..e1c6341c 100644
--- a/include/libcamera/logging.h
+++ b/include/libcamera/logging.h
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * logging.h - Logging infrastructure
+ * Logging infrastructure
*/
#pragma once
+#include <ostream>
+
namespace libcamera {
enum LoggingTarget {
diff --git a/include/libcamera/meson.build b/include/libcamera/meson.build
index 84c6c4cb..fd69a517 100644
--- a/include/libcamera/meson.build
+++ b/include/libcamera/meson.build
@@ -34,8 +34,9 @@ libcamera_headers_install_dir = get_option('includedir') / libcamera_include_dir
controls_map = {
'controls': {
- 'draft': 'control_ids_draft.yaml',
'core': 'control_ids_core.yaml',
+ 'debug': 'control_ids_debug.yaml',
+ 'draft': 'control_ids_draft.yaml',
'rpi/vc4': 'control_ids_rpi.yaml',
},
@@ -47,13 +48,15 @@ controls_map = {
control_headers = []
controls_files = []
+controls_files_names = []
properties_files = []
+properties_files_names = []
foreach mode, entry : controls_map
files_list = []
input_files = []
foreach vendor, header : entry
- if vendor != 'core' and vendor != 'draft'
+ if vendor not in ['core', 'debug', 'draft']
if vendor not in pipelines
continue
endif
@@ -70,13 +73,15 @@ foreach mode, entry : controls_map
outfile = ''
if mode == 'controls'
outfile = 'control_ids.h'
- controls_files += files_list
+ controls_files += input_files
+ controls_files_names += files_list
else
outfile = 'property_ids.h'
- properties_files += files_list
+ properties_files += input_files
+ properties_files_names += files_list
endif
- template_file = files(outfile + '.in')
+ template_file = files('control_ids.h.in')
ranges_file = files('../../src/libcamera/control_ranges.yaml')
control_headers += custom_target(header + '_h',
input : input_files,
@@ -84,6 +89,7 @@ foreach mode, entry : controls_map
command : [gen_controls, '-o', '@OUTPUT@',
'--mode', mode, '-t', template_file,
'-r', ranges_file, '@INPUT@'],
+ env : py_build_env,
install : true,
install_dir : libcamera_headers_install_dir)
endforeach
@@ -103,24 +109,25 @@ formats_h = custom_target('formats_h',
install_dir : libcamera_headers_install_dir)
libcamera_public_headers += formats_h
+# version.h
+version = libcamera_version.split('.')
+libcamera_version_config = configuration_data()
+libcamera_version_config.set('LIBCAMERA_VERSION_MAJOR', version[0])
+libcamera_version_config.set('LIBCAMERA_VERSION_MINOR', version[1])
+libcamera_version_config.set('LIBCAMERA_VERSION_PATCH', version[2])
+
+version_h = configure_file(input : 'version.h.in',
+ output : 'version.h',
+ configuration : libcamera_version_config,
+ install_dir : libcamera_headers_install_dir)
+libcamera_public_headers += version_h
+
# libcamera.h
libcamera_h = custom_target('gen-header',
input : 'meson.build',
output : 'libcamera.h',
- command : [gen_header, meson.current_source_dir(), '@OUTPUT@'],
+ command : [gen_header, '@OUTPUT@', libcamera_public_headers],
install : true,
install_dir : libcamera_headers_install_dir)
libcamera_public_headers += libcamera_h
-
-# version.h
-version = libcamera_version.split('.')
-libcamera_version_config = configuration_data()
-libcamera_version_config.set('LIBCAMERA_VERSION_MAJOR', version[0])
-libcamera_version_config.set('LIBCAMERA_VERSION_MINOR', version[1])
-libcamera_version_config.set('LIBCAMERA_VERSION_PATCH', version[2])
-
-configure_file(input : 'version.h.in',
- output : 'version.h',
- configuration : libcamera_version_config,
- install_dir : libcamera_headers_install_dir)
diff --git a/include/libcamera/orientation.h b/include/libcamera/orientation.h
index 9a2c2fb2..a3b40e63 100644
--- a/include/libcamera/orientation.h
+++ b/include/libcamera/orientation.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Ideas On Board Oy
*
- * orientation.h - Image orientation
+ * Image orientation
*/
#pragma once
diff --git a/include/libcamera/pixel_format.h b/include/libcamera/pixel_format.h
index d49c5f78..1b4d8c7c 100644
--- a/include/libcamera/pixel_format.h
+++ b/include/libcamera/pixel_format.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.h - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#pragma once
#include <ostream>
-#include <set>
#include <stdint.h>
#include <string>
diff --git a/include/libcamera/property_ids.h.in b/include/libcamera/property_ids.h.in
deleted file mode 100644
index 43372c71..00000000
--- a/include/libcamera/property_ids.h.in
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * property_ids.h - Property ID list
- *
- * This file is auto-generated. Do not edit.
- */
-
-#pragma once
-
-#include <stdint.h>
-
-#include <libcamera/controls.h>
-
-namespace libcamera {
-
-namespace properties {
-
-enum {
-${ids}
-};
-
-${controls}
-
-extern const ControlIdMap properties;
-
-${vendor_controls}
-
-} /* namespace properties */
-
-} /* namespace libcamera */
diff --git a/include/libcamera/request.h b/include/libcamera/request.h
index dffde153..e214a9d1 100644
--- a/include/libcamera/request.h
+++ b/include/libcamera/request.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.h - Capture request handling
+ * Capture request handling
*/
#pragma once
@@ -12,7 +12,6 @@
#include <ostream>
#include <stdint.h>
#include <string>
-#include <unordered_set>
#include <libcamera/base/class.h>
#include <libcamera/base/signal.h>
diff --git a/include/libcamera/stream.h b/include/libcamera/stream.h
index 4e94187d..b5e8f0a9 100644
--- a/include/libcamera/stream.h
+++ b/include/libcamera/stream.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.h - Video stream for a Camera
+ * Video stream for a Camera
*/
#pragma once
#include <map>
-#include <memory>
#include <ostream>
#include <string>
#include <vector>
@@ -62,6 +61,8 @@ private:
StreamFormats formats_;
};
+std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg);
+
enum class StreamRole {
Raw,
StillCapture,
diff --git a/include/libcamera/transform.h b/include/libcamera/transform.h
index 44cb4c6f..4517412a 100644
--- a/include/libcamera/transform.h
+++ b/include/libcamera/transform.h
@@ -2,13 +2,11 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.h - 2D plane transforms
+ * 2D plane transforms
*/
#pragma once
-#include <string>
-
namespace libcamera {
enum class Orientation;
diff --git a/include/libcamera/version.h.in b/include/libcamera/version.h.in
index 6e24d0a8..50bf1001 100644
--- a/include/libcamera/version.h.in
+++ b/include/libcamera/version.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.h - Library version information
+ * Library version information
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/linux/README b/include/linux/README
index 101e4997..f9f68641 100644
--- a/include/linux/README
+++ b/include/linux/README
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: CC0-1.0
-Files in this directory are imported from v6.7 of the Linux kernel. Do not
+Files in this directory are imported from v6.13-rc1-68-gf9bbbd9a696d of the Linux kernel. Do not
modify them manually.
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 96b90cf0..63b1e9ed 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -19,7 +19,7 @@
#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
/* Currently no heap flags */
-#define DMA_HEAP_VALID_HEAP_FLAGS (0)
+#define DMA_HEAP_VALID_HEAP_FLAGS (0ULL)
/**
* struct dma_heap_allocation_data - metadata passed from userspace for
diff --git a/include/linux/drm_fourcc.h b/include/linux/drm_fourcc.h
index d6c83d9c..db679877 100644
--- a/include/linux/drm_fourcc.h
+++ b/include/linux/drm_fourcc.h
@@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
- * These users musn't need to know to reason about the modifier value
+ * These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@@ -210,6 +210,10 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 48 bpp RGB */
+#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
+#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
+
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@@ -486,6 +490,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MIPI 0x0b
+#define DRM_FORMAT_MOD_VENDOR_RPI 0x0c
/* add more to the end as needed */
@@ -605,7 +610,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -1178,7 +1183,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@@ -1494,7 +1499,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
- * boudaries, i.e. 8bit should be stored in this mode to save allocation
+ * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
@@ -1551,6 +1556,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
#define AMD_FMT_MOD_TILE_VER_GFX11 4
+#define AMD_FMT_MOD_TILE_VER_GFX12 5
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
@@ -1561,6 +1567,8 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
/*
* 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
* GFX9 as canonical version.
+ *
+ * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
@@ -1568,6 +1576,21 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
+/* Gfx12 swizzle modes:
+ * 0 - LINEAR
+ * 1 - 256B_2D - 2D block dimensions
+ * 2 - 4KB_2D
+ * 3 - 64KB_2D
+ * 4 - 256KB_2D
+ * 5 - 4KB_3D - 3D block dimensions
+ * 6 - 64KB_3D
+ * 7 - 256KB_3D
+ */
+#define AMD_FMT_MOD_TILE_GFX12_256B_2D 1
+#define AMD_FMT_MOD_TILE_GFX12_4K_2D 2
+#define AMD_FMT_MOD_TILE_GFX12_64K_2D 3
+#define AMD_FMT_MOD_TILE_GFX12_256K_2D 4
+
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
#define AMD_FMT_MOD_DCC_BLOCK_256B 2
@@ -1666,6 +1689,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define MIPI_FORMAT_MOD_CSI2_PACKED fourcc_mod_code(MIPI, 1)
+#define PISP_FORMAT_MOD_COMPRESS_MODE1 fourcc_mod_code(RPI, 1)
+#define PISP_FORMAT_MOD_COMPRESS_MODE2 fourcc_mod_code(RPI, 2)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/linux/intel-ipu3.h b/include/linux/intel-ipu3.h
index bd771f1b..8c192f35 100644
--- a/include/linux/intel-ipu3.h
+++ b/include/linux/intel-ipu3.h
@@ -2485,11 +2485,9 @@ struct ipu3_uapi_anr_config {
* &ipu3_uapi_yuvp1_y_ee_nr_config
* @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
- * @reserved1: reserved
* @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @tcc: total color correction config as defined in struct
* &ipu3_uapi_yuvp2_tcc_static_config
- * @reserved2: reserved
* @anr: advanced noise reduction config.See &ipu3_uapi_anr_config
* @awb_fr: AWB filter response config. See ipu3_uapi_awb_fr_config
* @ae: auto exposure config As specified by &ipu3_uapi_ae_config
@@ -2724,7 +2722,6 @@ struct ipu3_uapi_obgrid_param {
* @acc_ae: 0 = no update, 1 = update.
* @acc_af: 0 = no update, 1 = update.
* @acc_awb: 0 = no update, 1 = update.
- * @__acc_osys: 0 = no update, 1 = update.
* @reserved3: Not used.
* @lin_vmem_params: 0 = no update, 1 = update.
* @tnr3_vmem_params: 0 = no update, 1 = update.
diff --git a/include/linux/mali-c55-config.h b/include/linux/mali-c55-config.h
new file mode 100644
index 00000000..b3141559
--- /dev/null
+++ b/include/linux/mali-c55-config.h
@@ -0,0 +1,909 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ARM Mali-C55 ISP Driver - Userspace API
+ *
+ * Copyright (C) 2023 Ideas on Board Oy
+ */
+
+#ifndef __UAPI_MALI_C55_CONFIG_H
+#define __UAPI_MALI_C55_CONFIG_H
+
+#include <linux/types.h>
+
+/*
+ * Frames are split into zones of almost equal width and height - a zone is a
+ * rectangular tile of a frame. The metering blocks within the ISP collect
+ * aggregated statistics per zone. A maximum of 15x15 zones can be configured,
+ * and so the statistics buffer within the hardware is sized to accommodate
+ * that.
+ *
+ * The utilised number of zones is runtime configurable.
+ */
+#define MALI_C55_MAX_ZONES (15 * 15)
+
+/**
+ * struct mali_c55_ae_1024bin_hist - Auto Exposure 1024-bin histogram statistics
+ *
+ * @bins: 1024 element array of 16-bit pixel counts.
+ *
+ * The 1024-bin histogram module collects image-global but zone-weighted
+ * intensity distributions of pixels in fixed-width bins. The modules can be
+ * configured into different "plane modes" which affect the contents of the
+ * collected statistics. In plane mode 0, pixel intensities are taken regardless
+ * of colour plane into a single 1024-bin histogram with a bin width of 4. In
+ * plane mode 1, four 256-bin histograms with a bin width of 16 are collected -
+ * one for each CFA colour plane. In plane modes 4, 5, 6 and 7 two 512-bin
+ * histograms with a bin width of 8 are collected - in each mode one of the
+ * colour planes is collected into the first histogram and all the others are
+ * combined into the second. The histograms are stored consecutively in the bins
+ * array.
+ *
+ * The 16-bit pixel counts are stored as a 4-bit exponent in the most
+ * significant bits followed by a 12-bit mantissa. Conversion to a usable
+ * format can be done according to the following pseudo-code::
+ *
+ * if (e == 0) {
+ * bin = m * 2;
+ * } else {
+ * bin = (m + 4096) * 2^e
+ * }
+ *
+ * where
+ * e is the exponent value in range 0..15
+ * m is the mantissa value in range 0..4095
+ *
+ * The pixels used in calculating the statistics can be masked using three
+ * methods:
+ *
+ * 1. Pixels can be skipped in X and Y directions independently.
+ * 2. Minimum/Maximum intensities can be configured
+ * 3. Zones can be differentially weighted, including 0 weighted to mask them
+ *
+ * The data for this histogram can be collected from different tap points in the
+ * ISP depending on configuration - after the white balance or digital gain
+ * blocks, or immediately after the input crossbar.
+ */
+struct mali_c55_ae_1024bin_hist {
+ __u16 bins[1024];
+} __attribute__((packed));
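Following the documented layout (4-bit exponent in bits [15:12], 12-bit mantissa in bits [11:0]), the conversion pseudo-code above translates to this sketch:

    #include <linux/types.h>

    static inline __u32 mali_c55_decode_hist_bin(__u16 raw)
    {
        __u32 e = raw >> 12;
        __u32 m = raw & 0xfff;

        if (e == 0)
            return m * 2;

        /* (m + 4096) * 2^e; the maximum result fits in 32 bits. */
        return (m + 4096) << e;
    }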
+
+/**
+ * struct mali_c55_ae_5bin_hist - Auto Exposure 5-bin histogram statistics
+ *
+ * @hist0: 16-bit normalised pixel count for the 0th intensity bin
+ * @hist1: 16-bit normalised pixel count for the 1st intensity bin
+ * @hist3: 16-bit normalised pixel count for the 3rd intensity bin
+ * @hist4: 16-bit normalised pixel count for the 4th intensity bin
+ *
+ * The ISP generates a 5-bin histogram of normalised pixel counts within bins of
+ * pixel intensity for each of 225 possible zones within a frame. The centre bin
+ * of the histogram for each zone is not available from the hardware and must be
+ * calculated by subtracting the values of hist0, hist1, hist3 and hist4 from
+ * 0xffff as in the following equation:
+ *
+ * hist2 = 0xffff - (hist0 + hist1 + hist3 + hist4)
+ */
+struct mali_c55_ae_5bin_hist {
+ __u16 hist0;
+ __u16 hist1;
+ __u16 hist3;
+ __u16 hist4;
+} __attribute__((packed));
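The centre bin is then recovered per zone exactly as the equation above describes, for example:

    #include <linux/mali-c55-config.h>

    static inline __u16 mali_c55_hist2(const struct mali_c55_ae_5bin_hist *h)
    {
        /* The sum wraps modulo 2^16, matching the hardware's
         * normalisation of each zone's histogram to 0xffff. */
        return 0xffff - (h->hist0 + h->hist1 + h->hist3 + h->hist4);
    }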
+
+/**
+ * struct mali_c55_awb_average_ratios - Auto White Balance colour ratios
+ *
+ * @avg_rg_gr: Average R/G or G/R ratio in Q4.8 format.
+ * @avg_bg_br: Average B/G or B/R ratio in Q4.8 format.
+ * @num_pixels: The number of pixels used in the AWB calculation
+ *
+ * The ISP calculates and collects average colour ratios for each zone in an
+ * image and stores them in Q4.8 format (the lowest 8 bits are fractional, with
+ * bits [11:8] representing the integer). The exact ratios collected (either
+ * R/G, B/G or G/R, B/R) are configurable through the parameters buffer. The
+ * value of the 4 high bits is undefined.
+ */
+struct mali_c55_awb_average_ratios {
+ __u16 avg_rg_gr;
+ __u16 avg_bg_br;
+ __u32 num_pixels;
+} __attribute__((packed));
+
+/**
+ * struct mali_c55_af_statistics - Auto Focus edge and intensity statistics
+ *
+ * @intensity_stats: Packed mantissa and exponent value for pixel intensity
+ * @edge_stats: Packed mantissa and exponent values for edge intensity
+ *
+ * The ISP collects the squared sum of pixel intensities for each zone within a
+ * configurable Region of Interest on the frame. Additionally, the same data are
+ * collected after being passed through a bandpass filter which removes high and
+ * low frequency components - these are referred to as the edge statistics.
+ *
+ * The intensity and edge statistics for a zone can be used to calculate its
+ * contrast information::
+ *
+ *	C = E2 / I2
+ *
+ * where I2 is the intensity statistic for a zone and E2 is the edge statistic
+ * for that zone. Optimum focus is reached when C is at its maximum.
+ *
+ * The intensity and edge statistics are stored packed in a non-standard 16-bit
+ * floating point format, where the 7 most significant bits represent the
+ * exponent and the 9 least significant bits the mantissa. This format can be
+ * unpacked with the following pseudocode::
+ *
+ *	if (e == 0) {
+ *		x = m;
+ *	} else {
+ *		x = 2^(e - 1) * (m + 2^9);
+ *	}
+ *
+ * where
+ *	e is the exponent value in range 0..127
+ *	m is the mantissa value in range 0..511
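+ *
+ * For instance (an illustrative sketch; a 64-bit result is used as the large
+ * exponent range exceeds 32-bit arithmetic)::
+ *
+ *	static inline __u64 mali_c55_decode_af_stat(__u16 raw)
+ *	{
+ *		__u32 e = raw >> 9;	/* 7-bit exponent */
+ *		__u64 m = raw & 0x1ff;	/* 9-bit mantissa */
+ *
+ *		/* Exponents above 54 would overflow 64 bits. */
+ *		return e == 0 ? m : (m + 512) << (e - 1);
+ *	}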
+ */
+struct mali_c55_af_statistics {
+ __u16 intensity_stats;
+ __u16 edge_stats;
+} __attribute__((packed));
+
+/**
+ * struct mali_c55_stats_buffer - 3A statistics for the mali-c55 ISP
+ *
+ * @ae_1024bin_hist: 1024-bin frame-global pixel intensity histogram
+ * @iridix_1024bin_hist: Post-Iridix block 1024-bin histogram
+ * @ae_5bin_hists: 5-bin pixel intensity histograms for AEC
+ * @reserved1: Undefined buffer space
+ * @awb_ratios: Color balance ratios for Auto White Balance
+ * @reserved2: Undefined buffer space
+ * @af_statistics: Pixel intensity statistics for Auto Focus
+ * @reserved3: Undefined buffer space
+ *
+ * This struct describes the metering statistics space in the Mali-C55 ISP's
+ * hardware in its entirety. The space between the defined areas is reserved;
+ * it may not be 0 and should not be used. The @ae_5bin_hists, @awb_ratios
+ * and @af_statistics members are arrays of per-zone statistics.
+ * The zones are arranged in the array in raster order starting from the top
+ * left corner of the image.
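+ *
+ * For example, with the full 15x15 grid of zones the AWB ratios for zone
+ * (row, col) can be located as follows (an illustrative sketch)::
+ *
+ *	struct mali_c55_awb_average_ratios *awb =
+ *		&stats->awb_ratios[row * 15 + col];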
+ */
+struct mali_c55_stats_buffer {
+ struct mali_c55_ae_1024bin_hist ae_1024bin_hist;
+ struct mali_c55_ae_1024bin_hist iridix_1024bin_hist;
+ struct mali_c55_ae_5bin_hist ae_5bin_hists[MALI_C55_MAX_ZONES];
+ __u32 reserved1[14];
+ struct mali_c55_awb_average_ratios awb_ratios[MALI_C55_MAX_ZONES];
+ __u32 reserved2[14];
+ struct mali_c55_af_statistics af_statistics[MALI_C55_MAX_ZONES];
+ __u32 reserved3[15];
+} __attribute__((packed));
+
+/**
+ * enum mali_c55_param_buffer_version - Mali-C55 parameters block versioning
+ *
+ * @MALI_C55_PARAM_BUFFER_V1: First version of Mali-C55 parameters block
+ */
+enum mali_c55_param_buffer_version {
+ MALI_C55_PARAM_BUFFER_V1,
+};
+
+/**
+ * enum mali_c55_param_block_type - Enumeration of Mali-C55 parameter blocks
+ *
+ * This enumeration defines the types of Mali-C55 parameters block. Each block
+ * configures a specific processing block of the Mali-C55 ISP. The block
+ * type allows the driver to correctly interpret the parameters block data.
+ *
+ * It is the responsibility of userspace to correctly set the type of each
+ * parameters block.
+ *
+ * @MALI_C55_PARAM_BLOCK_SENSOR_OFFS: Sensor pre-shading black level offset
+ * @MALI_C55_PARAM_BLOCK_AEXP_HIST: Auto-exposure 1024-bin histogram
+ * configuration
+ * @MALI_C55_PARAM_BLOCK_AEXP_IHIST: Post-Iridix auto-exposure 1024-bin
+ * histogram configuration
+ * @MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS: Auto-exposure 1024-bin histogram
+ * weighting
+ * @MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS: Post-Iridix auto-exposure 1024-bin
+ * histogram weighting
+ * @MALI_C55_PARAM_BLOCK_DIGITAL_GAIN: Digital gain
+ * @MALI_C55_PARAM_BLOCK_AWB_GAINS: Auto-white balance gains
+ * @MALI_C55_PARAM_BLOCK_AWB_CONFIG: Auto-white balance statistics config
+ * @MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP: Auto-white balance gains for AEXP-0 tap
+ * @MALI_C55_PARAM_MESH_SHADING_CONFIG: Mesh shading tables configuration
+ * @MALI_C55_PARAM_MESH_SHADING_SELECTION: Mesh shading table selection
+ */
+enum mali_c55_param_block_type {
+ MALI_C55_PARAM_BLOCK_SENSOR_OFFS,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS,
+ MALI_C55_PARAM_BLOCK_DIGITAL_GAIN,
+ MALI_C55_PARAM_BLOCK_AWB_GAINS,
+ MALI_C55_PARAM_BLOCK_AWB_CONFIG,
+ MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP,
+ MALI_C55_PARAM_MESH_SHADING_CONFIG,
+ MALI_C55_PARAM_MESH_SHADING_SELECTION,
+};
+
+#define MALI_C55_PARAM_BLOCK_FL_NONE 0
+#define MALI_C55_PARAM_BLOCK_FL_DISABLED BIT(0)
+
+/**
+ * struct mali_c55_params_block_header - Mali-C55 parameter block header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block embeds an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and its size and
+ * properly handle the block content by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`mali_c55_param_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes. The
+ * @flags field holds a bitmask of per-block flags MALI_C55_PARAM_BLOCK_FL_*.
+ *
+ * If userspace wants to disable an ISP block the
+ * MALI_C55_PARAM_BLOCK_FL_DISABLED bit should be set in the @flags field. In
+ * that case userspace may optionally omit the remainder of the configuration
+ * block, which will in any case be ignored by the driver. If a new
+ * configuration of an ISP block has to be applied userspace shall fully
+ * populate the ISP block and omit setting the MALI_C55_PARAM_BLOCK_FL_DISABLED
+ * bit in the @flags field.
+ *
+ * Userspace is responsible for correctly populating the parameters block
+ * header fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ *	void populate_sensor_offs(struct mali_c55_params_block_header *block)
+ *	{
+ *		struct mali_c55_params_sensor_off_preshading *sensor_offs =
+ *			(struct mali_c55_params_sensor_off_preshading *)block;
+ *
+ *		block->type = MALI_C55_PARAM_BLOCK_SENSOR_OFFS;
+ *		block->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ *		block->size = sizeof(struct mali_c55_params_sensor_off_preshading);
+ *
+ *		sensor_offs->chan00 = offset00;
+ *		sensor_offs->chan01 = offset01;
+ *		sensor_offs->chan10 = offset10;
+ *		sensor_offs->chan11 = offset11;
+ *	}
+ *
+ * @type: The parameters block type from :c:type:`mali_c55_param_block_type`
+ * @flags: Bitmask of block flags
+ * @size: Size (in bytes) of the parameters block
+ */
+struct mali_c55_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+} __attribute__((aligned(8)));
+
+/**
+ * struct mali_c55_params_sensor_off_preshading - offset subtraction for each
+ * color channel
+ *
+ * Provides removal of the sensor black level from the sensor data. Separate
+ * offsets are provided for each of the four Bayer component colour channels,
+ * which default to R, Gr, Gb and B.
+ *
+ * header.type should be set to MALI_C55_PARAM_BLOCK_SENSOR_OFFS from
+ * :c:type:`mali_c55_param_block_type` for this block.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @chan00: Offset for color channel 00 (default: R)
+ * @chan01: Offset for color channel 01 (default: Gr)
+ * @chan10: Offset for color channel 10 (default: Gb)
+ * @chan11: Offset for color channel 11 (default: B)
+ */
+struct mali_c55_params_sensor_off_preshading {
+ struct mali_c55_params_block_header header;
+ __u32 chan00;
+ __u32 chan01;
+ __u32 chan10;
+ __u32 chan11;
+};
+
+/**
+ * enum mali_c55_aexp_hist_tap_points - Tap points for the AEXP histogram
+ * @MALI_C55_AEXP_HIST_TAP_WB: After static white balance
+ * @MALI_C55_AEXP_HIST_TAP_FS: After WDR Frame Stitch
+ * @MALI_C55_AEXP_HIST_TAP_TPG: After the test pattern generator
+ */
+enum mali_c55_aexp_hist_tap_points {
+ MALI_C55_AEXP_HIST_TAP_WB = 0,
+ MALI_C55_AEXP_HIST_TAP_FS,
+ MALI_C55_AEXP_HIST_TAP_TPG,
+};
+
+/**
+ * enum mali_c55_aexp_skip_x - Horizontal pixel skipping
+ * @MALI_C55_AEXP_SKIP_X_EVERY_2ND: Collect every 2nd pixel horizontally
+ * @MALI_C55_AEXP_SKIP_X_EVERY_3RD: Collect every 3rd pixel horizontally
+ * @MALI_C55_AEXP_SKIP_X_EVERY_4TH: Collect every 4th pixel horizontally
+ * @MALI_C55_AEXP_SKIP_X_EVERY_5TH: Collect every 5th pixel horizontally
+ * @MALI_C55_AEXP_SKIP_X_EVERY_8TH: Collect every 8th pixel horizontally
+ * @MALI_C55_AEXP_SKIP_X_EVERY_9TH: Collect every 9th pixel horizontally
+ */
+enum mali_c55_aexp_skip_x {
+ MALI_C55_AEXP_SKIP_X_EVERY_2ND,
+ MALI_C55_AEXP_SKIP_X_EVERY_3RD,
+ MALI_C55_AEXP_SKIP_X_EVERY_4TH,
+ MALI_C55_AEXP_SKIP_X_EVERY_5TH,
+ MALI_C55_AEXP_SKIP_X_EVERY_8TH,
+ MALI_C55_AEXP_SKIP_X_EVERY_9TH
+};
+
+/**
+ * enum mali_c55_aexp_skip_y - Vertical pixel skipping
+ * @MALI_C55_AEXP_SKIP_Y_ALL: Collect every single pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_2ND: Collect every 2nd pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_3RD: Collect every 3rd pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_4TH: Collect every 4th pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_5TH: Collect every 5th pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_8TH: Collect every 8th pixel vertically
+ * @MALI_C55_AEXP_SKIP_Y_EVERY_9TH: Collect every 9th pixel vertically
+ */
+enum mali_c55_aexp_skip_y {
+ MALI_C55_AEXP_SKIP_Y_ALL,
+ MALI_C55_AEXP_SKIP_Y_EVERY_2ND,
+ MALI_C55_AEXP_SKIP_Y_EVERY_3RD,
+ MALI_C55_AEXP_SKIP_Y_EVERY_4TH,
+ MALI_C55_AEXP_SKIP_Y_EVERY_5TH,
+ MALI_C55_AEXP_SKIP_Y_EVERY_8TH,
+ MALI_C55_AEXP_SKIP_Y_EVERY_9TH
+};
+
+/**
+ * enum mali_c55_aexp_row_column_offset - Start from the first or second row or
+ * column
+ * @MALI_C55_AEXP_FIRST_ROW_OR_COL: Start from the first row / column
+ * @MALI_C55_AEXP_SECOND_ROW_OR_COL: Start from the second row / column
+ */
+enum mali_c55_aexp_row_column_offset {
+ MALI_C55_AEXP_FIRST_ROW_OR_COL = 1,
+ MALI_C55_AEXP_SECOND_ROW_OR_COL = 2,
+};
+
+/**
+ * enum mali_c55_aexp_hist_plane_mode - Mode for the AEXP Histograms
+ * @MALI_C55_AEXP_HIST_COMBINED: All color planes in one 1024-bin histogram
+ * @MALI_C55_AEXP_HIST_SEPARATE: Each color plane in one 256-bin histogram with a bin width of 16
+ * @MALI_C55_AEXP_HIST_FOCUS_00: Top left plane in the first bank, rest in second bank
+ * @MALI_C55_AEXP_HIST_FOCUS_01: Top right plane in the first bank, rest in second bank
+ * @MALI_C55_AEXP_HIST_FOCUS_10: Bottom left plane in the first bank, rest in second bank
+ * @MALI_C55_AEXP_HIST_FOCUS_11: Bottom right plane in the first bank, rest in second bank
+ *
+ * In the "focus" modes statistics are collected into two 512-bin histograms
+ * with a bin width of 8. One colour plane is in the first histogram with the
+ * remainder combined into the second. The four options represent which of the
+ * four positions in a Bayer pattern is the focused plane.
+ */
+enum mali_c55_aexp_hist_plane_mode {
+ MALI_C55_AEXP_HIST_COMBINED = 0,
+ MALI_C55_AEXP_HIST_SEPARATE = 1,
+ MALI_C55_AEXP_HIST_FOCUS_00 = 4,
+ MALI_C55_AEXP_HIST_FOCUS_01 = 5,
+ MALI_C55_AEXP_HIST_FOCUS_10 = 6,
+ MALI_C55_AEXP_HIST_FOCUS_11 = 7,
+};
+
+/**
+ * struct mali_c55_params_aexp_hist - configuration for AEXP metering hists
+ *
+ * This struct allows users to configure the 1024-bin AEXP histograms. Broadly
+ * speaking, the parameters allow masking of particular regions of the image
+ * and selection between the different kinds of histogram.
+ *
+ * The skip_x, offset_x, skip_y and offset_y fields allow users to ignore or
+ * mask pixels in the frame by their position relative to the top left pixel.
+ * First, the skip_y, offset_x and offset_y fields define which of the pixels
+ * within each 2x2 region will be counted in the statistics.
+ *
+ * If skip_y == 0 then two pixels from each covered region will be counted. If
+ * both offset_x and offset_y are zero, then the two left-most pixels in each
+ * 2x2 pixel region will be counted. Setting offset_x = 1 will discount the top
+ * left pixel and count the top right pixel. Setting offset_y = 1 will discount
+ * the bottom left pixel and count the bottom right pixel.
+ *
+ * If skip_y != 0 then only a single pixel from each region covered by the
+ * pattern will be counted. In this case offset_x controls whether the pixel
+ * that's counted is in the left (if offset_x == 0) or right (if offset_x == 1)
+ * column and offset_y controls whether the pixel that's counted is in the top
+ * (if offset_y == 0) or bottom (if offset_y == 1) row.
+ *
+ * The skip_x and skip_y fields control how the 2x2 pixel region is repeated
+ * across the image data. The first instance of the region is always in the top
+ * left of the image data. The skip_x field controls how many pixels are ignored
+ * in the x direction before the pixel masking region is repeated. The skip_y
+ * field controls how many pixels are ignored in the y direction before the
+ * pixel masking region is repeated.
+ *
+ * These fields can be used to reduce the number of pixels counted for the
+ * statistics, but care must be taken to configure them correctly. Some
+ * combinations of values will result in colour components from the input data
+ * being ignored entirely. For example, in the following configuration::
+ *
+ *	skip_x = 0
+ *	offset_x = 0
+ *	skip_y = 0
+ *	offset_y = 0
+ *
+ * only the R and Gb components of the input RGGB data would be collected.
+ * Similarly, in the following configuration::
+ *
+ *	skip_x = 0
+ *	offset_x = 0
+ *	skip_y = 1
+ *	offset_y = 1
+ *
+ * only the Gb component of the input RGGB data would be collected. To include
+ * all four colour components, the skip_x and skip_y fields must be set so that
+ * the sampled positions together cover every component, for example::
+ *
+ *	skip_x = 1
+ *	offset_x = 0
+ *	skip_y = 1
+ *	offset_y = 1
+ *
+ * header.type should be set to either MALI_C55_PARAM_BLOCK_AEXP_HIST or
+ * MALI_C55_PARAM_BLOCK_AEXP_IHIST from :c:type:`mali_c55_param_block_type`.
+ *
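+ * A population helper might look as follows (an illustrative sketch; the
+ * helper name and the chosen values are examples only)::
+ *
+ *	void populate_aexp_hist(struct mali_c55_params_aexp_hist *hist)
+ *	{
+ *		hist->header.type = MALI_C55_PARAM_BLOCK_AEXP_HIST;
+ *		hist->header.flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ *		hist->header.size = sizeof(*hist);
+ *
+ *		hist->skip_x = MALI_C55_AEXP_SKIP_X_EVERY_2ND;
+ *		hist->offset_x = MALI_C55_AEXP_FIRST_ROW_OR_COL;
+ *		hist->skip_y = MALI_C55_AEXP_SKIP_Y_ALL;
+ *		hist->offset_y = MALI_C55_AEXP_FIRST_ROW_OR_COL;
+ *		hist->scale_bottom = 0;
+ *		hist->scale_top = 0;
+ *		hist->plane_mode = MALI_C55_AEXP_HIST_COMBINED;
+ *		hist->tap_point = MALI_C55_AEXP_HIST_TAP_WB;
+ *	}
+ *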
+ * @header: The Mali-C55 parameters block header
+ * @skip_x: Horizontal decimation. See enum mali_c55_aexp_skip_x
+ * @offset_x: Skip the first column, or not. See enum mali_c55_aexp_row_column_offset
+ * @skip_y: Vertical decimation. See enum mali_c55_aexp_skip_y
+ * @offset_y: Skip the first row, or not. See enum mali_c55_aexp_row_column_offset
+ * @scale_bottom: Scale pixels in the bottom half of the intensity range:
+ *                0=1x, 1=2x, 2=4x, 3=8x, 4=16x
+ * @scale_top: Scale pixels in the top half of the intensity range:
+ *             0=1x, 1=2x, 2=4x, 3=8x, 4=16x
+ * @plane_mode: Plane separation mode. See enum mali_c55_aexp_hist_plane_mode
+ * @tap_point: Tap point for histogram from enum mali_c55_aexp_hist_tap_points.
+ * This parameter is unused for the post-Iridix Histogram
+ */
+struct mali_c55_params_aexp_hist {
+ struct mali_c55_params_block_header header;
+ __u8 skip_x;
+ __u8 offset_x;
+ __u8 skip_y;
+ __u8 offset_y;
+ __u8 scale_bottom;
+ __u8 scale_top;
+ __u8 plane_mode;
+ __u8 tap_point;
+};
+
+/**
+ * struct mali_c55_params_aexp_weights - Array of weights for AEXP metering
+ *
+ * This struct allows users to configure the weighting for both of the 1024-bin
+ * AEXP histograms. The pixel data collected for each zone is multiplied by the
+ * corresponding weight from this array, which may be zero if the intention is
+ * to mask off the zone entirely.
+ *
+ * header.type should be set to either MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS
+ * or MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS from
+ * :c:type:`mali_c55_param_block_type`.
+ *
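+ * For example, a full 15x15 grid of equally weighted zones could be set up as
+ * follows (an illustrative sketch)::
+ *
+ *	weights->nodes_used_horiz = 15;
+ *	weights->nodes_used_vert = 15;
+ *	memset(weights->zone_weights, 1, sizeof(weights->zone_weights));
+ *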
+ * @header: The Mali-C55 parameters block header
+ * @nodes_used_horiz: Number of active zones horizontally [0..15]
+ * @nodes_used_vert: Number of active zones vertically [0..15]
+ * @zone_weights: Zone weighting. The index is row * @nodes_used_horiz + col,
+ *                where 0,0 is the top left zone, continuing in raster order.
+ *                Each zone can be weighted in the range [0..15]. The number
+ *                of rows and columns is defined by @nodes_used_vert and
+ *                @nodes_used_horiz
+ */
+struct mali_c55_params_aexp_weights {
+ struct mali_c55_params_block_header header;
+ __u8 nodes_used_horiz;
+ __u8 nodes_used_vert;
+ __u8 zone_weights[MALI_C55_MAX_ZONES];
+};
+
+/**
+ * struct mali_c55_params_digital_gain - Digital gain value
+ *
+ * This struct carries a digital gain value to set in the ISP.
+ *
+ * header.type should be set to MALI_C55_PARAM_BLOCK_DIGITAL_GAIN from
+ * :c:type:`mali_c55_param_block_type` for this block.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @gain: The digital gain value to apply, in Q5.8 format.
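+ *        For example, a gain of 1.5x is represented as 384 (1.5 * 256),
+ *        since the low 8 bits are fractional.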
+ */
+struct mali_c55_params_digital_gain {
+ struct mali_c55_params_block_header header;
+ __u16 gain;
+};
+
+/**
+ * enum mali_c55_awb_stats_mode - Statistics mode for AWB
+ * @MALI_C55_AWB_MODE_GRBR: Statistics collected as Green/Red and Blue/Red ratios
+ * @MALI_C55_AWB_MODE_RGBG: Statistics collected as Red/Green and Blue/Green ratios
+ */
+enum mali_c55_awb_stats_mode {
+ MALI_C55_AWB_MODE_GRBR = 0,
+ MALI_C55_AWB_MODE_RGBG,
+};
+
+/**
+ * struct mali_c55_params_awb_gains - Gain settings for auto white balance
+ *
+ * This struct allows users to configure the gains for auto-white balance.
+ * There are four gain settings, one for each colour channel in the Bayer
+ * domain. Although named generically, the association between each gain and
+ * its colour channel is made automatically within the ISP depending on the
+ * input format, and so the following mapping always holds true::
+ *
+ *	gain00 = R
+ *	gain01 = Gr
+ *	gain10 = Gb
+ *	gain11 = B
+ *
+ * All of the gains are stored in Q4.8 format.
+ *
+ * header.type should be set to either MALI_C55_PARAM_BLOCK_AWB_GAINS or
+ * MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP from :c:type:`mali_c55_param_block_type`.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @gain00: Multiplier for colour channel 00
+ * @gain01: Multiplier for colour channel 01
+ * @gain10: Multiplier for colour channel 10
+ * @gain11: Multiplier for colour channel 11
+ */
+struct mali_c55_params_awb_gains {
+ struct mali_c55_params_block_header header;
+ __u16 gain00;
+ __u16 gain01;
+ __u16 gain10;
+ __u16 gain11;
+};
+
+/**
+ * enum mali_c55_params_awb_tap_points - Tap points for the AWB statistics
+ * @MALI_C55_AWB_STATS_TAP_PF: Immediately after the Purple Fringe block
+ * @MALI_C55_AWB_STATS_TAP_CNR: Immediately after the CNR block
+ */
+enum mali_c55_params_awb_tap_points {
+ MALI_C55_AWB_STATS_TAP_PF = 0,
+ MALI_C55_AWB_STATS_TAP_CNR,
+};
+
+/**
+ * struct mali_c55_params_awb_config - Stats settings for auto-white balance
+ *
+ * This struct allows the configuration of the statistics generated for auto
+ * white balance. Pixel intensity limits can be set to exclude overly bright or
+ * dark regions of an image from the statistics entirely. Colour ratio minima
+ * and maxima can be set to discount pixels whose ratios fall outside the
+ * defined boundaries; there are two sets of registers to do this - the
+ * "min/max" ratios which bound a rectangular region and the "high/low" ratios
+ * which further trim its corners. For example, with the boundaries configured
+ * as follows, only pixels whose colour ratios fall into the region marked "A"
+ * would be counted::
+ *
+ *                                       cr_high
+ *   2.0 |                                  |
+ *       | cb_max --> ______________________v______
+ *   1.8 |           |                      \      |
+ *       |           |                       \     |
+ *   1.6 |           |                        \    |
+ *       |           |                         \   |
+ * c 1.4 |  cb_low ->|\            A            \  |<- cb_high
+ * b     |           | \                           |
+ *   1.2 |           |  \                          |
+ * r     |           |   \                         |
+ * a 1.0 |  cb_min ->|____\________________________|
+ * t     |            ^    ^                       ^
+ * i 0.8 |            |    |                       |
+ * o     |        cr_min   |                    cr_max
+ * s 0.6 |                 |
+ *       |              cr_low
+ *   0.4 |
+ *       |
+ *   0.2 |
+ *       |
+ *   0.0 |________________________________________________________
+ *       0.0   0.2   0.4   0.6   0.8   1.0   1.2   1.4   1.6   1.8   2.0
+ *                               cr ratios
+ *
+ * header.type should be set to MALI_C55_PARAM_BLOCK_AWB_CONFIG from
+ * :c:type:`mali_c55_param_block_type` for this block.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @tap_point: The tap point from enum mali_c55_params_awb_tap_points
+ * @stats_mode: AWB statistics collection mode, see :c:type:`mali_c55_awb_stats_mode`
+ * @white_level: Upper pixel intensity (i.e. raw pixel value) limit
+ * @black_level: Lower pixel intensity (i.e. raw pixel value) limit
+ * @cr_max: Maximum R/G ratio (Q4.8 format)
+ * @cr_min: Minimum R/G ratio (Q4.8 format)
+ * @cb_max: Maximum B/G ratio (Q4.8 format)
+ * @cb_min: Minimum B/G ratio (Q4.8 format)
+ * @nodes_used_horiz: Number of active zones horizontally [0..15]
+ * @nodes_used_vert: Number of active zones vertically [0..15]
+ * @cr_high: R/G ratio trim high (Q4.8 format)
+ * @cr_low: R/G ratio trim low (Q4.8 format)
+ * @cb_high: B/G ratio trim high (Q4.8 format)
+ * @cb_low: B/G ratio trim low (Q4.8 format)
+ */
+struct mali_c55_params_awb_config {
+ struct mali_c55_params_block_header header;
+ __u8 tap_point;
+ __u8 stats_mode;
+ __u16 white_level;
+ __u16 black_level;
+ __u16 cr_max;
+ __u16 cr_min;
+ __u16 cb_max;
+ __u16 cb_min;
+ __u8 nodes_used_horiz;
+ __u8 nodes_used_vert;
+ __u16 cr_high;
+ __u16 cr_low;
+ __u16 cb_high;
+ __u16 cb_low;
+};
+
+#define MALI_C55_NUM_MESH_SHADING_ELEMENTS 3072
+
+/**
+ * struct mali_c55_params_mesh_shading_config - Mesh shading configuration
+ *
+ * The mesh shading correction module allows programming a separate table of
+ * either 16x16 or 32x32 node coefficients for 3 different light sources. The
+ * final correction coefficients applied are computed by blending the
+ * coefficients from two tables together.
+ *
+ * A page of 1024 32-bit integers is associated with each colour channel, and
+ * the pages are stored consecutively in memory. Each 32-bit integer packs 3
+ * 8-bit correction coefficients for a single node, one for each of the three
+ * light sources. The 8 most significant bits are unused. The following table
+ * describes the layout::
+ *
+ *	+----------- Page (Colour Plane) 0 -------------+
+ *	| @mesh[i]  | Mesh Point | Bits  | Light Source |
+ *	+-----------+------------+-------+--------------+
+ *	| 0         | 0,0        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| 1         | 0,1        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| ...       | ...        | ...   | ...          |
+ *	+-----------+------------+-------+--------------+
+ *	| 1023      | 31,31      | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+----------- Page (Colour Plane) 1 -------------+
+ *	| @mesh[i]  | Mesh Point | Bits  | Light Source |
+ *	+-----------+------------+-------+--------------+
+ *	| 1024      | 0,0        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| 1025      | 0,1        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| ...       | ...        | ...   | ...          |
+ *	+-----------+------------+-------+--------------+
+ *	| 2047      | 31,31      | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+----------- Page (Colour Plane) 2 -------------+
+ *	| @mesh[i]  | Mesh Point | Bits  | Light Source |
+ *	+-----------+------------+-------+--------------+
+ *	| 2048      | 0,0        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| 2049      | 0,1        | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *	| ...       | ...        | ...   | ...          |
+ *	+-----------+------------+-------+--------------+
+ *	| 3071      | 31,31      | 16-23 | LS2          |
+ *	|           |            | 08-15 | LS1          |
+ *	|           |            | 00-07 | LS0          |
+ *	+-----------+------------+-------+--------------+
+ *
+ * The @mesh_scale member determines the precision and minimum and maximum gain.
+ * For example if @mesh_scale is 0 and therefore selects 0 - 2x gain, a value of
+ * 0 in a coefficient means 0.0 gain, a value of 128 means 1.0 gain and 255
+ * means 2.0 gain.
+ *
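+ * The three coefficients for a given node can be unpacked as follows (an
+ * illustrative sketch, assuming a 32x32 table so that each row holds 32
+ * nodes)::
+ *
+ *	__u32 word = config->mesh[plane * 1024 + y * 32 + x];
+ *	__u8 ls0 = word & 0xff;
+ *	__u8 ls1 = (word >> 8) & 0xff;
+ *	__u8 ls2 = (word >> 16) & 0xff;
+ *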
+ * header.type should be set to MALI_C55_PARAM_MESH_SHADING_CONFIG from
+ * :c:type:`mali_c55_param_block_type` for this block.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @mesh_show: Output the mesh data rather than image data
+ * @mesh_scale: Set the precision and maximum gain range of mesh shading
+ * - 0 = 0-2x gain
+ * - 1 = 0-4x gain
+ * - 2 = 0-8x gain
+ * - 3 = 0-16x gain
+ * - 4 = 1-2x gain
+ * - 5 = 1-3x gain
+ * - 6 = 1-5x gain
+ * - 7 = 1-9x gain
+ * @mesh_page_r: Mesh page select for red colour plane [0..2]
+ * @mesh_page_g: Mesh page select for green colour plane [0..2]
+ * @mesh_page_b: Mesh page select for blue colour plane [0..2]
+ * @mesh_width: Number of horizontal nodes minus 1 (15 or 31)
+ * @mesh_height: Number of vertical nodes minus 1 (15 or 31)
+ * @mesh: Mesh shading correction tables
+ */
+struct mali_c55_params_mesh_shading_config {
+ struct mali_c55_params_block_header header;
+ __u8 mesh_show;
+ __u8 mesh_scale;
+ __u8 mesh_page_r;
+ __u8 mesh_page_g;
+ __u8 mesh_page_b;
+ __u8 mesh_width;
+ __u8 mesh_height;
+ __u32 mesh[MALI_C55_NUM_MESH_SHADING_ELEMENTS];
+};
+
+/**
+ * enum mali_c55_params_mesh_alpha_bank - Mesh shading table bank selection
+ * @MALI_C55_MESH_ALPHA_BANK_LS0_AND_LS1: Select Light Sources 0 and 1
+ * @MALI_C55_MESH_ALPHA_BANK_LS1_AND_LS2: Select Light Sources 1 and 2
+ * @MALI_C55_MESH_ALPHA_BANK_LS0_AND_LS2: Select Light Sources 0 and 2
+ */
+enum mali_c55_params_mesh_alpha_bank {
+ MALI_C55_MESH_ALPHA_BANK_LS0_AND_LS1 = 0,
+ MALI_C55_MESH_ALPHA_BANK_LS1_AND_LS2 = 1,
+ MALI_C55_MESH_ALPHA_BANK_LS0_AND_LS2 = 4
+};
+
+/**
+ * struct mali_c55_params_mesh_shading_selection - Mesh table selection
+ *
+ * The module computes the final correction coefficients by blending the ones
+ * from two light source tables, which are selected (independently for each
+ * colour channel) by the @mesh_alpha_bank_r/g/b fields.
+ *
+ * The final blended coefficients for each node are calculated using the
+ * following equation:
+ *
+ * Final coefficient = (a * LS\ :sub:`b`\ + (256 - a) * LS\ :sub:`a`\) / 256
+ *
+ * Where a is the @mesh_alpha_r/g/b value, and LS\ :sub:`a`\ and LS\ :sub:`b`\
+ * are the node coefficients for the two tables selected by the
+ * @mesh_alpha_bank_r/g/b value.
+ *
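+ * In integer arithmetic this could be computed as follows (an illustrative
+ * sketch); with a = 128 the two selected tables contribute equally::
+ *
+ *	final = (a * ls_b + (256 - a) * ls_a) / 256;
+ *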
+ * The scale of the applied correction may also be controlled by tuning the
+ * @mesh_strength member. This is a modifier to the final coefficients which can
+ * be used to globally reduce the gains applied.
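+ * In Q4.12 format a value of 4096 represents 1.0, meaning the blended
+ * coefficients are applied unchanged, while lower values reduce them
+ * proportionally.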
+ *
+ * header.type should be set to MALI_C55_PARAM_MESH_SHADING_SELECTION from
+ * :c:type:`mali_c55_param_block_type` for this block.
+ *
+ * @header: The Mali-C55 parameters block header
+ * @mesh_alpha_bank_r: Red mesh table select, see :c:type:`mali_c55_params_mesh_alpha_bank`
+ * @mesh_alpha_bank_g: Green mesh table select, see :c:type:`mali_c55_params_mesh_alpha_bank`
+ * @mesh_alpha_bank_b: Blue mesh table select, see :c:type:`mali_c55_params_mesh_alpha_bank`
+ * @mesh_alpha_r: Blend coefficient for R [0..255]
+ * @mesh_alpha_g: Blend coefficient for G [0..255]
+ * @mesh_alpha_b: Blend coefficient for B [0..255]
+ * @mesh_strength: Mesh strength in Q4.12 format [0..4096]
+ */
+struct mali_c55_params_mesh_shading_selection {
+ struct mali_c55_params_block_header header;
+ __u8 mesh_alpha_bank_r;
+ __u8 mesh_alpha_bank_g;
+ __u8 mesh_alpha_bank_b;
+ __u8 mesh_alpha_r;
+ __u8 mesh_alpha_g;
+ __u8 mesh_alpha_b;
+ __u16 mesh_strength;
+};
+
+/**
+ * define MALI_C55_PARAMS_MAX_SIZE - Maximum size of all Mali C55 Parameters
+ *
+ * Though the parameters for the Mali-C55 are passed as optional blocks, the
+ * driver still needs to know the absolute maximum size so that it can allocate
+ * a buffer sized appropriately to accommodate userspace attempting to set all
+ * possible parameters in a single frame.
+ *
+ * Some structs appear in this list multiple times, reflecting the fact that
+ * the same struct can be used with multiple different header types from
+ * :c:type:`mali_c55_param_block_type`.
+ */
+#define MALI_C55_PARAMS_MAX_SIZE \
+ (sizeof(struct mali_c55_params_sensor_off_preshading) + \
+ sizeof(struct mali_c55_params_aexp_hist) + \
+ sizeof(struct mali_c55_params_aexp_weights) + \
+ sizeof(struct mali_c55_params_aexp_hist) + \
+ sizeof(struct mali_c55_params_aexp_weights) + \
+ sizeof(struct mali_c55_params_digital_gain) + \
+ sizeof(struct mali_c55_params_awb_gains) + \
+ sizeof(struct mali_c55_params_awb_config) + \
+ sizeof(struct mali_c55_params_awb_gains) + \
+ sizeof(struct mali_c55_params_mesh_shading_config) + \
+ sizeof(struct mali_c55_params_mesh_shading_selection))
+
+/**
+ * struct mali_c55_params_buffer - 3A configuration parameters
+ *
+ * This struct contains the configuration parameters of the Mali-C55 ISP
+ * algorithms, serialized by userspace into a data buffer. Each configuration
+ * parameter block is represented by a block-specific structure which contains a
+ * :c:type:`mali_c55_params_block_header` entry as first member. Userspace
+ * populates the @data buffer with configuration parameters for the blocks that
+ * it intends to configure. As a consequence, the data buffer effective size
+ * changes according to the number of ISP blocks that userspace intends to
+ * configure.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace shall populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated version and return an error if the desired version is not
+ * supported.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after the other without gaps
+ * in between nor overlaps. Userspace shall populate the @total_size field with
+ * the effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ *	+-------------------- struct mali_c55_params_buffer ------------------+
+ *	| version = MALI_C55_PARAM_BUFFER_V1;                                 |
+ *	| total_size = sizeof(struct mali_c55_params_sensor_off_preshading)   |
+ *	|              + sizeof(struct mali_c55_params_aexp_hist);            |
+ *	| +----------------------------- data ------------------------------+ |
+ *	| | +------- struct mali_c55_params_sensor_off_preshading -------+  | |
+ *	| | | +------ struct mali_c55_params_block_header header ------+ |  | |
+ *	| | | | type = MALI_C55_PARAM_BLOCK_SENSOR_OFFS;               | |  | |
+ *	| | | | flags = MALI_C55_PARAM_BLOCK_FL_NONE;                  | |  | |
+ *	| | | | size =                                                 | |  | |
+ *	| | | |   sizeof(struct mali_c55_params_sensor_off_preshading);| |  | |
+ *	| | | +--------------------------------------------------------+ |  | |
+ *	| | | chan00 = ...;                                              |  | |
+ *	| | | chan01 = ...;                                              |  | |
+ *	| | | chan10 = ...;                                              |  | |
+ *	| | | chan11 = ...;                                              |  | |
+ *	| | +------------- struct mali_c55_params_aexp_hist -------------+  | |
+ *	| | | +------ struct mali_c55_params_block_header header ------+ |  | |
+ *	| | | | type = MALI_C55_PARAM_BLOCK_AEXP_HIST;                 | |  | |
+ *	| | | | flags = MALI_C55_PARAM_BLOCK_FL_NONE;                  | |  | |
+ *	| | | | size = sizeof(struct mali_c55_params_aexp_hist);       | |  | |
+ *	| | | +--------------------------------------------------------+ |  | |
+ *	| | | skip_x = ...;                                              |  | |
+ *	| | | offset_x = ...;                                            |  | |
+ *	| | | skip_y = ...;                                              |  | |
+ *	| | | offset_y = ...;                                            |  | |
+ *	| | | scale_bottom = ...;                                        |  | |
+ *	| | | scale_top = ...;                                           |  | |
+ *	| | | plane_mode = ...;                                          |  | |
+ *	| | | tap_point = ...;                                           |  | |
+ *	| | +------------------------------------------------------------+  | |
+ *	| +------------------------------------------------------------------+ |
+ *	+----------------------------------------------------------------------+
+ *
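+ * Expressed in code (an illustrative sketch, reusing the example population
+ * helpers shown earlier in this header)::
+ *
+ *	struct mali_c55_params_buffer *params = ...;
+ *	__u32 offset = 0;
+ *
+ *	params->version = MALI_C55_PARAM_BUFFER_V1;
+ *
+ *	populate_sensor_offs((struct mali_c55_params_block_header *)
+ *			     &params->data[offset]);
+ *	offset += sizeof(struct mali_c55_params_sensor_off_preshading);
+ *
+ *	populate_aexp_hist((struct mali_c55_params_aexp_hist *)
+ *			   &params->data[offset]);
+ *	offset += sizeof(struct mali_c55_params_aexp_hist);
+ *
+ *	params->total_size = offset;
+ *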
+ * @version: The version from :c:type:`mali_c55_param_buffer_version`
+ * @total_size: The Mali-C55 configuration data effective size, excluding this
+ * header
+ * @data: The Mali-C55 configuration blocks data
+ */
+struct mali_c55_params_buffer {
+ __u8 version;
+ __u32 total_size;
+ __u8 data[MALI_C55_PARAMS_MAX_SIZE];
+};
+
+#endif /* __UAPI_MALI_C55_CONFIG_H */
diff --git a/include/linux/media-bus-format.h b/include/linux/media-bus-format.h
index f05f747e..bf467168 100644
--- a/include/linux/media-bus-format.h
+++ b/include/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1026 */
+/* RGB - next is 0x1027 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -72,6 +72,7 @@
#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
+#define MEDIA_BUS_FMT_RGB202020_1X60 0x1026
/* YUV (including grey) - next is 0x202f */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
@@ -121,7 +122,7 @@
#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
-/* Bayer - next is 0x3021 */
+/* Bayer - next is 0x3025 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
@@ -154,6 +155,10 @@
#define MEDIA_BUS_FMT_SGBRG16_1X16 0x301e
#define MEDIA_BUS_FMT_SGRBG16_1X16 0x301f
#define MEDIA_BUS_FMT_SRGGB16_1X16 0x3020
+#define MEDIA_BUS_FMT_SBGGR20_1X20 0x3021
+#define MEDIA_BUS_FMT_SGBRG20_1X20 0x3022
+#define MEDIA_BUS_FMT_SGRBG20_1X20 0x3023
+#define MEDIA_BUS_FMT_SRGGB20_1X20 0x3024
/* JPEG compressed formats - next is 0x4002 */
#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
@@ -174,4 +179,17 @@
*/
#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
+/* Generic line based metadata formats for serial buses. Next is 0x8008. */
+#define MEDIA_BUS_FMT_META_8 0x8001
+#define MEDIA_BUS_FMT_META_10 0x8002
+#define MEDIA_BUS_FMT_META_12 0x8003
+#define MEDIA_BUS_FMT_META_14 0x8004
+#define MEDIA_BUS_FMT_META_16 0x8005
+#define MEDIA_BUS_FMT_META_20 0x8006
+#define MEDIA_BUS_FMT_META_24 0x8007
+
+/* Specific metadata formats. Next is 0x9003. */
+#define MEDIA_BUS_FMT_CCS_EMBEDDED 0x9001
+#define MEDIA_BUS_FMT_OV2740_EMBEDDED 0x9002
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/linux/media.h b/include/linux/media.h
index b5a77bbf..4a733b9b 100644
--- a/include/linux/media.h
+++ b/include/linux/media.h
@@ -206,6 +206,7 @@ struct media_entity_desc {
#define MEDIA_PAD_FL_SINK (1U << 0)
#define MEDIA_PAD_FL_SOURCE (1U << 1)
#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
+#define MEDIA_PAD_FL_INTERNAL (1U << 3)
struct media_pad_desc {
__u32 entity; /* entity ID */
diff --git a/include/linux/rkisp1-config.h b/include/linux/rkisp1-config.h
index 2d1c448a..edbc6cb6 100644
--- a/include/linux/rkisp1-config.h
+++ b/include/linux/rkisp1-config.h
@@ -4,8 +4,8 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
-#ifndef _UAPI_RKISP1_CONFIG_H
-#define _UAPI_RKISP1_CONFIG_H
+#ifndef _RKISP1_CONFIG_H
+#define _RKISP1_CONFIG_H
#include <linux/types.h>
@@ -165,6 +165,11 @@
#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
/*
+ * Compand
+ */
+#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -175,11 +180,14 @@
/**
* enum rkisp1_cif_isp_version - ISP variants
*
- * @RKISP1_V10: used at least in rk3288 and rk3399
- * @RKISP1_V11: declared in the original vendor code, but not used
- * @RKISP1_V12: used at least in rk3326 and px30
- * @RKISP1_V13: used at least in rk1808
- * @RKISP1_V_IMX8MP: used in at least imx8mp
+ * @RKISP1_V10: Used at least in RK3288 and RK3399.
+ * @RKISP1_V11: Declared in the original vendor code, but not used. Same number
+ * of entries in grids and histogram as v10.
+ * @RKISP1_V12: Used at least in RK3326 and PX30.
+ * @RKISP1_V13: Used at least in RK1808. Same number of entries in grids and
+ * histogram as v12.
+ * @RKISP1_V_IMX8MP: Used in at least i.MX8MP. Same number of entries in grids
+ * and histogram as v10.
*/
enum rkisp1_cif_isp_version {
RKISP1_V10 = 10,
@@ -586,10 +594,9 @@ enum rkisp1_cif_isp_goc_mode {
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10
- * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
- * entries. RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum
- * of the two.
+ * V10 has RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 entries, V12 has
+ * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 entries.
+ * RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum of the two.
*/
struct rkisp1_cif_isp_goc_config {
__u32 mode;
@@ -609,10 +616,10 @@ struct rkisp1_cif_isp_goc_config {
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10
- * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
- * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum
- * of the two.
+ * V10 has RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 entries, V12 has
+ * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 entries.
+ * RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum of the
+ * two.
*/
struct rkisp1_cif_isp_hst_config {
__u32 mode;
@@ -849,6 +856,39 @@ struct rkisp1_params_cfg {
struct rkisp1_cif_isp_isp_other_cfg others;
};
+/**
+ * struct rkisp1_cif_isp_compand_bls_config - Rockchip ISP1 Companding parameters (BLS)
+ * @r: Fixed subtraction value for Bayer pattern R
+ * @gr: Fixed subtraction value for Bayer pattern Gr
+ * @gb: Fixed subtraction value for Bayer pattern Gb
+ * @b: Fixed subtraction value for Bayer pattern B
+ *
+ * The values will be subtracted from the sensor values. Note that unlike the
+ * dedicated BLS block, the BLS values in the compander are 20-bit unsigned.
+ */
+struct rkisp1_cif_isp_compand_bls_config {
+ __u32 r;
+ __u32 gr;
+ __u32 gb;
+ __u32 b;
+};
+
+/**
+ * struct rkisp1_cif_isp_compand_curve_config - Rockchip ISP1 Companding
+ * parameters (expand and compression curves)
+ * @px: Compand curve x-values. Each value stores the distance from the
+ * previous x-value, expressed as log2 of the distance on 5 bits.
+ * @x: Compand curve x-values. The functionality of these parameters is
+ *     unknown due to a lack of hardware documentation, but they are left
+ *     here for future compatibility purposes.
+ * @y: Compand curve y-values
+ */
+struct rkisp1_cif_isp_compand_curve_config {
+ __u8 px[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 x[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+ __u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
+};
+
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -904,9 +944,9 @@ struct rkisp1_cif_isp_bls_meas_val {
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
- * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
- * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two.
+ * V10 has RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries. RKISP1_CIF_ISP_AE_MEAN_MAX is equal
+ * to the maximum of the two.
*
* Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
*/
@@ -946,21 +986,21 @@ struct rkisp1_cif_isp_af_stat {
* integer part.
*
* The window of the measurements area is divided to 5x5 sub-windows for
- * V10/V11 and to 9x9 sub-windows for V12. The histogram is then computed for
- * each sub-window independently and the final result is a weighted average of
- * the histogram measurements on all sub-windows. The window of the
- * measurements area and the weight of each sub-window are configurable using
+ * V10 and to 9x9 sub-windows for V12. The histogram is then computed for each
+ * sub-window independently and the final result is a weighted average of the
+ * histogram measurements on all sub-windows. The window of the measurements
+ * area and the weight of each sub-window are configurable using
* struct @rkisp1_cif_isp_hst_config.
*
- * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13.
+ * The histogram contains 16 bins in V10 and 32 bins in V12.
*
* The number of entries of @hist_bins depends on the hardware revision
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
- * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries,
- * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries.
- * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two.
+ * V10 has RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries, V12 has
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries. RKISP1_CIF_ISP_HIST_BIN_N_MAX is
+ * equal to the maximum of the two.
*/
struct rkisp1_cif_isp_hist_stat {
__u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
@@ -994,4 +1034,544 @@ struct rkisp1_stat_buffer {
struct rkisp1_cif_isp_stat params;
};
-#endif /* _UAPI_RKISP1_CONFIG_H */
+/*---------- PART3: Extensible Configuration Parameters ------------*/
+
+/**
+ * enum rkisp1_ext_params_block_type - RkISP1 extensible params block type
+ *
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS: Black level subtraction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC: Defect pixel cluster correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG: Sensor de-gamma
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN: Auto white balance gains
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT: ISP filtering
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM: Bayer de-mosaic
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK: Cross-talk correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC: Gamma out correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF: De-noise pre-filter
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH: De-noise pre-filter strength
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC: Color processing
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_IE: Image effects
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC: Lens shading correction
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS: Auto white balance statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS: Histogram statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS: Auto exposure statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS: Auto-focus statistics
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ */
+enum rkisp1_ext_params_block_type {
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_IE,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+};
+
+#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
+#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+
+/**
+ * struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
+ * header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block shall embed an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and its size and
+ * properly handle the block content by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`rkisp1_ext_params_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes.
+ *
+ * The @flags field is a bitmask of per-block flags RKISP1_EXT_PARAMS_FL_*.
+ *
+ * When userspace wants to configure and enable an ISP block it shall fully
+ * populate the block configuration and set the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE bit in the @flags field.
+ *
+ * When userspace simply wants to disable an ISP block the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bit should be set in @flags field. The
+ * driver ignores the rest of the block configuration structure in this case.
+ *
+ * If a new configuration of an ISP block has to be applied userspace shall
+ * fully populate the ISP block configuration and omit setting the
+ * RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits
+ * in the @flags field.
+ *
+ * Setting both the RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE and
+ * RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE bits in the @flags field is not allowed
+ * and not accepted by the driver.
+ *
+ * Userspace is responsible for correctly populating the parameters block header
+ * fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ *	void populate_bls(struct rkisp1_ext_params_block_header *block)
+ *	{
+ *		struct rkisp1_ext_params_bls_config *bls =
+ *			(struct rkisp1_ext_params_bls_config *)block;
+ *
+ *		bls->header.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS;
+ *		bls->header.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE;
+ *		bls->header.size = sizeof(*bls);
+ *
+ *		bls->config.enable_auto = 0;
+ *		bls->config.fixed_val.r = blackLevelRed_;
+ *		bls->config.fixed_val.gr = blackLevelGreenR_;
+ *		bls->config.fixed_val.gb = blackLevelGreenB_;
+ *		bls->config.fixed_val.b = blackLevelBlue_;
+ *	}
+ *
+ * @type: The parameters block type, see
+ * :c:type:`rkisp1_ext_params_block_type`
+ * @flags: A bitmask of block flags
+ * @size: Size (in bytes) of the parameters block, including this header
+ */
+struct rkisp1_ext_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+};
+
+/**
+ * struct rkisp1_ext_params_bls_config - RkISP1 extensible params BLS config
+ *
+ * RkISP1 extensible parameters Black Level Subtraction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Black Level Subtraction configuration, see
+ * :c:type:`rkisp1_cif_isp_bls_config`
+ */
+struct rkisp1_ext_params_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpcc_config - RkISP1 extensible params DPCC config
+ *
+ * RkISP1 extensible parameters Defective Pixel Cluster Correction configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Defective Pixel Cluster Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_dpcc_config`
+ */
+struct rkisp1_ext_params_dpcc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpcc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_sdg_config - RkISP1 extensible params SDG config
+ *
+ * RkISP1 extensible parameters Sensor Degamma configuration block. Identified
+ * by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_SDG`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Sensor Degamma configuration, see
+ * :c:type:`rkisp1_cif_isp_sdg_config`
+ */
+struct rkisp1_ext_params_sdg_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_sdg_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_lsc_config - RkISP1 extensible params LSC config
+ *
+ * RkISP1 extensible parameters Lens Shading Correction configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_LSC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Lens Shading Correction configuration, see
+ * :c:type:`rkisp1_cif_isp_lsc_config`
+ */
+struct rkisp1_ext_params_lsc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_lsc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_gain_config - RkISP1 extensible params AWB
+ * gain config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Gains configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_GAIN`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance Gains configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_gain_config`
+ */
+struct rkisp1_ext_params_awb_gain_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_gain_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_flt_config - RkISP1 extensible params FLT config
+ *
+ * RkISP1 extensible parameters Filter configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_FLT`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Filter configuration, see :c:type:`rkisp1_cif_isp_flt_config`
+ */
+struct rkisp1_ext_params_flt_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_flt_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_bdm_config - RkISP1 extensible params BDM config
+ *
+ * RkISP1 extensible parameters Demosaicing configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_BDM`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Demosaicing configuration, see :c:type:`rkisp1_cif_isp_bdm_config`
+ */
+struct rkisp1_ext_params_bdm_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_bdm_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ctk_config - RkISP1 extensible params CTK config
+ *
+ * RkISP1 extensible parameters Cross-Talk configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CTK`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Cross-Talk configuration, see :c:type:`rkisp1_cif_isp_ctk_config`
+ */
+struct rkisp1_ext_params_ctk_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ctk_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_goc_config - RkISP1 extensible params GOC config
+ *
+ * RkISP1 extensible parameters Gamma-Out configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_GOC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Gamma-Out configuration, see :c:type:`rkisp1_cif_isp_goc_config`
+ */
+struct rkisp1_ext_params_goc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_goc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_config - RkISP1 extensible params DPF config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_config`
+ */
+struct rkisp1_ext_params_dpf_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_dpf_strength_config - RkISP1 extensible params DPF
+ * strength config
+ *
+ * RkISP1 extensible parameters De-noise Pre-Filter strength configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_DPF_STRENGTH`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: De-noise Pre-Filter strength configuration, see
+ * :c:type:`rkisp1_cif_isp_dpf_strength_config`
+ */
+struct rkisp1_ext_params_dpf_strength_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_dpf_strength_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_cproc_config - RkISP1 extensible params CPROC config
+ *
+ * RkISP1 extensible parameters Color Processing configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_CPROC`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Color processing configuration, see
+ * :c:type:`rkisp1_cif_isp_cproc_config`
+ */
+struct rkisp1_ext_params_cproc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_cproc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_ie_config - RkISP1 extensible params IE config
+ *
+ * RkISP1 extensible parameters Image Effect configuration block. Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_IE`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Image Effect configuration, see :c:type:`rkisp1_cif_isp_ie_config`
+ */
+struct rkisp1_ext_params_ie_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_ie_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_awb_meas_config - RkISP1 extensible params AWB
+ * Meas config
+ *
+ * RkISP1 extensible parameters Auto-White Balance Measurement configuration
+ * block. Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AWB_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-White Balance measure configuration, see
+ * :c:type:`rkisp1_cif_isp_awb_meas_config`
+ */
+struct rkisp1_ext_params_awb_meas_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_awb_meas_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_hst_config - RkISP1 extensible params Histogram config
+ *
+ * RkISP1 extensible parameters Histogram statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_HST_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Histogram statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_hst_config`
+ */
+struct rkisp1_ext_params_hst_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_hst_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_aec_config - RkISP1 extensible params AEC config
+ *
+ * RkISP1 extensible parameters Auto-Exposure statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AEC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Exposure statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_aec_config`
+ */
+struct rkisp1_ext_params_aec_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_aec_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_afc_config - RkISP1 extensible params AFC config
+ *
+ * RkISP1 extensible parameters Auto-Focus statistics configuration block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_AFC_MEAS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Auto-Focus statistics configuration, see
+ * :c:type:`rkisp1_cif_isp_afc_config`
+ */
+struct rkisp1_ext_params_afc_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_afc_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_bls_config - RkISP1 extensible params
+ * Compand BLS config
+ *
+ * RkISP1 extensible parameters Companding configuration block (black level
+ * subtraction). Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding BLS configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_bls_config`
+ */
+struct rkisp1_ext_params_compand_bls_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_bls_config config;
+} __attribute__((aligned(8)));
+
+/**
+ * struct rkisp1_ext_params_compand_curve_config - RkISP1 extensible params
+ * Compand curve config
+ *
+ * RkISP1 extensible parameters Companding configuration block (expand and
+ * compression curves). Identified by
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND` or
+ * :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS`.
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: Companding curve configuration, see
+ * :c:type:`rkisp1_cif_isp_compand_curve_config`
+ */
+struct rkisp1_ext_params_compand_curve_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_compand_curve_config config;
+} __attribute__((aligned(8)));
+
+/*
+ * The rkisp1_ext_params_compand_curve_config structure is counted twice as it
+ * is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
+ */
+#define RKISP1_EXT_PARAMS_MAX_SIZE \
+ (sizeof(struct rkisp1_ext_params_bls_config) +\
+ sizeof(struct rkisp1_ext_params_dpcc_config) +\
+ sizeof(struct rkisp1_ext_params_sdg_config) +\
+ sizeof(struct rkisp1_ext_params_lsc_config) +\
+ sizeof(struct rkisp1_ext_params_awb_gain_config) +\
+ sizeof(struct rkisp1_ext_params_flt_config) +\
+ sizeof(struct rkisp1_ext_params_bdm_config) +\
+ sizeof(struct rkisp1_ext_params_ctk_config) +\
+ sizeof(struct rkisp1_ext_params_goc_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_config) +\
+ sizeof(struct rkisp1_ext_params_dpf_strength_config) +\
+ sizeof(struct rkisp1_ext_params_cproc_config) +\
+ sizeof(struct rkisp1_ext_params_ie_config) +\
+ sizeof(struct rkisp1_ext_params_awb_meas_config) +\
+ sizeof(struct rkisp1_ext_params_hst_config) +\
+ sizeof(struct rkisp1_ext_params_aec_config) +\
+ sizeof(struct rkisp1_ext_params_afc_config) +\
+ sizeof(struct rkisp1_ext_params_compand_bls_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_compand_curve_config))
+
+/**
+ * enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version
+ *
+ * @RKISP1_EXT_PARAM_BUFFER_V1: First version of RkISP1 extensible parameters
+ */
+enum rksip1_ext_param_buffer_version {
+ RKISP1_EXT_PARAM_BUFFER_V1 = 1,
+};
+
+/**
+ * struct rkisp1_ext_params_cfg - RkISP1 extensible parameters configuration
+ *
+ * This struct contains the configuration parameters of the RkISP1 ISP
+ * algorithms, serialized by userspace into a data buffer. Each configuration
+ * parameter block is represented by a block-specific structure which contains
+ * a :c:type:`rkisp1_ext_params_block_header` entry as its first member.
+ * Userspace populates the @data buffer with configuration parameters for the
+ * blocks that it intends to configure. As a consequence, the effective size of
+ * the data buffer changes according to the number of ISP blocks that userspace
+ * intends to configure, and is set by userspace in the @data_size field.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace shall populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated version and return an error if the desired version is not
+ * supported.
+ *
+ * Currently, only the RKISP1_EXT_PARAM_BUFFER_V1 version is supported.
+ * When a new format version is added, a mechanism for userspace to query the
+ * supported format versions will be implemented in the form of a read-only
+ * V4L2 control. If such a control is not available, userspace should assume
+ * that the driver only supports RKISP1_EXT_PARAM_BUFFER_V1.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after another, without gaps
+ * or overlaps. Userspace shall populate the @data_size field with the
+ * effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ * +-------------------- struct rkisp1_ext_params_cfg -------------------+
+ * | version = RKISP1_EXT_PARAM_BUFFER_V1; |
+ * | data_size = sizeof(struct rkisp1_ext_params_bls_config) |
+ * | + sizeof(struct rkisp1_ext_params_dpcc_config); |
+ * | +------------------------- data ---------------------------------+ |
+ * | | +------------- struct rkisp1_ext_params_bls_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_bls_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_bls_config -------------+ | | |
+ * | | | | enable_auto = 0; | | | |
+ * | | | | fixed_val.r = 256; | | | |
+ * | | | | fixed_val.gr = 256; | | | |
+ * | | | | fixed_val.gb = 256; | | | |
+ * | | | | fixed_val.b = 256; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +------------ struct rkisp1_ext_params_dpcc_config -----------+ | |
+ * | | | +-------- struct rkisp1_ext_params_block_header ---------+ | | |
+ * | | | | type = RKISP1_EXT_PARAMS_BLOCK_TYPE_DPCC; | | | |
+ * | | | | flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE; | | | |
+ * | | | | size = sizeof(struct rkisp1_ext_params_dpcc_config); | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | | +---------- struct rkisp1_cif_isp_dpcc_config ------------+ | | |
+ * | | | | mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; | | | |
+ * | | | | output_mode = | | | |
+ * | | | | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER; | | | |
+ * | | | | set_use = ... ; | | | |
+ * | | | | ... = ... ; | | | |
+ * | | | +---------------------------------------------------------+ | | |
+ * | | +-------------------------------------------------------------+ | |
+ * | +-----------------------------------------------------------------+ |
+ * +---------------------------------------------------------------------+
+ *
+ * @version: The RkISP1 extensible parameters buffer version, see
+ * :c:type:`rksip1_ext_param_buffer_version`
+ * @data_size: The effective size, in bytes, of the RkISP1 configuration data,
+ * excluding this header
+ * @data: The RkISP1 extensible configuration data blocks
+ */
+struct rkisp1_ext_params_cfg {
+ __u32 version;
+ __u32 data_size;
+ __u8 data[RKISP1_EXT_PARAMS_MAX_SIZE];
+};
+
+#endif /* _RKISP1_CONFIG_H */
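
For illustration, the layout above maps directly to a few lines of userspace
code. The following is a minimal sketch, not a definitive implementation:
error handling and the V4L2 plumbing needed to queue the parameters buffer
are omitted. It serializes a single enabled BLS block:

	#include <string.h>
	#include <linux/rkisp1-config.h>

	/* Fill a parameters buffer with one enabled BLS block. */
	static void fill_ext_params(struct rkisp1_ext_params_cfg *cfg)
	{
		struct rkisp1_ext_params_bls_config bls = {
			.header = {
				.type = RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
				.flags = RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE,
				.size = sizeof(bls),
			},
			.config = {
				.enable_auto = 0,
				.fixed_val = {
					.r = 256, .gr = 256, .gb = 256, .b = 256,
				},
			},
		};

		cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
		memcpy(cfg->data, &bls, sizeof(bls));
		cfg->data_size = sizeof(bls);
	}

Further blocks would be appended at cfg->data + cfg->data_size, incrementing
@data_size by the size of each appended block.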
diff --git a/include/linux/udmabuf.h b/include/linux/udmabuf.h
new file mode 100644
index 00000000..76cc7de9
--- /dev/null
+++ b/include/linux/udmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_UDMABUF_H
+#define _LINUX_UDMABUF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ __u32 memfd;
+ __u32 flags;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_item {
+ __u32 memfd;
+ __u32 __pad;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_list {
+ __u32 flags;
+ __u32 count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _LINUX_UDMABUF_H */
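
This interface wraps anonymous memory exported by memfd_create() into a
dma-buf. A rough sketch of its intended use follows (illustrative only,
without error checking; note that udmabuf requires the backing memfd to be
sealed against shrinking, which is why the build system probes for
F_ADD_SEALS):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/udmabuf.h>

	/* Wrap a page-sized memfd in a dma-buf via /dev/udmabuf. */
	static int create_udmabuf(void)
	{
		int memfd = memfd_create("frame", MFD_ALLOW_SEALING);
		ftruncate(memfd, 4096);
		fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

		struct udmabuf_create create = {
			.memfd = memfd,
			.flags = UDMABUF_FLAGS_CLOEXEC,
			.offset = 0,
			.size = 4096,
		};

		int dev = open("/dev/udmabuf", O_RDWR);
		int dmabuf = ioctl(dev, UDMABUF_CREATE, &create);

		close(dev);
		close(memfd);
		return dmabuf;
	}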
diff --git a/include/linux/v4l2-controls.h b/include/linux/v4l2-controls.h
index b9f64810..882a8180 100644
--- a/include/linux/v4l2-controls.h
+++ b/include/linux/v4l2-controls.h
@@ -211,6 +211,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+/*
+ * The base for THine THP7312 driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -892,6 +898,8 @@ enum v4l2_mpeg_video_av1_level {
V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
};
+#define V4L2_CID_MPEG_VIDEO_AVERAGE_QP (V4L2_CID_CODEC_BASE + 657)
+
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
diff --git a/include/linux/v4l2-mediabus.h b/include/linux/v4l2-mediabus.h
index 2c318de1..097ef739 100644
--- a/include/linux/v4l2-mediabus.h
+++ b/include/linux/v4l2-mediabus.h
@@ -19,12 +19,18 @@
* @width: image width
* @height: image height
* @code: data format code (from enum v4l2_mbus_pixelcode)
- * @field: used interlacing type (from enum v4l2_field)
- * @colorspace: colorspace of the data (from enum v4l2_colorspace)
- * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding)
- * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding)
- * @quantization: quantization of the data (from enum v4l2_quantization)
- * @xfer_func: transfer function of the data (from enum v4l2_xfer_func)
+ * @field: used interlacing type (from enum v4l2_field), zero for metadata
+ * mbus codes
+ * @colorspace: colorspace of the data (from enum v4l2_colorspace), zero on
+ * metadata mbus codes
+ * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding), zero
+ * for metadata mbus codes
+ * @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding), zero for
+ * metadata mbus codes
+ * @quantization: quantization of the data (from enum v4l2_quantization), zero
+ * for metadata mbus codes
+ * @xfer_func: transfer function of the data (from enum v4l2_xfer_func), zero
+ * for metadata mbus codes
* @flags: flags (V4L2_MBUS_FRAMEFMT_*)
* @reserved: reserved bytes that can be later used
*/
diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h
index b383c2fe..839b1329 100644
--- a/include/linux/v4l2-subdev.h
+++ b/include/linux/v4l2-subdev.h
@@ -50,6 +50,10 @@ struct v4l2_subdev_format {
* @rect: pad crop rectangle boundaries
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
+ *
+ * The subdev crop API is an obsolete interface and may be removed in the
+ * future. It is superseded by the selection API. No new extensions to this
+ * structure will be accepted.
*/
struct v4l2_subdev_crop {
__u32 which;
@@ -116,13 +120,15 @@ struct v4l2_subdev_frame_size_enum {
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
* @stream: stream number, defined in subdev routing
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
__u32 stream;
- __u32 reserved[8];
+ __u32 which;
+ __u32 reserved[7];
};
/**
@@ -133,7 +139,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
- * @which: format type (from enum v4l2_subdev_format_whence)
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
@@ -198,6 +204,11 @@ struct v4l2_subdev_capability {
* on a video node.
*/
#define V4L2_SUBDEV_ROUTE_FL_ACTIVE (1U << 0)
+/*
+ * Is the route immutable? The ACTIVE flag of an immutable route may not be
+ * unset.
+ */
+#define V4L2_SUBDEV_ROUTE_FL_IMMUTABLE (1U << 1)
/**
* struct v4l2_subdev_route - A route inside a subdev
@@ -222,15 +233,19 @@ struct v4l2_subdev_route {
* struct v4l2_subdev_routing - Subdev routing information
*
* @which: configuration type (from enum v4l2_subdev_format_whence)
- * @num_routes: the total number of routes in the routes array
+ * @len_routes: the length of the routes array, in routes; set by the user, not
+ * modified by the kernel
* @routes: pointer to the routes array
+ * @num_routes: the total number of routes, possibly more than fits in the
+ * routes array
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_routing {
__u32 which;
- __u32 num_routes;
+ __u32 len_routes;
__u64 routes;
- __u32 reserved[6];
+ __u32 num_routes;
+ __u32 reserved[11];
};
/*
@@ -239,7 +254,14 @@ struct v4l2_subdev_routing {
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+/*
+ * The client is aware of the 'which' field of struct v4l2_subdev_frame_interval.
+ * If this capability is not set (which is the default), the 'which' field is
+ * forced to V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1)
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing
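
The @len_routes/@num_routes split enables the usual two-call pattern for
variably sized ioctl arguments. A hedged sketch (no error handling) of how an
application might retrieve the complete active routing table of a subdev:

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/v4l2-subdev.h>

	static struct v4l2_subdev_route *get_routes(int fd, __u32 *count)
	{
		struct v4l2_subdev_routing routing = {
			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
			.len_routes = 0,
		};

		/* First call: the kernel reports the total in num_routes. */
		ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &routing);

		struct v4l2_subdev_route *routes =
			calloc(routing.num_routes, sizeof(*routes));

		/* Second call: fetch the routes into a suitably sized array. */
		routing.len_routes = routing.num_routes;
		routing.routes = (__u64)(uintptr_t)routes;
		ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &routing);

		*count = routing.num_routes;
		return routes;
	}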
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 7e556911..317d063a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -555,6 +555,8 @@ struct v4l2_pix_format {
/* RGB formats (6 or 8 bytes per pixel) */
#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */
+#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('B', 'G', 'R', '6') /* 48 BGR 16-bit per component */
+#define V4L2_PIX_FMT_RGB48 v4l2_fourcc('R', 'G', 'B', '6') /* 48 RGB 16-bit per component */
#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */
/* Grey formats */
@@ -572,6 +574,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */
+#define V4L2_PIX_FMT_Y12P v4l2_fourcc('Y', '1', '2', 'P') /* 12 Greyscale, MIPI RAW12 packed */
+#define V4L2_PIX_FMT_Y14P v4l2_fourcc('Y', '1', '4', 'P') /* 14 Greyscale, MIPI RAW14 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -785,6 +789,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
+/* Raspberry Pi PiSP compressed formats. */
+#define V4L2_PIX_FMT_PISP_COMP1_RGGB v4l2_fourcc('P', 'C', '1', 'R') /* PiSP 8-bit mode 1 compressed RGGB bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_GRBG v4l2_fourcc('P', 'C', '1', 'G') /* PiSP 8-bit mode 1 compressed GRBG bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_GBRG v4l2_fourcc('P', 'C', '1', 'g') /* PiSP 8-bit mode 1 compressed GBRG bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_BGGR v4l2_fourcc('P', 'C', '1', 'B') /* PiSP 8-bit mode 1 compressed BGGR bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_MONO v4l2_fourcc('P', 'C', '1', 'M') /* PiSP 8-bit mode 1 compressed monochrome */
+#define V4L2_PIX_FMT_PISP_COMP2_RGGB v4l2_fourcc('P', 'C', '2', 'R') /* PiSP 8-bit mode 2 compressed RGGB bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_GRBG v4l2_fourcc('P', 'C', '2', 'G') /* PiSP 8-bit mode 2 compressed GRBG bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_GBRG v4l2_fourcc('P', 'C', '2', 'g') /* PiSP 8-bit mode 2 compressed GBRG bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */
+
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -813,6 +829,31 @@ struct v4l2_pix_format {
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+#define V4L2_META_FMT_RK_ISP1_EXT_PARAMS v4l2_fourcc('R', 'K', '1', 'E') /* Rockchip ISP1 3A Extensible Parameters */
+
+/* Vendor specific - used for RaspberryPi PiSP */
+#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
+
+/* The metadata format identifier for FE configuration buffers. */
+#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C')
+
+/* The metadata format identifier for FE stats buffers. */
+#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S')
+
+#define V4L2_META_FMT_MALI_C55_PARAMS v4l2_fourcc('C', '5', '5', 'P') /* ARM Mali-C55 Parameters */
+#define V4L2_META_FMT_MALI_C55_3A_STATS v4l2_fourcc('C', '5', '5', 'S') /* ARM Mali-C55 3A Statistics */
+
+/*
+ * Line-based metadata formats. Remember to update v4l_fill_fmtdesc() when
+ * adding new ones!
+ */
+#define V4L2_META_FMT_GENERIC_8 v4l2_fourcc('M', 'E', 'T', '8') /* Generic 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_10 v4l2_fourcc('M', 'C', '1', 'A') /* 10-bit CSI-2 packed 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_12 v4l2_fourcc('M', 'C', '1', 'C') /* 12-bit CSI-2 packed 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_14 v4l2_fourcc('M', 'C', '1', 'E') /* 14-bit CSI-2 packed 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_16 v4l2_fourcc('M', 'C', '1', 'G') /* 16-bit CSI-2 packed 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_20 v4l2_fourcc('M', 'C', '1', 'K') /* 20-bit CSI-2 packed 8-bit metadata */
+#define V4L2_META_FMT_GENERIC_CSI2_24 v4l2_fourcc('M', 'C', '1', 'O') /* 24-bit CSI-2 packed 8-bit metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -844,6 +885,7 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080
#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC
#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
+#define V4L2_FMT_FLAG_META_LINE_BASED 0x0200
/* Frame Size and frame rate enumeration */
/*
@@ -993,18 +1035,20 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
+#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7)
+#define V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS (1 << 8)
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
* @length: size of this plane (NOT the payload) in bytes
- * @mem_offset: when memory in the associated struct v4l2_buffer is
+ * @m.mem_offset: when memory in the associated struct v4l2_buffer is
* V4L2_MEMORY_MMAP, equals the offset from the start of
* the device memory for this plane (or is a "cookie" that
* should be passed to mmap() called on the video node)
- * @userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
+ * @m.userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
* pointing to this plane
- * @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
+ * @m.fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
* descriptor associated with this plane
* @m: union of @mem_offset, @userptr and @fd
* @data_offset: offset in the plane to the start of data; usually 0,
@@ -1042,14 +1086,14 @@ struct v4l2_plane {
* @sequence: sequence count of this frame
* @memory: enum v4l2_memory; the method, in which the actual video data is
* passed
- * @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
+ * @m.offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
* offset from the start of the device memory for this plane,
* (or a "cookie" that should be passed to mmap() as offset)
- * @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
+ * @m.userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
* a userspace pointer pointing to this buffer
- * @fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
+ * @m.fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
* a userspace file descriptor associated with this buffer
- * @planes: for multiplanar buffers; userspace pointer to the array of plane
+ * @m.planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
* @m: union of @offset, @userptr, @planes and @fd
* @length: size in bytes of the buffer (NOT its payload) for single-plane
@@ -1787,8 +1831,10 @@ struct v4l2_ext_control {
struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
struct v4l2_ctrl_av1_frame *p_av1_frame;
struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
+ struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll_info;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering_display;
void *ptr;
- };
+ } __attribute__ ((packed));
} __attribute__ ((packed));
struct v4l2_ext_controls {
@@ -2358,23 +2404,32 @@ struct v4l2_sdr_format {
* struct v4l2_meta_format - metadata format definition
* @dataformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
+ * @width: number of data units per line (valid for line-based
+ * formats only, see the format documentation)
+ * @height: number of lines of data per buffer (valid for line-based
+ * formats only)
+ * @bytesperline: offset between the beginnings of two adjacent lines, in
+ * bytes (valid for line-based formats only)
*/
struct v4l2_meta_format {
__u32 dataformat;
__u32 buffersize;
+ __u32 width;
+ __u32 height;
+ __u32 bytesperline;
} __attribute__ ((packed));
/**
* struct v4l2_format - stream data format
- * @type: enum v4l2_buf_type; type of the data stream
- * @pix: definition of an image format
- * @pix_mp: definition of a multiplanar image format
- * @win: definition of an overlaid image
- * @vbi: raw VBI capture or output parameters
- * @sliced: sliced VBI capture or output parameters
- * @raw_data: placeholder for future extensions and custom formats
- * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr, @meta
- * and @raw_data
+ * @type: enum v4l2_buf_type; type of the data stream
+ * @fmt.pix: definition of an image format
+ * @fmt.pix_mp: definition of a multiplanar image format
+ * @fmt.win: definition of an overlaid image
+ * @fmt.vbi: raw VBI capture or output parameters
+ * @fmt.sliced: sliced VBI capture or output parameters
+ * @fmt.raw_data: placeholder for future extensions and custom formats
+ * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr,
+ * @meta and @raw_data
*/
struct v4l2_format {
__u32 type;
@@ -2547,6 +2602,9 @@ struct v4l2_dbg_chip_info {
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
* and configured for MMAP streaming I/O).
+ * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability
+ * flag is set, this field indicates the maximum possible number of
+ * buffers for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2556,7 +2614,22 @@ struct v4l2_create_buffers {
struct v4l2_format format;
__u32 capabilities;
__u32 flags;
- __u32 reserved[6];
+ __u32 max_num_buffers;
+ __u32 reserved[5];
+};
+
+/**
+ * struct v4l2_remove_buffers - VIDIOC_REMOVE_BUFS argument
+ * @index: the first buffer to be removed
+ * @count: number of buffers to be removed
+ * @type: enum v4l2_buf_type
+ * @reserved: future extensions
+ */
+struct v4l2_remove_buffers {
+ __u32 index;
+ __u32 count;
+ __u32 type;
+ __u32 reserved[13];
};
/*
@@ -2658,6 +2731,8 @@ struct v4l2_create_buffers {
#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info)
#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
+#define VIDIOC_REMOVE_BUFS _IOWR('V', 104, struct v4l2_remove_buffers)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */
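
On queues that report V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS, the new ioctl lets an
application release a contiguous range of buffers without reallocating the
whole queue. A minimal sketch, assuming the buffers are not currently in use
and omitting error handling:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Remove buffers 4..7 from a video capture queue. */
	static int remove_bufs(int fd)
	{
		struct v4l2_remove_buffers removebufs;

		memset(&removebufs, 0, sizeof(removebufs));
		removebufs.index = 4;
		removebufs.count = 4;
		removebufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

		return ioctl(fd, VIDIOC_REMOVE_BUFS, &removebufs);
	}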
diff --git a/meson.build b/meson.build
index 740ead1b..f8d32dcf 100644
--- a/meson.build
+++ b/meson.build
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: CC0-1.0
project('libcamera', 'c', 'cpp',
- meson_version : '>= 0.60',
- version : '0.2.0',
+ meson_version : '>= 0.63',
+ version : '0.4.0',
default_options : [
'werror=true',
'warning_level=2',
@@ -74,6 +74,10 @@ cc = meson.get_compiler('c')
cxx = meson.get_compiler('cpp')
config_h = configuration_data()
+if cc.has_header_symbol('fcntl.h', 'F_ADD_SEALS', prefix : '#define _GNU_SOURCE')
+ config_h.set('HAVE_FILE_SEALS', 1)
+endif
+
if cc.has_header_symbol('unistd.h', 'issetugid')
config_h.set('HAVE_ISSETUGID', 1)
endif
@@ -82,17 +86,35 @@ if cc.has_header_symbol('locale.h', 'locale_t', prefix : '#define _GNU_SOURCE')
config_h.set('HAVE_LOCALE_T', 1)
endif
+if cc.has_header_symbol('sys/mman.h', 'memfd_create', prefix : '#define _GNU_SOURCE')
+ config_h.set('HAVE_MEMFD_CREATE', 1)
+endif
+
+ioctl_posix_test = '''
+#include <sys/ioctl.h>
+int ioctl (int, int, ...);
+'''
+
+if cc.compiles(ioctl_posix_test)
+ config_h.set('HAVE_POSIX_IOCTL', 1)
+endif
+
if cc.has_header_symbol('stdlib.h', 'secure_getenv', prefix : '#define _GNU_SOURCE')
config_h.set('HAVE_SECURE_GETENV', 1)
endif
common_arguments = [
+ '-Wmissing-declarations',
'-Wshadow',
'-include', meson.current_build_dir() / 'config.h',
]
c_arguments = []
-cpp_arguments = []
+cpp_arguments = [
+ '-Wnon-virtual-dtor',
+]
+
+cxx_stdlib = 'libstdc++'
if cc.get_id() == 'clang'
if cc.version().version_compare('<9')
@@ -119,6 +141,7 @@ if cc.get_id() == 'clang'
cpp_arguments += [
'-stdlib=libc++',
]
+ cxx_stdlib = 'libc++'
endif
cpp_arguments += [
@@ -128,16 +151,8 @@ if cc.get_id() == 'clang'
endif
if cc.get_id() == 'gcc'
- if cc.version().version_compare('<8')
- error('gcc version is too old, libcamera requires 8.0 or newer')
- endif
-
- # On gcc 8, the file system library is provided in a separate static
- # library.
if cc.version().version_compare('<9')
- cpp_arguments += [
- '-lstdc++fs',
- ]
+ error('gcc version is too old, libcamera requires 9.0 or newer')
endif
# gcc 13 implements the C++23 version of automatic move from local
@@ -191,7 +206,7 @@ liblttng = dependency('lttng-ust', required : get_option('tracing'))
# Pipeline handlers
#
-pipelines = get_option('pipelines')
+wanted_pipelines = get_option('pipelines')
arch_arm = ['arm', 'aarch64']
arch_x86 = ['x86', 'x86_64']
@@ -201,21 +216,24 @@ pipelines_support = {
'mali-c55': arch_arm,
'rkisp1': arch_arm,
'rpi/vc4': arch_arm,
- 'simple': arch_arm,
+ 'simple': ['any'],
'uvcvideo': ['any'],
'vimc': ['test'],
+ 'virtual': ['test'],
}
-if pipelines.contains('all')
+if wanted_pipelines.contains('all')
pipelines = pipelines_support.keys()
-elif pipelines.contains('auto')
+elif wanted_pipelines.contains('auto')
host_cpu = host_machine.cpu_family()
pipelines = []
foreach pipeline, archs : pipelines_support
- if host_cpu in archs or 'any' in archs
+ if pipeline in wanted_pipelines or host_cpu in archs or 'any' in archs
pipelines += pipeline
endif
endforeach
+else
+ pipelines = wanted_pipelines
endif
# Tests require the vimc pipeline handler, include it automatically when tests
@@ -268,8 +286,8 @@ py_mod.find_installation('python3', modules : py_modules)
summary({
'Enabled pipelines': pipelines,
'Enabled IPA modules': enabled_ipa_names,
- 'Controls files': controls_files,
- 'Properties files': properties_files,
+ 'Controls files': controls_files_names,
+ 'Properties files': properties_files_names,
'Hotplug support': libudev.found(),
'Tracing support': tracing_enabled,
'Android support': android_enabled,
diff --git a/meson_options.txt b/meson_options.txt
index 7c4f6d3a..f19bca91 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -20,6 +20,11 @@ option('documentation',
type : 'feature',
description : 'Generate the project documentation')
+option('doc_werror',
+ type : 'boolean',
+ value : false,
+ description : 'Treat documentation warnings as errors')
+
option('gstreamer',
type : 'feature',
value : 'auto',
@@ -27,7 +32,7 @@ option('gstreamer',
option('ipas',
type : 'array',
- choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'vimc'],
+ choices : ['ipu3', 'mali-c55', 'rkisp1', 'rpi/vc4', 'simple', 'vimc'],
description : 'Select which IPA modules to build')
option('lc-compliance',
@@ -48,7 +53,8 @@ option('pipelines',
'rpi/vc4',
'simple',
'uvcvideo',
- 'vimc'
+ 'vimc',
+ 'virtual'
],
description : 'Select which pipeline handlers to build. If this is set to "auto", all the pipelines applicable to the target architecture will be built. If this is set to "all", all the pipelines will be built. If both are selected then "all" will take precedence.')
@@ -78,6 +84,7 @@ option('udev',
description : 'Enable udev support for hotplug')
option('v4l2',
- type : 'boolean',
- value : false,
- description : 'Compile the V4L2 compatibility layer')
+ type : 'feature',
+ value : 'auto',
+ description : 'Compile the V4L2 compatibility layer',
+ deprecated : {'true': 'enabled', 'false': 'disabled'})
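
As an example of the resulting option surface, a build limited to the rkisp1
and uvcvideo pipeline handlers with the V4L2 compatibility layer enabled
could be configured (illustratively) with:

	meson setup build -Dpipelines=rkisp1,uvcvideo -Dipas=rkisp1 -Dv4l2=enabled

With 'v4l2' now a feature option, the old boolean values remain accepted and
are mapped by the deprecation table above: 'true' becomes 'enabled' and
'false' becomes 'disabled'.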
diff --git a/src/android/camera3_hal.cpp b/src/android/camera3_hal.cpp
index da836bae..a5ad2374 100644
--- a/src/android/camera3_hal.cpp
+++ b/src/android/camera3_hal.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera3_hal.cpp - Android Camera HALv3 module
+ * Android Camera HALv3 module
*/
#include <hardware/camera_common.h>
diff --git a/src/android/camera_buffer.h b/src/android/camera_buffer.h
index b4531c80..96669962 100644
--- a/src/android/camera_buffer.h
+++ b/src/android/camera_buffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_buffer.h - Frame buffer handling interface definition
+ * Frame buffer handling interface definition
*/
#pragma once
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
index 1bfeaea4..b161bc6b 100644
--- a/src/android/camera_capabilities.cpp
+++ b/src/android/camera_capabilities.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_capabilities.cpp - Camera static properties manager
+ * Camera static properties manager
*/
#include "camera_capabilities.h"
@@ -11,6 +11,7 @@
#include <array>
#include <cmath>
#include <map>
+#include <stdint.h>
#include <type_traits>
#include <hardware/camera3.h>
@@ -1081,7 +1082,7 @@ int CameraCapabilities::initializeStaticMetadata()
}
{
- const Span<const Rectangle> &rects =
+ const Span<const Rectangle> rects =
properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
std::vector<int32_t> data{
static_cast<int32_t>(rects[0].x),
@@ -1176,11 +1177,46 @@ int CameraCapabilities::initializeStaticMetadata()
maxFrameDuration_);
/* Statistics static metadata. */
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- faceDetectMode);
-
int32_t maxFaceCount = 0;
+ auto iter = camera_->controls().find(controls::draft::FaceDetectMode.id());
+ if (iter != camera_->controls().end()) {
+ const ControlInfo &faceDetectCtrlInfo = iter->second;
+ std::vector<uint8_t> faceDetectModes;
+ bool hasFaceDetection = false;
+
+ for (const auto &value : faceDetectCtrlInfo.values()) {
+ int32_t mode = value.get<int32_t>();
+ uint8_t androidMode = 0;
+
+ switch (mode) {
+ case controls::draft::FaceDetectModeOff:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ break;
+ case controls::draft::FaceDetectModeSimple:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
+ hasFaceDetection = true;
+ break;
+ default:
+ LOG(HAL, Fatal) << "Received invalid face detect mode: " << mode;
+ }
+ faceDetectModes.push_back(androidMode);
+ }
+ if (hasFaceDetection) {
+ /*
+ * \todo Create new libcamera controls to query max
+ * possible faces detected.
+ */
+ maxFaceCount = 10;
+ staticMetadata_->addEntry(
+ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectModes.data(), faceDetectModes.size());
+ }
+ } else {
+ uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectMode);
+ }
+
staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
maxFaceCount);
diff --git a/src/android/camera_capabilities.h b/src/android/camera_capabilities.h
index 6f66f221..56ac1efe 100644
--- a/src/android/camera_capabilities.h
+++ b/src/android/camera_capabilities.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_capabilities.h - Camera static properties manager
+ * Camera static properties manager
*/
#pragma once
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index d2679a97..a038131a 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.cpp - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#include "camera_device.h"
@@ -22,6 +22,7 @@
#include <libcamera/controls.h>
#include <libcamera/fence.h>
#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
#include <libcamera/property_ids.h>
#include "system/graphics.h"
@@ -433,8 +434,6 @@ void CameraDevice::flush()
void CameraDevice::stop()
{
MutexLocker stateLock(stateMutex_);
- if (state_ == State::Stopped)
- return;
camera_->stop();
@@ -815,6 +814,11 @@ int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
controls.set(controls::ScalerCrop, cropRegion);
}
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) {
+ const uint8_t *data = entry.data.u8;
+ controls.set(controls::draft::FaceDetectMode, data[0]);
+ }
+
if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) {
const int32_t data = *entry.data.i32;
int32_t testPatternMode = controls::draft::TestPatternModeOff;
@@ -1542,8 +1546,9 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32);
- value = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, value);
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry))
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ entry.data.u8, 1);
value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
@@ -1582,6 +1587,61 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons
resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
*frameDuration * 1000);
+ const auto &faceDetectRectangles =
+ metadata.get(controls::draft::FaceDetectFaceRectangles);
+ if (faceDetectRectangles) {
+ std::vector<int32_t> flatRectangles;
+ for (const Rectangle &rect : *faceDetectRectangles) {
+ flatRectangles.push_back(rect.x);
+ flatRectangles.push_back(rect.y);
+ flatRectangles.push_back(rect.x + rect.width);
+ flatRectangles.push_back(rect.y + rect.height);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
+ }
+
+ const auto &faceDetectFaceScores =
+ metadata.get(controls::draft::FaceDetectFaceScores);
+ if (faceDetectRectangles && faceDetectFaceScores) {
+ if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceScores->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
+ *faceDetectFaceScores);
+ }
+
+ const auto &faceDetectFaceLandmarks =
+ metadata.get(controls::draft::FaceDetectFaceLandmarks);
+ if (faceDetectRectangles && faceDetectFaceLandmarks) {
+ size_t expectedLandmarks = faceDetectRectangles->size() * 3;
+ if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
+ << "Expected: " << expectedLandmarks
+ << ", got: " << faceDetectFaceLandmarks->size();
+ }
+
+ std::vector<int32_t> androidLandmarks;
+ for (const Point &landmark : *faceDetectFaceLandmarks) {
+ androidLandmarks.push_back(landmark.x);
+ androidLandmarks.push_back(landmark.y);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
+ }
+
+ const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
+ if (faceDetectRectangles && faceDetectFaceIds) {
+ if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceIds->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
+ }
+
const auto &scalerCrop = metadata.get(controls::ScalerCrop);
if (scalerCrop) {
const Rectangle &crop = *scalerCrop;
diff --git a/src/android/camera_device.h b/src/android/camera_device.h
index 43ee0159..194ca303 100644
--- a/src/android/camera_device.h
+++ b/src/android/camera_device.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.h - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#pragma once
diff --git a/src/android/camera_hal_config.cpp b/src/android/camera_hal_config.cpp
index 0e7cde63..7ef451ef 100644
--- a/src/android/camera_hal_config.cpp
+++ b/src/android/camera_hal_config.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_hal_config.cpp - Camera HAL configuration file manager
+ * Camera HAL configuration file manager
*/
#include "camera_hal_config.h"
diff --git a/src/android/camera_hal_config.h b/src/android/camera_hal_config.h
index 9df554f9..a4bedb6e 100644
--- a/src/android/camera_hal_config.h
+++ b/src/android/camera_hal_config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_hal_config.h - Camera HAL configuration file manager
+ * Camera HAL configuration file manager
*/
#pragma once
diff --git a/src/android/camera_hal_manager.cpp b/src/android/camera_hal_manager.cpp
index a86e23d4..7500c749 100644
--- a/src/android/camera_hal_manager.cpp
+++ b/src/android/camera_hal_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.cpp - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#include "camera_hal_manager.h"
diff --git a/src/android/camera_hal_manager.h b/src/android/camera_hal_manager.h
index a5f8b933..836a8daf 100644
--- a/src/android/camera_hal_manager.h
+++ b/src/android/camera_hal_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.h - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#pragma once
diff --git a/src/android/camera_metadata.cpp b/src/android/camera_metadata.cpp
index b3e515d2..99f033f9 100644
--- a/src/android/camera_metadata.cpp
+++ b/src/android/camera_metadata.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.cpp - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#include "camera_metadata.h"
diff --git a/src/android/camera_metadata.h b/src/android/camera_metadata.h
index 0c31ec6b..474f280c 100644
--- a/src/android/camera_metadata.h
+++ b/src/android/camera_metadata.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.h - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#pragma once
diff --git a/src/android/camera_ops.cpp b/src/android/camera_ops.cpp
index 8a3cfa17..ecaac5a3 100644
--- a/src/android/camera_ops.cpp
+++ b/src/android/camera_ops.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#include "camera_ops.h"
diff --git a/src/android/camera_ops.h b/src/android/camera_ops.h
index b501bb7e..750dc945 100644
--- a/src/android/camera_ops.h
+++ b/src/android/camera_ops.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#pragma once
diff --git a/src/android/camera_request.cpp b/src/android/camera_request.cpp
index 6c87adba..0d45960d 100644
--- a/src/android/camera_request.cpp
+++ b/src/android/camera_request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Google Inc.
*
- * camera_request.cpp - libcamera Android Camera Request Descriptor
+ * libcamera Android Camera Request Descriptor
*/
#include "camera_request.h"
diff --git a/src/android/camera_request.h b/src/android/camera_request.h
index 20aba79d..5b479180 100644
--- a/src/android/camera_request.h
+++ b/src/android/camera_request.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Google Inc.
*
- * camera_request.h - libcamera Android Camera Request Descriptor
+ * libcamera Android Camera Request Descriptor
*/
#pragma once
diff --git a/src/android/camera_stream.cpp b/src/android/camera_stream.cpp
index 045e6006..1d68540d 100644
--- a/src/android/camera_stream.cpp
+++ b/src/android/camera_stream.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * camera_stream.cpp - Camera HAL stream
+ * Camera HAL stream
*/
#include "camera_stream.h"
diff --git a/src/android/camera_stream.h b/src/android/camera_stream.h
index 4c5078b2..395552da 100644
--- a/src/android/camera_stream.h
+++ b/src/android/camera_stream.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * camera_stream.h - Camera HAL stream
+ * Camera HAL stream
*/
#pragma once
diff --git a/src/android/cros/camera3_hal.cpp b/src/android/cros/camera3_hal.cpp
index 71acb441..6010a5ad 100644
--- a/src/android/cros/camera3_hal.cpp
+++ b/src/android/cros/camera3_hal.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera3_hal.cpp - cros-specific components of Android Camera HALv3 module
+ * cros-specific components of Android Camera HALv3 module
*/
#include <cros-camera/cros_camera_hal.h>
diff --git a/src/android/cros_mojo_token.h b/src/android/cros_mojo_token.h
index 043c752a..d0baa80f 100644
--- a/src/android/cros_mojo_token.h
+++ b/src/android/cros_mojo_token.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * cros_mojo_token.h - cros-specific mojo token
+ * cros-specific mojo token
*/
#pragma once
diff --git a/src/android/frame_buffer_allocator.h b/src/android/frame_buffer_allocator.h
index e5c94922..3e68641c 100644
--- a/src/android/frame_buffer_allocator.h
+++ b/src/android/frame_buffer_allocator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * frame_buffer_allocator.h - Interface definition to allocate Frame buffer in
+ * Interface definition to allocate frame buffers in a
* platform dependent way.
*/
#ifndef __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
diff --git a/src/android/hal_framebuffer.cpp b/src/android/hal_framebuffer.cpp
index 3f3d1ed1..d4899f45 100644
--- a/src/android/hal_framebuffer.cpp
+++ b/src/android/hal_framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * hal_framebuffer.cpp - HAL Frame Buffer Handling
+ * HAL Frame Buffer Handling
*/
#include "hal_framebuffer.h"
diff --git a/src/android/hal_framebuffer.h b/src/android/hal_framebuffer.h
index dc96a7e1..cea49e2d 100644
--- a/src/android/hal_framebuffer.h
+++ b/src/android/hal_framebuffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * hal_framebuffer.h - HAL Frame Buffer Handling
+ * HAL Frame Buffer Handling
*/
#pragma once
diff --git a/src/android/jpeg/encoder.h b/src/android/jpeg/encoder.h
index 31f26895..ed033c19 100644
--- a/src/android/jpeg/encoder.h
+++ b/src/android/jpeg/encoder.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder.h - Image encoding interface
+ * Image encoding interface
*/
#pragma once
diff --git a/src/android/jpeg/encoder_jea.cpp b/src/android/jpeg/encoder_jea.cpp
index 7880a6bd..25dc4317 100644
--- a/src/android/jpeg/encoder_jea.cpp
+++ b/src/android/jpeg/encoder_jea.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * encoder_jea.cpp - JPEG encoding using CrOS JEA
+ * JPEG encoding using CrOS JEA
*/
#include "encoder_jea.h"
diff --git a/src/android/jpeg/encoder_jea.h b/src/android/jpeg/encoder_jea.h
index ffe9df27..91115d2e 100644
--- a/src/android/jpeg/encoder_jea.h
+++ b/src/android/jpeg/encoder_jea.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * encoder_jea.h - JPEG encoding using CrOS JEA
+ * JPEG encoding using CrOS JEA
*/
#pragma once
diff --git a/src/android/jpeg/encoder_libjpeg.cpp b/src/android/jpeg/encoder_libjpeg.cpp
index f4e8dfad..cb242b5e 100644
--- a/src/android/jpeg/encoder_libjpeg.cpp
+++ b/src/android/jpeg/encoder_libjpeg.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder_libjpeg.cpp - JPEG encoding using libjpeg native API
+ * JPEG encoding using libjpeg native API
*/
#include "encoder_libjpeg.h"
@@ -125,7 +125,7 @@ void EncoderLibJpeg::compressRGB(const std::vector<Span<uint8_t>> &planes)
*/
void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes)
{
- uint8_t tmprowbuf[compress_.image_width * 3];
+ std::vector<uint8_t> tmprowbuf(compress_.image_width * 3);
/*
* \todo Use the raw api, and only unpack the cb/cr samples to new line
@@ -149,10 +149,10 @@ void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes)
const unsigned char *src_c = planes[1].data();
JSAMPROW row_pointer[1];
- row_pointer[0] = &tmprowbuf[0];
+ row_pointer[0] = tmprowbuf.data();
for (unsigned int y = 0; y < compress_.image_height; y++) {
- unsigned char *dst = &tmprowbuf[0];
+ unsigned char *dst = tmprowbuf.data();
const unsigned char *src_y = src + y * y_stride;
const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos;
diff --git a/src/android/jpeg/encoder_libjpeg.h b/src/android/jpeg/encoder_libjpeg.h
index 146a6a72..4ac85c22 100644
--- a/src/android/jpeg/encoder_libjpeg.h
+++ b/src/android/jpeg/encoder_libjpeg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * encoder_libjpeg.h - JPEG encoding using libjpeg
+ * JPEG encoding using libjpeg
*/
#pragma once
diff --git a/src/android/jpeg/exif.cpp b/src/android/jpeg/exif.cpp
index 6b1d0f1f..b8c871df 100644
--- a/src/android/jpeg/exif.cpp
+++ b/src/android/jpeg/exif.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * exif.cpp - EXIF tag creation using libexif
+ * EXIF tag creation using libexif
*/
#include "exif.h"
diff --git a/src/android/jpeg/exif.h b/src/android/jpeg/exif.h
index e68716f3..446d53f3 100644
--- a/src/android/jpeg/exif.h
+++ b/src/android/jpeg/exif.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * exif.h - EXIF tag creator using libexif
+ * EXIF tag creator using libexif
*/
#pragma once
diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
index 40261652..89b8a401 100644
--- a/src/android/jpeg/post_processor_jpeg.cpp
+++ b/src/android/jpeg/post_processor_jpeg.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor_jpeg.cpp - JPEG Post Processor
+ * JPEG Post Processor
*/
#include "post_processor_jpeg.h"
diff --git a/src/android/jpeg/post_processor_jpeg.h b/src/android/jpeg/post_processor_jpeg.h
index 98309b01..6fe21457 100644
--- a/src/android/jpeg/post_processor_jpeg.h
+++ b/src/android/jpeg/post_processor_jpeg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor_jpeg.h - JPEG Post Processor
+ * JPEG Post Processor
*/
#pragma once
diff --git a/src/android/jpeg/thumbnailer.cpp b/src/android/jpeg/thumbnailer.cpp
index 41c71c76..adafc468 100644
--- a/src/android/jpeg/thumbnailer.cpp
+++ b/src/android/jpeg/thumbnailer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * thumbnailer.cpp - Simple image thumbnailer
+ * Simple image thumbnailer
*/
#include "thumbnailer.h"
diff --git a/src/android/jpeg/thumbnailer.h b/src/android/jpeg/thumbnailer.h
index d933cf0e..1b836e59 100644
--- a/src/android/jpeg/thumbnailer.h
+++ b/src/android/jpeg/thumbnailer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * thumbnailer.h - Simple image thumbnailer
+ * Simple image thumbnailer
*/
#pragma once
diff --git a/src/android/meson.build b/src/android/meson.build
index 68646120..7b226a4b 100644
--- a/src/android/meson.build
+++ b/src/android/meson.build
@@ -4,6 +4,7 @@ android_deps = [
dependency('libexif', required : get_option('android')),
dependency('libjpeg', required : get_option('android')),
libcamera_private,
+ libyuv_dep,
]
android_enabled = true
@@ -15,27 +16,6 @@ foreach dep : android_deps
endif
endforeach
-libyuv_dep = dependency('libyuv', required : false)
-
-# Fallback to a subproject if libyuv isn't found, as it's typically not
-# provided by distributions.
-if not libyuv_dep.found()
- cmake = import('cmake')
-
- libyuv_vars = cmake.subproject_options()
- libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
- libyuv_vars.set_override_option('cpp_std', 'c++17')
- libyuv_vars.append_compile_args('cpp',
- '-Wno-sign-compare',
- '-Wno-unused-variable',
- '-Wno-unused-parameter')
- libyuv_vars.append_link_args('-ljpeg')
- libyuv = cmake.subproject('libyuv', options : libyuv_vars)
- libyuv_dep = libyuv.dependency('yuv')
-endif
-
-android_deps += [libyuv_dep]
-
android_hal_sources = files([
'camera3_hal.cpp',
'camera_capabilities.cpp',
diff --git a/src/android/mm/cros_camera_buffer.cpp b/src/android/mm/cros_camera_buffer.cpp
index 2ac3dc4a..e2a44a2a 100644
--- a/src/android/mm/cros_camera_buffer.cpp
+++ b/src/android/mm/cros_camera_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * cros_camera_buffer.cpp - Chromium OS buffer backend using CameraBufferManager
+ * Chromium OS buffer backend using CameraBufferManager
*/
#include "../camera_buffer.h"
diff --git a/src/android/mm/cros_frame_buffer_allocator.cpp b/src/android/mm/cros_frame_buffer_allocator.cpp
index 0a5c59f2..264c0d48 100644
--- a/src/android/mm/cros_frame_buffer_allocator.cpp
+++ b/src/android/mm/cros_frame_buffer_allocator.cpp
@@ -2,8 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * cros_frame_buffer.cpp - Allocate FrameBuffer for Chromium OS using
- * CameraBufferManager
+ * Allocate FrameBuffer for Chromium OS using CameraBufferManager
*/
#include <memory>
diff --git a/src/android/mm/generic_camera_buffer.cpp b/src/android/mm/generic_camera_buffer.cpp
index 1bd7090d..0ffcb445 100644
--- a/src/android/mm/generic_camera_buffer.cpp
+++ b/src/android/mm/generic_camera_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * generic_camera_buffer.cpp - Generic Android frame buffer backend
+ * Generic Android frame buffer backend
*/
#include "../camera_buffer.h"
diff --git a/src/android/mm/generic_frame_buffer_allocator.cpp b/src/android/mm/generic_frame_buffer_allocator.cpp
index 7ecef2c6..79625a9a 100644
--- a/src/android/mm/generic_frame_buffer_allocator.cpp
+++ b/src/android/mm/generic_frame_buffer_allocator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * generic_camera_buffer.cpp - Allocate FrameBuffer using gralloc API
+ * Allocate FrameBuffer using gralloc API
*/
#include <dlfcn.h>
diff --git a/src/android/mm/libhardware_stub.c b/src/android/mm/libhardware_stub.c
index 00f15cd9..28faa638 100644
--- a/src/android/mm/libhardware_stub.c
+++ b/src/android/mm/libhardware_stub.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Ideas on Board
*
- * libhardware_stub.c - Android libhardware stub for test compilation
+ * Android libhardware stub for test compilation
*/
#include <errno.h>
diff --git a/src/android/post_processor.h b/src/android/post_processor.h
index 1a205b05..b504a379 100644
--- a/src/android/post_processor.h
+++ b/src/android/post_processor.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * post_processor.h - CameraStream Post Processing Interface
+ * CameraStream Post Processing Interface
*/
#pragma once
diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
index ed44e6fe..c998807b 100644
--- a/src/android/yuv/post_processor_yuv.cpp
+++ b/src/android/yuv/post_processor_yuv.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * post_processor_yuv.cpp - Post Processor using libyuv
+ * Post Processor using libyuv
*/
#include "post_processor_yuv.h"
diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
index a7ac17c5..ed7bb1fb 100644
--- a/src/android/yuv/post_processor_yuv.h
+++ b/src/android/yuv/post_processor_yuv.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * post_processor_yuv.h - Post Processor using libyuv
+ * Post Processor using libyuv
*/
#pragma once
diff --git a/src/apps/cam/camera_session.cpp b/src/apps/cam/camera_session.cpp
index 8447f932..9e934827 100644
--- a/src/apps/cam/camera_session.cpp
+++ b/src/apps/cam/camera_session.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_session.cpp - Camera capture session
+ * Camera capture session
*/
#include <iomanip>
@@ -39,9 +39,14 @@ CameraSession::CameraSession(CameraManager *cm,
{
char *endptr;
unsigned long index = strtoul(cameraId.c_str(), &endptr, 10);
- if (*endptr == '\0' && index > 0 && index <= cm->cameras().size())
- camera_ = cm->cameras()[index - 1];
- else
+
+ if (*endptr == '\0' && index > 0) {
+ auto cameras = cm->cameras();
+ if (index <= cameras.size())
+ camera_ = cameras[index - 1];
+ }
+
+ if (!camera_)
camera_ = cm->get(cameraId);
if (!camera_) {
@@ -154,8 +159,43 @@ CameraSession::~CameraSession()
void CameraSession::listControls() const
{
for (const auto &[id, info] : camera_->controls()) {
- std::cout << "Control: " << id->name() << ": "
- << info.toString() << std::endl;
+ std::stringstream io;
+ io << "["
+ << (id->isInput() ? "in" : " ")
+ << (id->isOutput() ? "out" : " ")
+ << "] ";
+
+ if (info.values().empty()) {
+ std::cout << "Control: " << io.str()
+ << id->vendor() << "::" << id->name() << ": "
+ << info.toString() << std::endl;
+ } else {
+ std::cout << "Control: " << io.str()
+ << id->vendor() << "::" << id->name() << ":"
+ << std::endl;
+ for (const auto &value : info.values()) {
+ int32_t val = value.get<int32_t>();
+ const auto &it = id->enumerators().find(val);
+
+ std::cout << " - ";
+ if (it == id->enumerators().end())
+ std::cout << "UNKNOWN";
+ else
+ std::cout << it->second;
+ std::cout << " (" << val << ")" << std::endl;
+ }
+ }
+
+ if (id->isArray()) {
+ std::size_t size = id->size();
+
+ std::cout << " Size: ";
+ if (size == std::numeric_limits<std::size_t>::max())
+ std::cout << "n";
+ else
+ std::cout << std::to_string(size);
+ std::cout << std::endl;
+ }
}
}
@@ -225,11 +265,16 @@ int CameraSession::start()
#endif
if (options_.isSet(OptFile)) {
- if (!options_[OptFile].toString().empty())
- sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_,
- options_[OptFile]);
- else
- sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_);
+ std::unique_ptr<FileSink> sink =
+ std::make_unique<FileSink>(camera_.get(), streamNames_);
+
+ if (!options_[OptFile].toString().empty()) {
+ ret = sink->setFilePattern(options_[OptFile]);
+ if (ret)
+ return ret;
+ }
+
+ sink_ = std::move(sink);
}
if (sink_) {
@@ -377,7 +422,7 @@ void CameraSession::requestComplete(Request *request)
* Defer processing of the completed request to the event loop, to avoid
* blocking the camera manager thread.
*/
- EventLoop::instance()->callLater([=]() { processRequest(request); });
+ EventLoop::instance()->callLater([this, request]() { processRequest(request); });
}
void CameraSession::processRequest(Request *request)
diff --git a/src/apps/cam/camera_session.h b/src/apps/cam/camera_session.h
index 0bab519f..4442fd9b 100644
--- a/src/apps/cam/camera_session.h
+++ b/src/apps/cam/camera_session.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_session.h - Camera capture session
+ * Camera capture session
*/
#pragma once
diff --git a/src/apps/cam/capture_script.cpp b/src/apps/cam/capture_script.cpp
index 1215713f..fc1dfa75 100644
--- a/src/apps/cam/capture_script.cpp
+++ b/src/apps/cam/capture_script.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * capture_script.cpp - Capture session configuration script
+ * Capture session configuration script
*/
#include "capture_script.h"
diff --git a/src/apps/cam/capture_script.h b/src/apps/cam/capture_script.h
index 40042c03..294b9203 100644
--- a/src/apps/cam/capture_script.h
+++ b/src/apps/cam/capture_script.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * capture_script.h - Capture session configuration script
+ * Capture session configuration script
*/
#pragma once
diff --git a/src/apps/cam/drm.cpp b/src/apps/cam/drm.cpp
index 8779a713..47bbb6b0 100644
--- a/src/apps/cam/drm.cpp
+++ b/src/apps/cam/drm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * drm.cpp - DRM/KMS Helpers
+ * DRM/KMS Helpers
*/
#include "drm.h"
diff --git a/src/apps/cam/drm.h b/src/apps/cam/drm.h
index ebaea04d..1ba83b6e 100644
--- a/src/apps/cam/drm.h
+++ b/src/apps/cam/drm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * drm.h - DRM/KMS Helpers
+ * DRM/KMS Helpers
*/
#pragma once
diff --git a/src/apps/cam/file_sink.cpp b/src/apps/cam/file_sink.cpp
index 906b50e6..76e21db9 100644
--- a/src/apps/cam/file_sink.cpp
+++ b/src/apps/cam/file_sink.cpp
@@ -2,9 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * file_sink.cpp - File Sink
+ * File Sink
*/
+#include <array>
#include <assert.h>
#include <fcntl.h>
#include <iomanip>
@@ -12,6 +13,7 @@
#include <sstream>
#include <string.h>
#include <unistd.h>
+#include <utility>
#include <libcamera/camera.h>
@@ -24,13 +26,13 @@
using namespace libcamera;
FileSink::FileSink([[maybe_unused]] const libcamera::Camera *camera,
- const std::map<const libcamera::Stream *, std::string> &streamNames,
- const std::string &pattern)
+ const std::map<const libcamera::Stream *, std::string> &streamNames)
:
#ifdef HAVE_TIFF
camera_(camera),
#endif
- streamNames_(streamNames), pattern_(pattern)
+ pattern_(kDefaultFilePattern), fileType_(FileType::Binary),
+ streamNames_(streamNames)
{
}
@@ -38,6 +40,41 @@ FileSink::~FileSink()
{
}
+int FileSink::setFilePattern(const std::string &pattern)
+{
+ static const std::array<std::pair<std::string, FileType>, 2> types{{
+ { ".dng", FileType::Dng },
+ { ".ppm", FileType::Ppm },
+ }};
+
+ pattern_ = pattern;
+
+ if (pattern_.empty() || pattern_.back() == '/')
+ pattern_ += kDefaultFilePattern;
+
+ fileType_ = FileType::Binary;
+
+ for (const auto &type : types) {
+ if (pattern_.size() < type.first.size())
+ continue;
+
+ if (pattern_.find(type.first, pattern_.size() - type.first.size()) !=
+ std::string::npos) {
+ fileType_ = type.second;
+ break;
+ }
+ }
+
+#ifndef HAVE_TIFF
+ if (fileType_ == FileType::Dng) {
+ std::cerr << "DNG support not available" << std::endl;
+ return -EINVAL;
+ }
+#endif /* HAVE_TIFF */
+
+ return 0;
+}
+
int FileSink::configure(const libcamera::CameraConfiguration &config)
{
int ret = FrameSink::configure(config);
@@ -67,21 +104,10 @@ bool FileSink::processRequest(Request *request)
void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer,
[[maybe_unused]] const ControlList &metadata)
{
- std::string filename;
+ std::string filename = pattern_;
size_t pos;
int fd, ret = 0;
- if (!pattern_.empty())
- filename = pattern_;
-
-#ifdef HAVE_TIFF
- bool dng = filename.find(".dng", filename.size() - 4) != std::string::npos;
-#endif /* HAVE_TIFF */
- bool ppm = filename.find(".ppm", filename.size() - 4) != std::string::npos;
-
- if (filename.empty() || filename.back() == '/')
- filename += "frame-#.bin";
-
pos = filename.find_first_of('#');
if (pos != std::string::npos) {
std::stringstream ss;
@@ -93,7 +119,7 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer,
Image *image = mappedBuffers_[buffer].get();
#ifdef HAVE_TIFF
- if (dng) {
+ if (fileType_ == FileType::Dng) {
ret = DNGWriter::write(filename.c_str(), camera_,
stream->configuration(), metadata,
buffer, image->data(0).data());
@@ -104,7 +130,7 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer,
return;
}
#endif /* HAVE_TIFF */
- if (ppm) {
+ if (fileType_ == FileType::Ppm) {
ret = PPMWriter::write(filename.c_str(), stream->configuration(),
image->data(0));
if (ret < 0)
diff --git a/src/apps/cam/file_sink.h b/src/apps/cam/file_sink.h
index 300edf8d..71b7fe0f 100644
--- a/src/apps/cam/file_sink.h
+++ b/src/apps/cam/file_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * file_sink.h - File Sink
+ * File Sink
*/
#pragma once
@@ -21,10 +21,11 @@ class FileSink : public FrameSink
{
public:
FileSink(const libcamera::Camera *camera,
- const std::map<const libcamera::Stream *, std::string> &streamNames,
- const std::string &pattern = "");
+ const std::map<const libcamera::Stream *, std::string> &streamNames);
~FileSink();
+ int setFilePattern(const std::string &pattern);
+
int configure(const libcamera::CameraConfiguration &config) override;
void mapBuffer(libcamera::FrameBuffer *buffer) override;
@@ -32,6 +33,14 @@ public:
bool processRequest(libcamera::Request *request) override;
private:
+ static constexpr const char *kDefaultFilePattern = "frame-#.bin";
+
+ enum class FileType {
+ Binary,
+ Dng,
+ Ppm,
+ };
+
void writeBuffer(const libcamera::Stream *stream,
libcamera::FrameBuffer *buffer,
const libcamera::ControlList &metadata);
@@ -39,7 +48,10 @@ private:
#ifdef HAVE_TIFF
const libcamera::Camera *camera_;
#endif
- std::map<const libcamera::Stream *, std::string> streamNames_;
+
std::string pattern_;
+ FileType fileType_;
+
+ std::map<const libcamera::Stream *, std::string> streamNames_;
std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>> mappedBuffers_;
};
diff --git a/src/apps/cam/frame_sink.cpp b/src/apps/cam/frame_sink.cpp
index af21d575..68d6f2c1 100644
--- a/src/apps/cam/frame_sink.cpp
+++ b/src/apps/cam/frame_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * frame_sink.cpp - Base Frame Sink Class
+ * Base Frame Sink Class
*/
#include "frame_sink.h"
diff --git a/src/apps/cam/frame_sink.h b/src/apps/cam/frame_sink.h
index ca4347cb..11105c6c 100644
--- a/src/apps/cam/frame_sink.h
+++ b/src/apps/cam/frame_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * frame_sink.h - Base Frame Sink Class
+ * Base Frame Sink Class
*/
#pragma once
diff --git a/src/apps/cam/kms_sink.cpp b/src/apps/cam/kms_sink.cpp
index 6991c3fa..672c985a 100644
--- a/src/apps/cam/kms_sink.cpp
+++ b/src/apps/cam/kms_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * kms_sink.cpp - KMS Sink
+ * KMS Sink
*/
#include "kms_sink.h"
diff --git a/src/apps/cam/kms_sink.h b/src/apps/cam/kms_sink.h
index e2c618a1..4b7b4c26 100644
--- a/src/apps/cam/kms_sink.h
+++ b/src/apps/cam/kms_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * kms_sink.h - KMS Sink
+ * KMS Sink
*/
#pragma once
diff --git a/src/apps/cam/main.cpp b/src/apps/cam/main.cpp
index 1aabee01..460dbc81 100644
--- a/src/apps/cam/main.cpp
+++ b/src/apps/cam/main.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.cpp - cam - The libcamera swiss army knife
+ * cam - The libcamera swiss army knife
*/
#include <atomic>
@@ -344,12 +344,16 @@ std::string CamApp::cameraName(const Camera *camera)
return name;
}
+namespace {
+
void signalHandler([[maybe_unused]] int signal)
{
std::cout << "Exiting" << std::endl;
CamApp::instance()->quit();
}
+} /* namespace */
+
int main(int argc, char **argv)
{
CamApp app;
diff --git a/src/apps/cam/main.h b/src/apps/cam/main.h
index 4aa959b3..64e6a20e 100644
--- a/src/apps/cam/main.h
+++ b/src/apps/cam/main.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.h - Cam application
+ * Cam application
*/
#pragma once
diff --git a/src/apps/cam/sdl_sink.cpp b/src/apps/cam/sdl_sink.cpp
index a2f4abc1..8355dd5e 100644
--- a/src/apps/cam/sdl_sink.cpp
+++ b/src/apps/cam/sdl_sink.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_sink.h - SDL Sink
+ * SDL Sink
*/
#include "sdl_sink.h"
diff --git a/src/apps/cam/sdl_sink.h b/src/apps/cam/sdl_sink.h
index 6c19c663..18ec7fbe 100644
--- a/src/apps/cam/sdl_sink.h
+++ b/src/apps/cam/sdl_sink.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_sink.h - SDL Sink
+ * SDL Sink
*/
#pragma once
diff --git a/src/apps/cam/sdl_texture.cpp b/src/apps/cam/sdl_texture.cpp
index e9040bc5..e52c4a3a 100644
--- a/src/apps/cam/sdl_texture.cpp
+++ b/src/apps/cam/sdl_texture.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture.cpp - SDL Texture
+ * SDL Texture
*/
#include "sdl_texture.h"
diff --git a/src/apps/cam/sdl_texture.h b/src/apps/cam/sdl_texture.h
index 3993dd46..990f83b6 100644
--- a/src/apps/cam/sdl_texture.h
+++ b/src/apps/cam/sdl_texture.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture.h - SDL Texture
+ * SDL Texture
*/
#pragma once
diff --git a/src/apps/cam/sdl_texture_mjpg.cpp b/src/apps/cam/sdl_texture_mjpg.cpp
index da958e03..cace18fc 100644
--- a/src/apps/cam/sdl_texture_mjpg.cpp
+++ b/src/apps/cam/sdl_texture_mjpg.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture_mjpg.cpp - SDL Texture MJPG
+ * SDL Texture MJPG
*/
#include "sdl_texture_mjpg.h"
diff --git a/src/apps/cam/sdl_texture_mjpg.h b/src/apps/cam/sdl_texture_mjpg.h
index 814ca79a..37bed5f0 100644
--- a/src/apps/cam/sdl_texture_mjpg.h
+++ b/src/apps/cam/sdl_texture_mjpg.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture_mjpg.h - SDL Texture MJPG
+ * SDL Texture MJPG
*/
#pragma once
diff --git a/src/apps/cam/sdl_texture_yuv.cpp b/src/apps/cam/sdl_texture_yuv.cpp
index b29c3b93..480d7a37 100644
--- a/src/apps/cam/sdl_texture_yuv.cpp
+++ b/src/apps/cam/sdl_texture_yuv.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture_yuv.cpp - SDL YUV Textures
+ * SDL YUV Textures
*/
#include "sdl_texture_yuv.h"
diff --git a/src/apps/cam/sdl_texture_yuv.h b/src/apps/cam/sdl_texture_yuv.h
index 310e4e50..29c756e7 100644
--- a/src/apps/cam/sdl_texture_yuv.h
+++ b/src/apps/cam/sdl_texture_yuv.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy
*
- * sdl_texture_yuv.h - SDL YUV Textures
+ * SDL YUV Textures
*/
#pragma once
diff --git a/src/apps/common/dng_writer.cpp b/src/apps/common/dng_writer.cpp
index 82bc065a..ac461951 100644
--- a/src/apps/common/dng_writer.cpp
+++ b/src/apps/common/dng_writer.cpp
@@ -2,14 +2,16 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * dng_writer.cpp - DNG writer
+ * DNG writer
*/
#include "dng_writer.h"
#include <algorithm>
+#include <endian.h>
#include <iostream>
#include <map>
+#include <vector>
#include <tiffio.h>
@@ -126,7 +128,9 @@ struct Matrix3d {
float m[9];
};
-void packScanlineSBGGR8(void *output, const void *input, unsigned int width)
+namespace {
+
+void packScanlineRaw8(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -134,7 +138,67 @@ void packScanlineSBGGR8(void *output, const void *input, unsigned int width)
std::copy(in, in + width, out);
}
-void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
+void packScanlineRaw10(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ for (unsigned int i = 0; i < width; i += 4) {
+ *out++ = in[1] << 6 | in[0] >> 2;
+ *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
+ *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
+ *out++ = in[4] << 2 | (in[7] & 0x03) << 0;
+ *out++ = in[6];
+ in += 8;
+ }
+}
+
+void packScanlineRaw12(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ for (unsigned int i = 0; i < width; i += 2) {
+ *out++ = in[1] << 4 | in[0] >> 4;
+ *out++ = in[0] << 4 | (in[3] & 0x0f);
+ *out++ = in[2];
+ in += 4;
+ }
+}
+
+void packScanlineRaw16(void *output, const void *input, unsigned int width)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ uint16_t *out = static_cast<uint16_t *>(output);
+
+ std::copy(in, in + width, out);
+}
+
+/* Thumbnail function for raw data with each pixel aligned to 16 bits. */
+void thumbScanlineRaw(const FormatInfo &info, void *output, const void *input,
+ unsigned int width, unsigned int stride)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ const uint16_t *in2 = static_cast<const uint16_t *>(input) + stride / 2;
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* Shift down to 8 bits. */
+ unsigned int shift = info.bitsPerSample - 8;
+
+ /* Simple averaging that produces greyscale RGB values. */
+ for (unsigned int x = 0; x < width; x++) {
+ uint16_t value = (le16toh(in[0]) + le16toh(in[1]) +
+ le16toh(in2[0]) + le16toh(in2[1])) >> 2;
+ value = value >> shift;
+ *out++ = value;
+ *out++ = value;
+ *out++ = value;
+ in += 16;
+ in2 += 16;
+ }
+}
+
+void packScanlineRaw10_CSI2P(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -150,7 +214,7 @@ void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
}
}
-void packScanlineSBGGR12P(void *output, const void *input, unsigned int width)
+void packScanlineRaw12_CSI2P(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -164,7 +228,7 @@ void packScanlineSBGGR12P(void *output, const void *input, unsigned int width)
}
}
-void thumbScanlineSBGGRxxP(const FormatInfo &info, void *output,
+void thumbScanlineRaw_CSI2P(const FormatInfo &info, void *output,
const void *input, unsigned int width,
unsigned int stride)
{
@@ -282,78 +346,150 @@ void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output,
}
}
-static const std::map<PixelFormat, FormatInfo> formatInfo = {
+const std::map<PixelFormat, FormatInfo> formatInfo = {
{ formats::SBGGR8, {
.bitsPerSample = 8,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG8, {
.bitsPerSample = 8,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG8, {
.bitsPerSample = 8,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB8, {
.bitsPerSample = 8,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SBGGR10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
} },
{ formats::SBGGR10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SBGGR12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SBGGR10_IPU3, {
.bitsPerSample = 16,
@@ -381,6 +517,8 @@ static const std::map<PixelFormat, FormatInfo> formatInfo = {
} },
};
+} /* namespace */
+
int DNGWriter::write(const char *filename, const Camera *camera,
const StreamConfiguration &config,
const ControlList &metadata,
@@ -407,7 +545,7 @@ int DNGWriter::write(const char *filename, const Camera *camera,
* or a thumbnail scanline. The latter will always be much smaller than
* the former as we downscale by 16 in both directions.
*/
- uint8_t scanline[(config.size.width * info->bitsPerSample + 7) / 8];
+ std::vector<uint8_t> scanline((config.size.width * info->bitsPerSample + 7) / 8);
toff_t rawIFDOffset = 0;
toff_t exifIFDOffset = 0;
@@ -507,10 +645,10 @@ int DNGWriter::write(const char *filename, const Camera *camera,
/* Write the thumbnail. */
const uint8_t *row = static_cast<const uint8_t *>(data);
for (unsigned int y = 0; y < config.size.height / 16; y++) {
- info->thumbScanline(*info, &scanline, row,
+ info->thumbScanline(*info, scanline.data(), row,
config.size.width / 16, config.stride);
- if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) {
+ if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
std::cerr << "Failed to write thumbnail scanline"
<< std::endl;
TIFFClose(tif);
@@ -522,6 +660,23 @@ int DNGWriter::write(const char *filename, const Camera *camera,
TIFFWriteDirectory(tif);
+ /*
+ * Workaround for a bug introduced in libtiff version 4.5.1, for which
+ * no fixed release is available yet. In the affected versions the CFA*
+ * tags are missing from the field info.
+ * Introduced by: https://gitlab.com/libtiff/libtiff/-/commit/738e04099b13192bb1f654e74e9b5829313f3161
+ * Fixed by: https://gitlab.com/libtiff/libtiff/-/commit/49856998c3d82e65444b47bb4fb11b7830a0c2be
+ */
+ if (!TIFFFindField(tif, TIFFTAG_CFAREPEATPATTERNDIM, TIFF_ANY)) {
+ static const TIFFFieldInfo infos[] = {
+ { TIFFTAG_CFAREPEATPATTERNDIM, 2, 2, TIFF_SHORT, FIELD_CUSTOM,
+ 1, 0, const_cast<char *>("CFARepeatPatternDim") },
+ { TIFFTAG_CFAPATTERN, -1, -1, TIFF_BYTE, FIELD_CUSTOM,
+ 1, 1, const_cast<char *>("CFAPattern") },
+ };
+ TIFFMergeFieldInfo(tif, infos, 2);
+ }
+
/* Create a new IFD for the RAW image. */
const uint16_t cfaRepeatPatternDim[] = { 2, 2 };
const uint8_t cfaPlaneColor[] = {
@@ -593,9 +748,9 @@ int DNGWriter::write(const char *filename, const Camera *camera,
/* Write RAW content. */
row = static_cast<const uint8_t *>(data);
for (unsigned int y = 0; y < config.size.height; y++) {
- info->packScanline(&scanline, row, config.size.width);
+ info->packScanline(scanline.data(), row, config.size.width);
- if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) {
+ if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
std::cerr << "Failed to write RAW scanline"
<< std::endl;
TIFFClose(tif);
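
The new packScanlineRaw10() lays four 10-bit samples, stored little-endian in
16-bit words, into exactly five output bytes, MSB first. A standalone
round-trip check of that bit layout (the sample values below are arbitrary
test data, not from the source):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint16_t px[4] = { 0x3ff, 0x000, 0x2a5, 0x155 };
        uint8_t in[8], out[5];

        /* Little-endian 16-bit words, as the unpacked raw formats store them. */
        for (int i = 0; i < 4; i++) {
            in[2 * i] = px[i] & 0xff;
            in[2 * i + 1] = px[i] >> 8;
        }

        /* Same packing arithmetic as packScanlineRaw10() above. */
        out[0] = in[1] << 6 | in[0] >> 2;
        out[1] = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
        out[2] = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
        out[3] = in[4] << 2 | (in[7] & 0x03);
        out[4] = in[6];

        /* Unpack again, MSB first, and verify the round trip. */
        assert((out[0] << 2 | out[1] >> 6) == px[0]);
        assert(((out[1] & 0x3f) << 4 | out[2] >> 4) == px[1]);
        assert(((out[2] & 0x0f) << 6 | out[3] >> 2) == px[2]);
        assert(((out[3] & 0x03) << 8 | out[4]) == px[3]);
        return 0;
    }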
diff --git a/src/apps/common/dng_writer.h b/src/apps/common/dng_writer.h
index 38f38f62..aaa8a852 100644
--- a/src/apps/common/dng_writer.h
+++ b/src/apps/common/dng_writer.h
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * dng_writer.h - DNG writer
+ * DNG writer
*/
#pragma once
#ifdef HAVE_TIFF
-#define HAVE_DNG
#include <libcamera/camera.h>
#include <libcamera/controls.h>
diff --git a/src/apps/common/event_loop.cpp b/src/apps/common/event_loop.cpp
index cb83845c..b6230f4b 100644
--- a/src/apps/common/event_loop.cpp
+++ b/src/apps/common/event_loop.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_loop.cpp - cam - Event loop
+ * cam - Event loop
*/
#include "event_loop.h"
@@ -21,12 +21,35 @@ EventLoop::EventLoop()
evthread_use_pthreads();
base_ = event_base_new();
instance_ = this;
+
+ callsTrigger_ = event_new(base_, -1, EV_PERSIST, [](evutil_socket_t, short, void *closure) {
+ auto *self = static_cast<EventLoop *>(closure);
+
+ for (;;) {
+ std::function<void()> call;
+
+ {
+ std::lock_guard locker(self->lock_);
+ if (self->calls_.empty())
+ break;
+
+ call = std::move(self->calls_.front());
+ self->calls_.pop_front();
+ }
+
+ call();
+ }
+ }, this);
+ assert(callsTrigger_);
+ event_add(callsTrigger_, nullptr);
}
EventLoop::~EventLoop()
{
instance_ = nullptr;
+ event_free(callsTrigger_);
+
events_.clear();
event_base_free(base_);
libevent_global_shutdown();
@@ -50,20 +73,20 @@ void EventLoop::exit(int code)
event_base_loopbreak(base_);
}
-void EventLoop::callLater(const std::function<void()> &func)
+void EventLoop::callLater(std::function<void()> &&func)
{
{
std::unique_lock<std::mutex> locker(lock_);
- calls_.push_back(func);
+ calls_.push_back(std::move(func));
}
- event_base_once(base_, -1, EV_TIMEOUT, dispatchCallback, this, nullptr);
+ event_active(callsTrigger_, 0, 0);
}
void EventLoop::addFdEvent(int fd, EventType type,
- const std::function<void()> &callback)
+ std::function<void()> &&callback)
{
- std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+ std::unique_ptr<Event> event = std::make_unique<Event>(std::move(callback));
short events = (type & Read ? EV_READ : 0)
| (type & Write ? EV_WRITE : 0)
| EV_PERSIST;
@@ -85,9 +108,9 @@ void EventLoop::addFdEvent(int fd, EventType type,
}
void EventLoop::addTimerEvent(const std::chrono::microseconds period,
- const std::function<void()> &callback)
+ std::function<void()> &&callback)
{
- std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+ std::unique_ptr<Event> event = std::make_unique<Event>(std::move(callback));
event->event_ = event_new(base_, -1, EV_PERSIST, &EventLoop::Event::dispatch,
event.get());
if (!event->event_) {
@@ -108,31 +131,8 @@ void EventLoop::addTimerEvent(const std::chrono::microseconds period,
events_.push_back(std::move(event));
}
-void EventLoop::dispatchCallback([[maybe_unused]] evutil_socket_t fd,
- [[maybe_unused]] short flags, void *param)
-{
- EventLoop *loop = static_cast<EventLoop *>(param);
- loop->dispatchCall();
-}
-
-void EventLoop::dispatchCall()
-{
- std::function<void()> call;
-
- {
- std::unique_lock<std::mutex> locker(lock_);
- if (calls_.empty())
- return;
-
- call = calls_.front();
- calls_.pop_front();
- }
-
- call();
-}
-
-EventLoop::Event::Event(const std::function<void()> &callback)
- : callback_(callback), event_(nullptr)
+EventLoop::Event::Event(std::function<void()> &&callback)
+ : callback_(std::move(callback)), event_(nullptr)
{
}
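
The deferred-call rework replaces a heap-allocated one-shot event per
callLater() with a single persistent trigger: an fd-less EV_PERSIST event
never fires on its own, so event_active() turns it into a cheap, reusable
wakeup and the handler drains the whole queue in order. A minimal
self-contained sketch of that libevent mechanism (not the project code):

    #include <iostream>

    #include <event2/event.h>
    #include <event2/thread.h>

    int main()
    {
        evthread_use_pthreads();
        event_base *base = event_base_new();

        event *trigger = event_new(base, -1, EV_PERSIST,
                                   [](evutil_socket_t, short, void *arg) {
            std::cout << "woken up" << std::endl;
            event_base_loopbreak(static_cast<event_base *>(arg));
        }, base);

        event_add(trigger, nullptr);  /* no timeout: fires only when activated */
        event_active(trigger, 0, 0);  /* may be called from another thread */

        event_base_dispatch(base);

        event_free(trigger);
        event_base_free(base);
        return 0;
    }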
diff --git a/src/apps/common/event_loop.h b/src/apps/common/event_loop.h
index ef79e8e5..d8b6df2f 100644
--- a/src/apps/common/event_loop.h
+++ b/src/apps/common/event_loop.h
@@ -2,17 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_loop.h - cam - Event loop
+ * cam - Event loop
*/
#pragma once
#include <chrono>
+#include <deque>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
+#include <libcamera/base/class.h>
+
#include <event2/util.h>
struct event_base;
@@ -33,18 +36,20 @@ public:
int exec();
void exit(int code = 0);
- void callLater(const std::function<void()> &func);
+ void callLater(std::function<void()> &&func);
void addFdEvent(int fd, EventType type,
- const std::function<void()> &handler);
+ std::function<void()> &&handler);
- using duration = std::chrono::steady_clock::duration;
void addTimerEvent(const std::chrono::microseconds period,
- const std::function<void()> &handler);
+ std::function<void()> &&handler);
private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(EventLoop)
+
struct Event {
- Event(const std::function<void()> &callback);
+ Event(std::function<void()> &&callback);
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(Event)
~Event();
static void dispatch(int fd, short events, void *arg);
@@ -58,11 +63,9 @@ private:
struct event_base *base_;
int exitCode_;
- std::list<std::function<void()>> calls_;
+ std::deque<std::function<void()>> calls_;
+ struct event *callsTrigger_ = nullptr;
+
std::list<std::unique_ptr<Event>> events_;
std::mutex lock_;
-
- static void dispatchCallback(evutil_socket_t fd, short flags,
- void *param);
- void dispatchCall();
};
diff --git a/src/apps/common/image.cpp b/src/apps/common/image.cpp
index fe2cc6da..a2a0f58f 100644
--- a/src/apps/common/image.cpp
+++ b/src/apps/common/image.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * image.cpp - Multi-planar image with access to pixel data
+ * Multi-planar image with access to pixel data
*/
#include "image.h"
diff --git a/src/apps/common/image.h b/src/apps/common/image.h
index 7953b177..e47e446b 100644
--- a/src/apps/common/image.h
+++ b/src/apps/common/image.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * image.h - Multi-planar image with access to pixel data
+ * Multi-planar image with access to pixel data
*/
#pragma once
diff --git a/src/apps/common/options.cpp b/src/apps/common/options.cpp
index 4f7e8691..cae193cc 100644
--- a/src/apps/common/options.cpp
+++ b/src/apps/common/options.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * options.cpp - cam - Options parsing
+ * cam - Options parsing
*/
#include <assert.h>
@@ -10,6 +10,7 @@
#include <iomanip>
#include <iostream>
#include <string.h>
+#include <vector>
#include "options.h"
@@ -879,8 +880,8 @@ OptionsParser::Options OptionsParser::parse(int argc, char **argv)
* Allocate short and long options arrays large enough to contain all
* options.
*/
- char shortOptions[optionsMap_.size() * 3 + 2];
- struct option longOptions[optionsMap_.size() + 1];
+ std::vector<char> shortOptions(optionsMap_.size() * 3 + 2);
+ std::vector<struct option> longOptions(optionsMap_.size() + 1);
unsigned int ids = 0;
unsigned int idl = 0;
@@ -922,7 +923,8 @@ OptionsParser::Options OptionsParser::parse(int argc, char **argv)
opterr = 0;
while (true) {
- int c = getopt_long(argc, argv, shortOptions, longOptions, nullptr);
+ int c = getopt_long(argc, argv, shortOptions.data(),
+ longOptions.data(), nullptr);
if (c == -1)
break;
@@ -1038,7 +1040,7 @@ void OptionsParser::usageOptions(const std::list<Option> &options,
std::cerr << std::setw(indent) << argument;
- for (const char *help = option.help, *end = help; end; ) {
+ for (const char *help = option.help, *end = help; end;) {
end = strchr(help, '\n');
if (end) {
std::cerr << std::string(help, end - help + 1);
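
The two stack arrays removed above were variable-length arrays, a GCC/Clang
extension that ISO C++ never adopted; std::vector provides the same
contiguous, getopt-compatible storage portably, and zero-initializes the
terminator entries. A reduced sketch (sizes and names are illustrative):

    #include <getopt.h>

    #include <cstddef>
    #include <vector>

    int parseOne(int argc, char **argv, std::size_t count)
    {
        std::vector<char> shortOptions(count * 3 + 2);
        std::vector<struct option> longOptions(count + 1);

        /* ... fill both tables, keeping the zero-terminator entries ... */

        return getopt_long(argc, argv, shortOptions.data(),
                           longOptions.data(), nullptr);
    }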
diff --git a/src/apps/common/options.h b/src/apps/common/options.h
index 4ddd4987..9771aa7a 100644
--- a/src/apps/common/options.h
+++ b/src/apps/common/options.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * options.h - cam - Options parsing
+ * cam - Options parsing
*/
#pragma once
diff --git a/src/apps/common/ppm_writer.cpp b/src/apps/common/ppm_writer.cpp
index a8ccf67a..368de8bf 100644
--- a/src/apps/common/ppm_writer.cpp
+++ b/src/apps/common/ppm_writer.cpp
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2024 Red Hat, Inc.
*
- * ppm_writer.cpp - PPM writer
+ * PPM writer
*/
#include "ppm_writer.h"
+#include <errno.h>
#include <fstream>
#include <iostream>
@@ -28,7 +29,7 @@ int PPMWriter::write(const char *filename,
std::ofstream output(filename, std::ios::binary);
if (!output) {
std::cerr << "Failed to open ppm file: " << filename << std::endl;
- return -EINVAL;
+ return -EIO;
}
output << "P6" << std::endl
@@ -36,7 +37,7 @@ int PPMWriter::write(const char *filename,
<< "255" << std::endl;
if (!output) {
std::cerr << "Failed to write the file header" << std::endl;
- return -EINVAL;
+ return -EIO;
}
const unsigned int rowLength = config.size.width * 3;
@@ -45,7 +46,7 @@ int PPMWriter::write(const char *filename,
output.write(row, rowLength);
if (!output) {
std::cerr << "Failed to write image data at row " << y << std::endl;
- return -EINVAL;
+ return -EIO;
}
}
diff --git a/src/apps/common/ppm_writer.h b/src/apps/common/ppm_writer.h
index 4c38f5ce..8c8d2e15 100644
--- a/src/apps/common/ppm_writer.h
+++ b/src/apps/common/ppm_writer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024, Red Hat, Inc.
*
- * ppm_writer.h - PPM writer
+ * PPM writer
*/
#pragma once
diff --git a/src/apps/common/stream_options.cpp b/src/apps/common/stream_options.cpp
index 663b979a..99239e07 100644
--- a/src/apps/common/stream_options.cpp
+++ b/src/apps/common/stream_options.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * stream_options.cpp - Helper to parse options for streams
+ * Helper to parse options for streams
*/
#include "stream_options.h"
diff --git a/src/apps/common/stream_options.h b/src/apps/common/stream_options.h
index a5f3bde0..a93f104c 100644
--- a/src/apps/common/stream_options.h
+++ b/src/apps/common/stream_options.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * stream_options.h - Helper to parse options for streams
+ * Helper to parse options for streams
*/
#pragma once
diff --git a/src/apps/ipa-verify/main.cpp b/src/apps/ipa-verify/main.cpp
index 76ba5073..0903cd85 100644
--- a/src/apps/ipa-verify/main.cpp
+++ b/src/apps/ipa-verify/main.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Ideas on Board Oy
*
- * ipa_verify.cpp - Verify signature on an IPA module
+ * Verify signature on an IPA module
*/
#include <iostream>
diff --git a/src/apps/lc-compliance/environment.cpp b/src/apps/lc-compliance/environment.cpp
index 5eb3775f..987264f1 100644
--- a/src/apps/lc-compliance/environment.cpp
+++ b/src/apps/lc-compliance/environment.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Collabora Ltd.
*
- * environment.cpp - Common environment for tests
+ * Common environment for tests
*/
#include "environment.h"
diff --git a/src/apps/lc-compliance/environment.h b/src/apps/lc-compliance/environment.h
index 0debbcce..834c722e 100644
--- a/src/apps/lc-compliance/environment.h
+++ b/src/apps/lc-compliance/environment.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Collabora Ltd.
*
- * environment.h - Common environment for tests
+ * Common environment for tests
*/
#pragma once
@@ -23,5 +23,5 @@ private:
Environment() = default;
std::string cameraId_;
- libcamera::CameraManager *cm_;
+ libcamera::CameraManager *cm_ = nullptr;
};
diff --git a/src/apps/lc-compliance/helpers/capture.cpp b/src/apps/lc-compliance/helpers/capture.cpp
index 5aab973f..f2c6d58c 100644
--- a/src/apps/lc-compliance/helpers/capture.cpp
+++ b/src/apps/lc-compliance/helpers/capture.cpp
@@ -2,18 +2,19 @@
/*
* Copyright (C) 2020-2021, Google Inc.
*
- * simple_capture.cpp - Simple capture helper
+ * Simple capture helper
*/
#include "capture.h"
+#include <assert.h>
+
#include <gtest/gtest.h>
using namespace libcamera;
Capture::Capture(std::shared_ptr<Camera> camera)
- : loop_(nullptr), camera_(camera),
- allocator_(std::make_unique<FrameBufferAllocator>(camera))
+ : camera_(std::move(camera)), allocator_(camera_)
{
}
@@ -26,10 +27,8 @@ void Capture::configure(StreamRole role)
{
config_ = camera_->generateConfiguration({ role });
- if (!config_) {
- std::cout << "Role not supported by camera" << std::endl;
- GTEST_SKIP();
- }
+ if (!config_)
+ GTEST_SKIP() << "Role not supported by camera";
if (config_->validate() != CameraConfiguration::Valid) {
config_.reset();
@@ -42,155 +41,106 @@ void Capture::configure(StreamRole role)
}
}
-void Capture::start()
+void Capture::run(unsigned int captureLimit, std::optional<unsigned int> queueLimit)
{
- Stream *stream = config_->at(0).stream();
- int count = allocator_->allocate(stream);
-
- ASSERT_GE(count, 0) << "Failed to allocate buffers";
- EXPECT_EQ(count, config_->at(0).bufferCount) << "Allocated less buffers than expected";
-
- camera_->requestCompleted.connect(this, &Capture::requestComplete);
-
- ASSERT_EQ(camera_->start(), 0) << "Failed to start camera";
-}
+ assert(!queueLimit || captureLimit <= *queueLimit);
-void Capture::stop()
-{
- if (!config_ || !allocator_->allocated())
- return;
+ captureLimit_ = captureLimit;
+ queueLimit_ = queueLimit;
- camera_->stop();
+ captureCount_ = queueCount_ = 0;
- camera_->requestCompleted.disconnect(this);
+ EventLoop loop;
+ loop_ = &loop;
- Stream *stream = config_->at(0).stream();
- requests_.clear();
- allocator_->free(stream);
-}
-
-/* CaptureBalanced */
-
-CaptureBalanced::CaptureBalanced(std::shared_ptr<Camera> camera)
- : Capture(camera)
-{
-}
-
-void CaptureBalanced::capture(unsigned int numRequests)
-{
start();
- Stream *stream = config_->at(0).stream();
- const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator_->buffers(stream);
-
- /* No point in testing less requests then the camera depth. */
- if (buffers.size() > numRequests) {
- std::cout << "Camera needs " + std::to_string(buffers.size())
- + " requests, can't test only "
- + std::to_string(numRequests) << std::endl;
- GTEST_SKIP();
- }
+ for (const auto &request : requests_)
+ queueRequest(request.get());
- queueCount_ = 0;
- captureCount_ = 0;
- captureLimit_ = numRequests;
+ EXPECT_EQ(loop_->exec(), 0);
- /* Queue the recommended number of requests. */
- for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
- std::unique_ptr<Request> request = camera_->createRequest();
- ASSERT_TRUE(request) << "Can't create request";
-
- ASSERT_EQ(request->addBuffer(stream, buffer.get()), 0) << "Can't set buffer for request";
-
- ASSERT_EQ(queueRequest(request.get()), 0) << "Failed to queue request";
-
- requests_.push_back(std::move(request));
- }
-
- /* Run capture session. */
- loop_ = new EventLoop();
- loop_->exec();
stop();
- delete loop_;
- ASSERT_EQ(captureCount_, captureLimit_);
+ EXPECT_LE(captureLimit_, captureCount_);
+ EXPECT_LE(captureCount_, queueCount_);
+ EXPECT_TRUE(!queueLimit_ || queueCount_ <= *queueLimit_);
}
-int CaptureBalanced::queueRequest(Request *request)
+int Capture::queueRequest(libcamera::Request *request)
{
- queueCount_++;
- if (queueCount_ > captureLimit_)
+ if (queueLimit_ && queueCount_ >= *queueLimit_)
return 0;
- return camera_->queueRequest(request);
+ int ret = camera_->queueRequest(request);
+ if (ret < 0)
+ return ret;
+
+ queueCount_ += 1;
+ return 0;
}
-void CaptureBalanced::requestComplete(Request *request)
+void Capture::requestComplete(Request *request)
{
- EXPECT_EQ(request->status(), Request::Status::RequestComplete)
- << "Request didn't complete successfully";
-
captureCount_++;
if (captureCount_ >= captureLimit_) {
loop_->exit(0);
return;
}
+ EXPECT_EQ(request->status(), Request::Status::RequestComplete)
+ << "Request didn't complete successfully";
+
request->reuse(Request::ReuseBuffers);
if (queueRequest(request))
loop_->exit(-EINVAL);
}
-/* CaptureUnbalanced */
-
-CaptureUnbalanced::CaptureUnbalanced(std::shared_ptr<Camera> camera)
- : Capture(camera)
-{
-}
-
-void CaptureUnbalanced::capture(unsigned int numRequests)
+void Capture::start()
{
- start();
+ assert(config_);
+ assert(!config_->empty());
+ assert(!allocator_.allocated());
+ assert(requests_.empty());
Stream *stream = config_->at(0).stream();
- const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator_->buffers(stream);
+ int count = allocator_.allocate(stream);
- captureCount_ = 0;
- captureLimit_ = numRequests;
+ ASSERT_GE(count, 0) << "Failed to allocate buffers";
+ EXPECT_EQ(count, config_->at(0).bufferCount) << "Allocated less buffers than expected";
+
+ const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator_.buffers(stream);
+
+ /* No point in testing fewer requests than the camera depth. */
+ if (queueLimit_ && *queueLimit_ < buffers.size()) {
+ GTEST_SKIP() << "Camera needs " << buffers.size()
+ << " requests, can't test only " << *queueLimit_;
+ }
- /* Queue the recommended number of requests. */
for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
std::unique_ptr<Request> request = camera_->createRequest();
ASSERT_TRUE(request) << "Can't create request";
ASSERT_EQ(request->addBuffer(stream, buffer.get()), 0) << "Can't set buffer for request";
- ASSERT_EQ(camera_->queueRequest(request.get()), 0) << "Failed to queue request";
-
requests_.push_back(std::move(request));
}
- /* Run capture session. */
- loop_ = new EventLoop();
- int status = loop_->exec();
- stop();
- delete loop_;
+ camera_->requestCompleted.connect(this, &Capture::requestComplete);
- ASSERT_EQ(status, 0);
+ ASSERT_EQ(camera_->start(), 0) << "Failed to start camera";
}
-void CaptureUnbalanced::requestComplete(Request *request)
+void Capture::stop()
{
- captureCount_++;
- if (captureCount_ >= captureLimit_) {
- loop_->exit(0);
+ if (!config_ || !allocator_.allocated())
return;
- }
- EXPECT_EQ(request->status(), Request::Status::RequestComplete)
- << "Request didn't complete successfully";
+ camera_->stop();
- request->reuse(Request::ReuseBuffers);
- if (camera_->queueRequest(request))
- loop_->exit(-EINVAL);
+ camera_->requestCompleted.disconnect(this);
+
+ Stream *stream = config_->at(0).stream();
+ requests_.clear();
+ allocator_.free(stream);
}
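
With the CaptureBalanced/CaptureUnbalanced split gone, whether a run is
balanced is expressed by the optional queue limit rather than by the class
used. A sketch of both modes (camera comes from the test fixture; the counts
are arbitrary):

    Capture capture(camera);
    capture.configure(StreamRole::VideoRecording);

    capture.run(50, 50);  /* balanced: queue at most 50 requests */
    capture.run(50);      /* unbalanced: requeue freely until 50 complete */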
diff --git a/src/apps/lc-compliance/helpers/capture.h b/src/apps/lc-compliance/helpers/capture.h
index 0574ab1c..0e7b848f 100644
--- a/src/apps/lc-compliance/helpers/capture.h
+++ b/src/apps/lc-compliance/helpers/capture.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2020-2021, Google Inc.
*
- * simple_capture.h - Simple capture helper
+ * Simple capture helper
*/
#pragma once
#include <memory>
+#include <optional>
#include <libcamera/libcamera.h>
@@ -16,51 +17,29 @@
class Capture
{
public:
+ Capture(std::shared_ptr<libcamera::Camera> camera);
+ ~Capture();
+
void configure(libcamera::StreamRole role);
+ void run(unsigned int captureLimit, std::optional<unsigned int> queueLimit = {});
-protected:
- Capture(std::shared_ptr<libcamera::Camera> camera);
- virtual ~Capture();
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(Capture)
void start();
void stop();
- virtual void requestComplete(libcamera::Request *request) = 0;
-
- EventLoop *loop_;
+ int queueRequest(libcamera::Request *request);
+ void requestComplete(libcamera::Request *request);
std::shared_ptr<libcamera::Camera> camera_;
- std::unique_ptr<libcamera::FrameBufferAllocator> allocator_;
+ libcamera::FrameBufferAllocator allocator_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
std::vector<std::unique_ptr<libcamera::Request>> requests_;
-};
-
-class CaptureBalanced : public Capture
-{
-public:
- CaptureBalanced(std::shared_ptr<libcamera::Camera> camera);
-
- void capture(unsigned int numRequests);
-
-private:
- int queueRequest(libcamera::Request *request);
- void requestComplete(libcamera::Request *request) override;
-
- unsigned int queueCount_;
- unsigned int captureCount_;
- unsigned int captureLimit_;
-};
-
-class CaptureUnbalanced : public Capture
-{
-public:
- CaptureUnbalanced(std::shared_ptr<libcamera::Camera> camera);
-
- void capture(unsigned int numRequests);
-
-private:
- void requestComplete(libcamera::Request *request) override;
- unsigned int captureCount_;
- unsigned int captureLimit_;
+ EventLoop *loop_ = nullptr;
+ unsigned int captureLimit_ = 0;
+ std::optional<unsigned int> queueLimit_;
+ unsigned int captureCount_ = 0;
+ unsigned int queueCount_ = 0;
};
diff --git a/src/apps/lc-compliance/main.cpp b/src/apps/lc-compliance/main.cpp
index 74e0d4df..e9f0ffbb 100644
--- a/src/apps/lc-compliance/main.cpp
+++ b/src/apps/lc-compliance/main.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Google Inc.
* Copyright (C) 2021, Collabora Ltd.
*
- * main.cpp - lc-compliance - The libcamera compliance tool
+ * lc-compliance - The libcamera compliance tool
*/
#include <iomanip>
@@ -45,13 +45,11 @@ class ThrowListener : public testing::EmptyTestEventListener
static void listCameras(CameraManager *cm)
{
for (const std::shared_ptr<Camera> &cam : cm->cameras())
- std::cout << "- " << cam.get()->id() << std::endl;
+ std::cout << "- " << cam->id() << std::endl;
}
static int initCamera(CameraManager *cm, OptionsParser::Options options)
{
- std::shared_ptr<Camera> camera;
-
int ret = cm->start();
if (ret) {
std::cout << "Failed to start camera manager: "
@@ -66,7 +64,7 @@ static int initCamera(CameraManager *cm, OptionsParser::Options options)
}
const std::string &cameraId = options[OptCamera];
- camera = cm->get(cameraId);
+ std::shared_ptr<Camera> camera = cm->get(cameraId);
if (!camera) {
std::cout << "Camera " << cameraId << " not found, available cameras:" << std::endl;
listCameras(cm);
@@ -82,45 +80,27 @@ static int initCamera(CameraManager *cm, OptionsParser::Options options)
static int initGtestParameters(char *arg0, OptionsParser::Options options)
{
- const std::map<std::string, std::string> gtestFlags = { { "list", "--gtest_list_tests" },
- { "filter", "--gtest_filter" } };
-
- int argc = 0;
+ std::vector<const char *> argv;
std::string filterParam;
- /*
- * +2 to have space for both the 0th argument that is needed but not
- * used and the null at the end.
- */
- char **argv = new char *[(gtestFlags.size() + 2)];
- if (!argv)
- return -ENOMEM;
-
- argv[0] = arg0;
- argc++;
+ argv.push_back(arg0);
- if (options.isSet(OptList)) {
- argv[argc] = const_cast<char *>(gtestFlags.at("list").c_str());
- argc++;
- }
+ if (options.isSet(OptList))
+ argv.push_back("--gtest_list_tests");
if (options.isSet(OptFilter)) {
/*
* The filter flag needs to be passed as a single parameter, in
* the format --gtest_filter=filterStr
*/
- filterParam = gtestFlags.at("filter") + "=" +
- static_cast<const std::string &>(options[OptFilter]);
-
- argv[argc] = const_cast<char *>(filterParam.c_str());
- argc++;
+ filterParam = "--gtest_filter=" + options[OptFilter].toString();
+ argv.push_back(filterParam.c_str());
}
- argv[argc] = nullptr;
-
- ::testing::InitGoogleTest(&argc, argv);
+ argv.push_back(nullptr);
- delete[] argv;
+ int argc = argv.size();
+ ::testing::InitGoogleTest(&argc, const_cast<char **>(argv.data()));
return 0;
}
diff --git a/src/apps/lc-compliance/tests/capture_test.cpp b/src/apps/lc-compliance/tests/capture_test.cpp
index 284d3630..93bed48f 100644
--- a/src/apps/lc-compliance/tests/capture_test.cpp
+++ b/src/apps/lc-compliance/tests/capture_test.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Google Inc.
* Copyright (C) 2021, Collabora Ltd.
*
- * capture_test.cpp - Test camera capture
+ * Test camera capture
*/
#include "capture.h"
@@ -14,10 +14,13 @@
#include "environment.h"
+namespace {
+
using namespace libcamera;
-const std::vector<int> NUMREQUESTS = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
-const std::vector<StreamRole> ROLES = {
+const int NUMREQUESTS[] = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
+
+const StreamRole ROLES[] = {
StreamRole::Raw,
StreamRole::StillCapture,
StreamRole::VideoRecording,
@@ -84,11 +87,11 @@ TEST_P(SingleStream, Capture)
{
auto [role, numRequests] = GetParam();
- CaptureBalanced capture(camera_);
+ Capture capture(camera_);
capture.configure(role);
- capture.capture(numRequests);
+ capture.run(numRequests, numRequests);
}
/*
@@ -103,12 +106,12 @@ TEST_P(SingleStream, CaptureStartStop)
auto [role, numRequests] = GetParam();
unsigned int numRepeats = 3;
- CaptureBalanced capture(camera_);
+ Capture capture(camera_);
capture.configure(role);
for (unsigned int starts = 0; starts < numRepeats; starts++)
- capture.capture(numRequests);
+ capture.run(numRequests, numRequests);
}
/*
@@ -122,11 +125,11 @@ TEST_P(SingleStream, UnbalancedStop)
{
auto [role, numRequests] = GetParam();
- CaptureUnbalanced capture(camera_);
+ Capture capture(camera_);
capture.configure(role);
- capture.capture(numRequests);
+ capture.run(numRequests);
}
INSTANTIATE_TEST_SUITE_P(CaptureTests,
@@ -134,3 +137,5 @@ INSTANTIATE_TEST_SUITE_P(CaptureTests,
testing::Combine(testing::ValuesIn(ROLES),
testing::ValuesIn(NUMREQUESTS)),
SingleStream::nameParameters);
+
+} /* namespace */
diff --git a/src/apps/qcam/assets/shader/bayer_8.vert b/src/apps/qcam/assets/shader/bayer_8.vert
index 3695a5e9..fb5109ee 100644
--- a/src/apps/qcam/assets/shader/bayer_8.vert
+++ b/src/apps/qcam/assets/shader/bayer_8.vert
@@ -19,6 +19,8 @@ Copyright (C) 2021, Linaro
attribute vec4 vertexIn;
attribute vec2 textureIn;
+uniform mat4 proj_matrix;
+
uniform vec2 tex_size; /* The texture size in pixels */
uniform vec2 tex_step;
@@ -47,5 +49,5 @@ void main(void) {
yCoord = center.y + vec4(-2.0 * tex_step.y,
-tex_step.y, tex_step.y, 2.0 * tex_step.y);
- gl_Position = vertexIn;
+ gl_Position = proj_matrix * vertexIn;
}
diff --git a/src/apps/qcam/assets/shader/identity.vert b/src/apps/qcam/assets/shader/identity.vert
index 12c41377..907e8741 100644
--- a/src/apps/qcam/assets/shader/identity.vert
+++ b/src/apps/qcam/assets/shader/identity.vert
@@ -9,10 +9,11 @@ attribute vec4 vertexIn;
attribute vec2 textureIn;
varying vec2 textureOut;
+uniform mat4 proj_matrix;
uniform float stride_factor;
void main(void)
{
- gl_Position = vertexIn;
+ gl_Position = proj_matrix * vertexIn;
textureOut = vec2(textureIn.x * stride_factor, textureIn.y);
}
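
Both vertex shaders now consume a proj_matrix uniform, so the host side must
upload one; an identity matrix reproduces the previous output. A hypothetical
Qt-side sketch (shaderProgram_ stands for the viewfinder's bound
QOpenGLShaderProgram):

    #include <QMatrix4x4>
    #include <QOpenGLShaderProgram>

    /* In the paint path, after binding the program. */
    QMatrix4x4 proj;  /* default-constructed: identity */
    shaderProgram_.setUniformValue("proj_matrix", proj);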
diff --git a/src/apps/qcam/cam_select_dialog.cpp b/src/apps/qcam/cam_select_dialog.cpp
index 3c8b12a9..6b6d0713 100644
--- a/src/apps/qcam/cam_select_dialog.cpp
+++ b/src/apps/qcam/cam_select_dialog.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
*
- * cam_select_dialog.cpp - qcam - Camera Selection dialog
+ * qcam - Camera Selection dialog
*/
#include "cam_select_dialog.h"
@@ -15,7 +15,9 @@
#include <QComboBox>
#include <QDialogButtonBox>
#include <QFormLayout>
+#include <QGuiApplication>
#include <QLabel>
+#include <QScreen>
#include <QString>
CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManager,
@@ -53,6 +55,14 @@ CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManag
layout->addRow("Location:", cameraLocation_);
layout->addRow("Model:", cameraModel_);
layout->addWidget(buttonBox);
+
+ /*
+ * Decrease the minimum width of the dialog to fit on narrow screens,
+ * keeping a 20 pixel margin.
+ */
+ QRect screenGeometry = qGuiApp->primaryScreen()->availableGeometry();
+ if (screenGeometry.width() < minimumWidth())
+ setMinimumWidth(screenGeometry.width() - 20);
}
CameraSelectorDialog::~CameraSelectorDialog() = default;
diff --git a/src/apps/qcam/cam_select_dialog.h b/src/apps/qcam/cam_select_dialog.h
index 0b7709ed..4bec9ea9 100644
--- a/src/apps/qcam/cam_select_dialog.h
+++ b/src/apps/qcam/cam_select_dialog.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
*
- * cam_select_dialog.h - qcam - Camera Selection dialog
+ * qcam - Camera Selection dialog
*/
#pragma once
diff --git a/src/apps/qcam/format_converter.cpp b/src/apps/qcam/format_converter.cpp
index de76b32c..b025a3c7 100644
--- a/src/apps/qcam/format_converter.cpp
+++ b/src/apps/qcam/format_converter.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * format_convert.cpp - qcam - Convert buffer to RGB
+ * Convert buffer to RGB
*/
#include "format_converter.h"
@@ -249,7 +249,7 @@ void FormatConverter::convertYUVPacked(const Image *srcImage, unsigned char *dst
dst_stride = width_ * 4;
for (src_y = 0, dst_y = 0; dst_y < height_; src_y++, dst_y++) {
- for (src_x = 0, dst_x = 0; dst_x < width_; ) {
+ for (src_x = 0, dst_x = 0; dst_x < width_;) {
cb = src[src_y * src_stride + src_x * 4 + cb_pos_];
cr = src[src_y * src_stride + src_x * 4 + cr_pos];
diff --git a/src/apps/qcam/format_converter.h b/src/apps/qcam/format_converter.h
index 37dbfae2..819a87a5 100644
--- a/src/apps/qcam/format_converter.h
+++ b/src/apps/qcam/format_converter.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * format_convert.h - qcam - Convert buffer to RGB
+ * Convert buffer to RGB
*/
#pragma once
diff --git a/src/apps/qcam/main.cpp b/src/apps/qcam/main.cpp
index 36cb93a5..d0bde141 100644
--- a/src/apps/qcam/main.cpp
+++ b/src/apps/qcam/main.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main.cpp - qcam - The libcamera GUI test application
+ * qcam - The libcamera GUI test application
*/
#include <signal.h>
@@ -21,6 +21,8 @@
using namespace libcamera;
+namespace {
+
void signalHandler([[maybe_unused]] int signal)
{
qInfo() << "Exiting";
@@ -52,6 +54,8 @@ OptionsParser::Options parseOptions(int argc, char *argv[])
return options;
}
+} /* namespace */
+
int main(int argc, char **argv)
{
QApplication app(argc, argv);
diff --git a/src/apps/qcam/main_window.cpp b/src/apps/qcam/main_window.cpp
index 0f16c038..3880a846 100644
--- a/src/apps/qcam/main_window.cpp
+++ b/src/apps/qcam/main_window.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main_window.cpp - qcam - Main application window
+ * qcam - Main application window
*/
#include "main_window.h"
@@ -37,16 +37,6 @@
using namespace libcamera;
-#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0)
-/*
- * Qt::fixed was introduced in v5.14, and ::fixed deprecated in v5.15. Allow
- * usage of Qt::fixed unconditionally.
- */
-namespace Qt {
-constexpr auto fixed = ::fixed;
-} /* namespace Qt */
-#endif
-
/**
* \brief Custom QEvent to signal capture completion
*/
@@ -190,7 +180,7 @@ int MainWindow::createToolbars()
action = toolbar_->addAction(QIcon::fromTheme("application-exit",
QIcon(":x-circle.svg")),
"Quit");
- action->setShortcut(Qt::CTRL | Qt::Key_Q);
+ action->setShortcut(QKeySequence::Quit);
connect(action, &QAction::triggered, this, &MainWindow::quit);
/* Camera selector. */
@@ -221,7 +211,7 @@ int MainWindow::createToolbars()
action->setShortcut(QKeySequence::SaveAs);
connect(action, &QAction::triggered, this, &MainWindow::saveImageAs);
-#ifdef HAVE_DNG
+#ifdef HAVE_TIFF
/* Save Raw action. */
action = toolbar_->addAction(QIcon::fromTheme("camera-photo",
QIcon(":aperture.svg")),
@@ -261,16 +251,14 @@ void MainWindow::updateTitle()
void MainWindow::switchCamera()
{
/* Get and acquire the new camera. */
- std::string newCameraId = chooseCamera();
+ std::shared_ptr<Camera> cam = chooseCamera();
- if (newCameraId.empty())
+ if (!cam)
return;
- if (camera_ && newCameraId == camera_->id())
+ if (camera_ && cam == camera_)
return;
- const std::shared_ptr<Camera> &cam = cm_->get(newCameraId);
-
if (cam->acquire()) {
qInfo() << "Failed to acquire camera" << cam->id().c_str();
return;
@@ -292,40 +280,41 @@ void MainWindow::switchCamera()
startStopAction_->setChecked(true);
/* Display the current cameraId in the toolbar. */
- cameraSelectButton_->setText(QString::fromStdString(newCameraId));
+ cameraSelectButton_->setText(QString::fromStdString(cam->id()));
}
-std::string MainWindow::chooseCamera()
+std::shared_ptr<Camera> MainWindow::chooseCamera()
{
if (cameraSelectorDialog_->exec() != QDialog::Accepted)
- return std::string();
+ return {};
- return cameraSelectorDialog_->getCameraId();
+ std::string id = cameraSelectorDialog_->getCameraId();
+ return cm_->get(id);
}
int MainWindow::openCamera()
{
- std::string cameraName;
-
/*
- * Use the camera specified on the command line, if any, or display the
- * camera selection dialog box otherwise.
+ * If a camera is specified on the command line, get it. Otherwise, if
+ * only one camera is available, pick it automatically; failing that,
+ * display the selector dialog box.
*/
- if (options_.isSet(OptCamera))
- cameraName = static_cast<std::string>(options_[OptCamera]);
- else
- cameraName = chooseCamera();
-
- if (cameraName == "")
- return -EINVAL;
+ if (options_.isSet(OptCamera)) {
+ std::string cameraName = static_cast<std::string>(options_[OptCamera]);
+ camera_ = cm_->get(cameraName);
+ if (!camera_)
+ qInfo() << "Camera" << cameraName.c_str() << "not found";
+ } else {
+ std::vector<std::shared_ptr<Camera>> cameras = cm_->cameras();
+ camera_ = (cameras.size() == 1) ? cameras[0] : chooseCamera();
+ if (!camera_)
+ qInfo() << "No camera detected";
+ }
- /* Get and acquire the camera. */
- camera_ = cm_->get(cameraName);
- if (!camera_) {
- qInfo() << "Camera" << cameraName.c_str() << "not found";
+ if (!camera_)
return -ENODEV;
- }
+ /* Acquire the camera. */
if (camera_->acquire()) {
qInfo() << "Failed to acquire camera";
camera_.reset();
@@ -333,7 +322,7 @@ int MainWindow::openCamera()
}
/* Set the camera switch button with the currently selected Camera id. */
- cameraSelectButton_->setText(QString::fromStdString(cameraName));
+ cameraSelectButton_->setText(QString::fromStdString(camera_->id()));
return 0;
}
@@ -656,7 +645,7 @@ void MainWindow::captureRaw()
void MainWindow::processRaw(FrameBuffer *buffer,
[[maybe_unused]] const ControlList &metadata)
{
-#ifdef HAVE_DNG
+#ifdef HAVE_TIFF
QString defaultPath = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation);
QString filename = QFileDialog::getSaveFileName(this, "Save DNG", defaultPath,
"DNG Files (*.dng)");
diff --git a/src/apps/qcam/main_window.h b/src/apps/qcam/main_window.h
index 2e3e1b5c..81fcf915 100644
--- a/src/apps/qcam/main_window.h
+++ b/src/apps/qcam/main_window.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * main_window.h - qcam - Main application window
+ * qcam - Main application window
*/
#pragma once
@@ -73,7 +73,7 @@ private Q_SLOTS:
private:
int createToolbars();
- std::string chooseCamera();
+ std::shared_ptr<libcamera::Camera> chooseCamera();
int openCamera();
int startCapture();
diff --git a/src/apps/qcam/meson.build b/src/apps/qcam/meson.build
index 6cf4c171..f7c14064 100644
--- a/src/apps/qcam/meson.build
+++ b/src/apps/qcam/meson.build
@@ -1,13 +1,13 @@
# SPDX-License-Identifier: CC0-1.0
-qt5 = import('qt5')
-qt5_dep = dependency('qt5',
+qt6 = import('qt6')
+qt6_dep = dependency('qt6',
method : 'pkg-config',
- modules : ['Core', 'Gui', 'Widgets'],
+ modules : ['Core', 'Gui', 'OpenGL', 'OpenGLWidgets', 'Widgets'],
required : get_option('qcam'),
- version : '>=5.4')
+ version : '>=6.2')
-if not qt5_dep.found()
+if not qt6_dep.found()
qcam_enabled = false
subdir_done()
endif
@@ -20,46 +20,31 @@ qcam_sources = files([
'main.cpp',
'main_window.cpp',
'message_handler.cpp',
+ 'viewfinder_gl.cpp',
'viewfinder_qt.cpp',
])
qcam_moc_headers = files([
'cam_select_dialog.h',
'main_window.h',
+ 'viewfinder_gl.h',
'viewfinder_qt.h',
])
qcam_resources = files([
'assets/feathericons/feathericons.qrc',
+ 'assets/shader/shaders.qrc',
])
-qt5_cpp_args = [apps_cpp_args, '-DQT_NO_KEYWORDS']
+qt6_cpp_args = [
+ apps_cpp_args,
+ '-DQT_NO_KEYWORDS',
+ '-Wno-extra-semi',
+]
-if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget',
- dependencies : qt5_dep, args : '-fPIC')
- qcam_sources += files([
- 'viewfinder_gl.cpp',
- ])
- qcam_moc_headers += files([
- 'viewfinder_gl.h',
- ])
- qcam_resources += files([
- 'assets/shader/shaders.qrc'
- ])
-endif
-
-# gcc 9 introduced a deprecated-copy warning that is triggered by Qt until
-# Qt 5.13. clang 10 introduced the same warning, but detects more issues
-# that are not fixed in Qt yet. Disable the warning manually in both cases.
-if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and
- qt5_dep.version().version_compare('<5.13')) or
- (cc.get_id() == 'clang' and cc.version().version_compare('>=10.0')))
- qt5_cpp_args += ['-Wno-deprecated-copy']
-endif
-
-resources = qt5.preprocess(moc_headers : qcam_moc_headers,
+resources = qt6.preprocess(moc_headers : qcam_moc_headers,
qresources : qcam_resources,
- dependencies : qt5_dep)
+ dependencies : qt6_dep)
qcam = executable('qcam', qcam_sources, resources,
install : true,
@@ -69,6 +54,6 @@ qcam = executable('qcam', qcam_sources, resources,
libatomic,
libcamera_public,
libtiff,
- qt5_dep,
+ qt6_dep,
],
- cpp_args : qt5_cpp_args)
+ cpp_args : qt6_cpp_args)
diff --git a/src/apps/qcam/message_handler.cpp b/src/apps/qcam/message_handler.cpp
index 261623e1..c89714a9 100644
--- a/src/apps/qcam/message_handler.cpp
+++ b/src/apps/qcam/message_handler.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * message_handler.cpp - qcam - Log message handling
+ * qcam - Log message handling
*/
#include "message_handler.h"
diff --git a/src/apps/qcam/message_handler.h b/src/apps/qcam/message_handler.h
index 56294d37..92ef74d1 100644
--- a/src/apps/qcam/message_handler.h
+++ b/src/apps/qcam/message_handler.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * message_handler.cpp - qcam - Log message handling
+ * Log message handling
*/
#pragma once
diff --git a/src/apps/qcam/viewfinder.h b/src/apps/qcam/viewfinder.h
index a57446e8..914f88ec 100644
--- a/src/apps/qcam/viewfinder.h
+++ b/src/apps/qcam/viewfinder.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder.h - qcam - Viewfinder base class
+ * qcam - Viewfinder base class
*/
#pragma once
diff --git a/src/apps/qcam/viewfinder_gl.cpp b/src/apps/qcam/viewfinder_gl.cpp
index f83b99ad..f31956ff 100644
--- a/src/apps/qcam/viewfinder_gl.cpp
+++ b/src/apps/qcam/viewfinder_gl.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Linaro
*
- * viewfinderGL.cpp - OpenGL Viewfinder for rendering by OpenGL shader
+ * OpenGL Viewfinder for rendering by OpenGL shader
*/
#include "viewfinder_gl.h"
@@ -12,6 +12,7 @@
#include <QByteArray>
#include <QFile>
#include <QImage>
+#include <QMatrix4x4>
#include <QStringList>
#include <libcamera/formats.h>
@@ -443,15 +444,11 @@ bool ViewFinderGL::createFragmentShader()
close();
}
- /* Bind shader pipeline for use */
- if (!shaderProgram_.bind()) {
- qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
- close();
- }
-
attributeVertex = shaderProgram_.attributeLocation("vertexIn");
attributeTexture = shaderProgram_.attributeLocation("textureIn");
+ vertexBuffer_.bind();
+
shaderProgram_.enableAttributeArray(attributeVertex);
shaderProgram_.setAttributeBuffer(attributeVertex,
GL_FLOAT,
@@ -466,6 +463,9 @@ bool ViewFinderGL::createFragmentShader()
2,
2 * sizeof(GLfloat));
+ vertexBuffer_.release();
+
+ projMatrixUniform_ = shaderProgram_.uniformLocation("proj_matrix");
textureUniformY_ = shaderProgram_.uniformLocation("tex_y");
textureUniformU_ = shaderProgram_.uniformLocation("tex_u");
textureUniformV_ = shaderProgram_.uniformLocation("tex_v");
@@ -511,7 +511,7 @@ void ViewFinderGL::initializeGL()
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
- static const GLfloat coordinates[2][4][2]{
+ const GLfloat coordinates[2][4][2]{
{
/* Vertex coordinates */
{ -1.0f, -1.0f },
@@ -535,8 +535,6 @@ void ViewFinderGL::initializeGL()
/* Create Vertex Shader */
if (!createVertexShader())
qWarning() << "[ViewFinderGL]: create vertex shader failed.";
-
- glClearColor(1.0f, 1.0f, 1.0f, 0.0f);
}
void ViewFinderGL::doRender()
@@ -805,28 +803,42 @@ void ViewFinderGL::doRender()
shaderProgram_.setUniformValue(textureUniformStrideFactor_,
static_cast<float>(size_.width() - 1) /
(stridePixels - 1));
+
+ /*
+ * Place the viewfinder in the centre of the widget, preserving the
+ * aspect ratio of the image.
+ */
+ QMatrix4x4 projMatrix;
+ QSizeF scaledSize = size_.scaled(size(), Qt::KeepAspectRatio);
+ projMatrix.scale(scaledSize.width() / size().width(),
+ scaledSize.height() / size().height());
+
+ shaderProgram_.setUniformValue(projMatrixUniform_, projMatrix);
}
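The scale factors fed into proj_matrix above are simply the ratio of the aspect-fitted image size to the widget size (the vertex shader, not shown in this hunk, is assumed to multiply the vertex positions by proj_matrix). The geometry in isolation, as a self-contained sketch using only Qt types:

	#include <QMatrix4x4>
	#include <QSizeF>

	/* Sketch: letterbox scale for an image displayed in a widget. */
	QMatrix4x4 letterboxMatrix(const QSizeF &image, const QSizeF &widget)
	{
		/* Largest size with the image's aspect ratio that fits. */
		QSizeF scaled = image.scaled(widget, Qt::KeepAspectRatio);

		QMatrix4x4 proj; /* Starts as identity. */
		proj.scale(scaled.width() / widget.width(),
			   scaled.height() / widget.height());
		return proj;
	}

Scaling the unit quad this way shrinks it along one axis only, which both centres the image and leaves the letterbox bands to the clear colour.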
void ViewFinderGL::paintGL()
{
- if (!fragmentShader_)
+ if (!fragmentShader_) {
if (!createFragmentShader()) {
qWarning() << "[ViewFinderGL]:"
<< "create fragment shader failed.";
}
+ }
- if (image_) {
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
-
- doRender();
- glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+ /* Bind shader pipeline for use. */
+ if (!shaderProgram_.bind()) {
+ qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
+ close();
}
-}
-void ViewFinderGL::resizeGL(int w, int h)
-{
- glViewport(0, 0, w, h);
+ if (!image_)
+ return;
+
+ glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ doRender();
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
}
QSize ViewFinderGL::sizeHint() const
diff --git a/src/apps/qcam/viewfinder_gl.h b/src/apps/qcam/viewfinder_gl.h
index 68c2912d..23c657bc 100644
--- a/src/apps/qcam/viewfinder_gl.h
+++ b/src/apps/qcam/viewfinder_gl.h
@@ -2,8 +2,7 @@
/*
* Copyright (C) 2020, Linaro
*
- * viewfinder_GL.h - OpenGL Viewfinder for rendering by OpenGL shader
- *
+ * OpenGL Viewfinder for rendering by OpenGL shader
*/
#pragma once
@@ -52,7 +51,6 @@ Q_SIGNALS:
protected:
void initializeGL() override;
void paintGL() override;
- void resizeGL(int w, int h) override;
QSize sizeHint() const override;
private:
@@ -89,6 +87,7 @@ private:
/* Common texture parameters */
GLuint textureMinMagFilters_;
+ GLuint projMatrixUniform_;
/* YUV texture parameters */
GLuint textureUniformU_;
diff --git a/src/apps/qcam/viewfinder_qt.cpp b/src/apps/qcam/viewfinder_qt.cpp
index 62ed5e7c..1a238922 100644
--- a/src/apps/qcam/viewfinder_qt.cpp
+++ b/src/apps/qcam/viewfinder_qt.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder_qt.cpp - qcam - QPainter-based ViewFinder
+ * qcam - QPainter-based ViewFinder
*/
#include "viewfinder_qt.h"
@@ -18,6 +18,7 @@
#include <QMap>
#include <QMutexLocker>
#include <QPainter>
+#include <QResizeEvent>
#include <QtDebug>
#include "../common/image.h"
@@ -26,23 +27,23 @@
static const QMap<libcamera::PixelFormat, QImage::Format> nativeFormats
{
-#if QT_VERSION >= QT_VERSION_CHECK(5, 2, 0)
{ libcamera::formats::ABGR8888, QImage::Format_RGBX8888 },
{ libcamera::formats::XBGR8888, QImage::Format_RGBX8888 },
-#endif
{ libcamera::formats::ARGB8888, QImage::Format_RGB32 },
{ libcamera::formats::XRGB8888, QImage::Format_RGB32 },
-#if QT_VERSION >= QT_VERSION_CHECK(5, 14, 0)
{ libcamera::formats::RGB888, QImage::Format_BGR888 },
-#endif
{ libcamera::formats::BGR888, QImage::Format_RGB888 },
{ libcamera::formats::RGB565, QImage::Format_RGB16 },
};
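With Qt 6.2 as the new baseline, the version guards around the map entries are dead code; every listed QImage format is available unconditionally. A format probe then reduces to a plain map lookup (sketch, mirroring how the viewfinder is expected to use the table):

	/* Sketch: map a libcamera pixel format to a QImage format. */
	QImage::Format imageFormat(const libcamera::PixelFormat &pixelFormat)
	{
		return nativeFormats.value(pixelFormat, QImage::Format_Invalid);
	}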
ViewFinderQt::ViewFinderQt(QWidget *parent)
- : QWidget(parent), buffer_(nullptr)
+ : QWidget(parent), place_(rect()), buffer_(nullptr)
{
icon_ = QIcon(":camera-off.svg");
+
+ QPalette pal = palette();
+ pal.setColor(QPalette::Window, Qt::black);
+ setPalette(pal);
}
ViewFinderQt::~ViewFinderQt()
@@ -117,6 +118,11 @@ void ViewFinderQt::render(libcamera::FrameBuffer *buffer, Image *image)
}
}
+	/*
+	 * Indicate that the widget paints all its pixels, to optimize
+	 * rendering by skipping erasing the widget before painting.
+	 */
+ setAttribute(Qt::WA_OpaquePaintEvent, true);
update();
if (buffer)
@@ -132,6 +138,11 @@ void ViewFinderQt::stop()
buffer_ = nullptr;
}
+	/*
+	 * The logo has a transparent background; re-enable erasing the
+	 * widget before painting.
+	 */
+ setAttribute(Qt::WA_OpaquePaintEvent, false);
update();
}
@@ -146,9 +157,23 @@ void ViewFinderQt::paintEvent(QPaintEvent *)
{
QPainter painter(this);
- /* If we have an image, draw it. */
+ painter.setBrush(palette().window());
+
+ /* If we have an image, draw it, with black letterbox rectangles. */
if (!image_.isNull()) {
- painter.drawImage(rect(), image_, image_.rect());
+ if (place_.width() < width()) {
+ QRect rect{ 0, 0, (width() - place_.width()) / 2, height() };
+ painter.drawRect(rect);
+ rect.moveLeft(place_.right());
+ painter.drawRect(rect);
+ } else {
+ QRect rect{ 0, 0, width(), (height() - place_.height()) / 2 };
+ painter.drawRect(rect);
+ rect.moveTop(place_.bottom());
+ painter.drawRect(rect);
+ }
+
+ painter.drawImage(place_, image_, image_.rect());
return;
}
@@ -173,7 +198,6 @@ void ViewFinderQt::paintEvent(QPaintEvent *)
else
point.setY((height() - pixmap_.height()) / 2);
- painter.setBackgroundMode(Qt::OpaqueMode);
painter.drawPixmap(point, pixmap_);
}
@@ -181,3 +205,14 @@ QSize ViewFinderQt::sizeHint() const
{
return size_.isValid() ? size_ : QSize(640, 480);
}
+
+void ViewFinderQt::resizeEvent(QResizeEvent *event)
+{
+ if (!size_.isValid())
+ return;
+
+ place_.setSize(size_.scaled(event->size(), Qt::KeepAspectRatio));
+ place_.moveCenter(rect().center());
+
+ QWidget::resizeEvent(event);
+}
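resizeEvent() above performs the GL path's aspect fit on the CPU side: scale the frame size into the new widget size with Qt::KeepAspectRatio, then centre the resulting rectangle. The same computation as a standalone sketch (the real code additionally bails out while size_ is not yet valid):

	#include <QRect>
	#include <QSize>

	/* Sketch: centred, aspect-preserving placement of a frame in a widget. */
	QRect placeFrame(const QSize &frame, const QSize &widget)
	{
		QRect place;
		place.setSize(frame.scaled(widget, Qt::KeepAspectRatio));
		place.moveCenter(QRect(QPoint(0, 0), widget).center());
		return place;
	}

paintEvent() then only has to fill the two strips on the short axis, which is exactly what the two drawRect() calls above do.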
diff --git a/src/apps/qcam/viewfinder_qt.h b/src/apps/qcam/viewfinder_qt.h
index eb3a9988..50fde88e 100644
--- a/src/apps/qcam/viewfinder_qt.h
+++ b/src/apps/qcam/viewfinder_qt.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * viewfinder_qt.h - qcam - QPainter-based ViewFinder
+ * qcam - QPainter-based ViewFinder
*/
#pragma once
@@ -44,6 +44,7 @@ Q_SIGNALS:
protected:
void paintEvent(QPaintEvent *) override;
+ void resizeEvent(QResizeEvent *) override;
QSize sizeHint() const override;
private:
@@ -51,6 +52,7 @@ private:
libcamera::PixelFormat format_;
QSize size_;
+ QRect place_;
/* Camera stopped icon */
QSize vfSize_;
diff --git a/src/gstreamer/gstlibcamera-controls.cpp.in b/src/gstreamer/gstlibcamera-controls.cpp.in
new file mode 100644
index 00000000..d937b19e
--- /dev/null
+++ b/src/gstreamer/gstlibcamera-controls.cpp.in
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Jaslo Ziska
+ *
+ * GStreamer Camera Controls
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include <vector>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+
+#include "gstlibcamera-controls.h"
+
+using namespace libcamera;
+
+static void value_set_rectangle(GValue *value, const Rectangle &rect)
+{
+ Point top_left = rect.topLeft();
+ Size size = rect.size();
+
+ GValue x = G_VALUE_INIT;
+ g_value_init(&x, G_TYPE_INT);
+ g_value_set_int(&x, top_left.x);
+ gst_value_array_append_and_take_value(value, &x);
+
+ GValue y = G_VALUE_INIT;
+ g_value_init(&y, G_TYPE_INT);
+ g_value_set_int(&y, top_left.y);
+ gst_value_array_append_and_take_value(value, &y);
+
+ GValue width = G_VALUE_INIT;
+ g_value_init(&width, G_TYPE_INT);
+ g_value_set_int(&width, size.width);
+ gst_value_array_append_and_take_value(value, &width);
+
+ GValue height = G_VALUE_INIT;
+ g_value_init(&height, G_TYPE_INT);
+ g_value_set_int(&height, size.height);
+ gst_value_array_append_and_take_value(value, &height);
+}
+
+static Rectangle value_get_rectangle(const GValue *value)
+{
+ const GValue *r;
+ r = gst_value_array_get_value(value, 0);
+ int x = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 1);
+ int y = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 2);
+ int w = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 3);
+ int h = g_value_get_int(r);
+
+ return Rectangle(x, y, w, h);
+}
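These two helpers serialise a libcamera Rectangle as a flat four-element GStreamer value array in x, y, width, height order. A round-trip sketch, assuming gst_init() has run:

	GValue value = G_VALUE_INIT;
	g_value_init(&value, GST_TYPE_ARRAY);

	value_set_rectangle(&value, Rectangle(10, 20, 640, 480));
	Rectangle rect = value_get_rectangle(&value);
	/* rect is now { x: 10, y: 20, width: 640, height: 480 }. */

	g_value_unset(&value);

Note that value_get_rectangle() assumes a well-formed four-element array; the size checks happen in the generated setProperty() code below before it is called.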
+
+{% for vendor, ctrls in controls %}
+{%- for ctrl in ctrls if ctrl.is_enum %}
+static const GEnumValue {{ ctrl.name|snake_case }}_types[] = {
+{%- for enum in ctrl.enum_values %}
+ {
+ controls::{{ ctrl.namespace }}{{ enum.name }},
+ {{ enum.description|format_description|indent_str('\t\t') }},
+ "{{ enum.gst_name }}"
+ },
+{%- endfor %}
+ {0, NULL, NULL}
+};
+
+#define TYPE_{{ ctrl.name|snake_case|upper }} \
+ ({{ ctrl.name|snake_case }}_get_type())
+static GType {{ ctrl.name|snake_case }}_get_type()
+{
+ static GType {{ ctrl.name|snake_case }}_type = 0;
+
+ if (!{{ ctrl.name|snake_case }}_type)
+ {{ ctrl.name|snake_case }}_type =
+ g_enum_register_static("{{ ctrl.name }}",
+ {{ ctrl.name|snake_case }}_types);
+
+ return {{ ctrl.name|snake_case }}_type;
+}
+{% endfor %}
+{%- endfor %}
+
+void GstCameraControls::installProperties(GObjectClass *klass, int lastPropId)
+{
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+{%- set spec %}
+{%- if ctrl.is_rectangle -%}
+gst_param_spec_array(
+{%- else -%}
+g_param_spec_{{ ctrl.gtype }}(
+{%- endif -%}
+{%- if ctrl.is_array %}
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}-value",
+ "{{ ctrl.name }} Value",
+ "One {{ ctrl.name }} element value",
+{%- else %}
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}",
+ "{{ ctrl.name }}",
+ {{ ctrl.description|format_description|indent_str('\t') }},
+{%- endif %}
+{%- if ctrl.is_enum %}
+ TYPE_{{ ctrl.name|snake_case|upper }},
+ {{ ctrl.default }},
+{%- elif ctrl.is_rectangle %}
+ g_param_spec_int(
+ "rectangle-value",
+ "Rectangle Value",
+ "One rectangle value, either x, y, width or height.",
+ {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }},
+ (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+ ),
+{%- elif ctrl.gtype == 'boolean' %}
+ {{ ctrl.default }},
+{%- elif ctrl.gtype in ['float', 'int', 'int64', 'uchar'] %}
+ {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }},
+{%- endif %}
+ (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+)
+{%- endset %}
+
+ g_object_class_install_property(
+ klass,
+ lastPropId + controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }},
+{%- if ctrl.is_array %}
+ gst_param_spec_array(
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}",
+ "{{ ctrl.name }}",
+ {{ ctrl.description|format_description|indent_str('\t\t\t') }},
+ {{ spec|indent_str('\t\t\t') }},
+ (GParamFlags) (GST_PARAM_CONTROLLABLE |
+ G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+ )
+{%- else %}
+ {{ spec|indent_str('\t\t') }}
+{%- endif %}
+ );
+{%- endfor %}
+{%- endfor %}
+}
+
+bool GstCameraControls::getProperty(guint propId, GValue *value,
+ [[maybe_unused]] GParamSpec *pspec)
+{
+ if (!controls_acc_.contains(propId)) {
+ GST_WARNING("Control '%s' is not available, default value will "
+ "be returned",
+ controls::controls.at(propId)->name().c_str());
+ return true;
+ }
+ const ControlValue &cv = controls_acc_.get(propId);
+
+ switch (propId) {
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+ case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: {
+ auto control = cv.get<{{ ctrl.type }}>();
+
+{%- if ctrl.is_array %}
+ for (size_t i = 0; i < control.size(); ++i) {
+ GValue element = G_VALUE_INIT;
+{%- if ctrl.is_rectangle %}
+ g_value_init(&element, GST_TYPE_PARAM_ARRAY_LIST);
+ value_set_rectangle(&element, control[i]);
+{%- else %}
+ g_value_init(&element, G_TYPE_{{ ctrl.gtype|upper }});
+ g_value_set_{{ ctrl.gtype }}(&element, control[i]);
+{%- endif %}
+ gst_value_array_append_and_take_value(value, &element);
+ }
+{%- else %}
+{%- if ctrl.is_rectangle %}
+ value_set_rectangle(value, control);
+{%- else %}
+ g_value_set_{{ ctrl.gtype }}(value, control);
+{%- endif %}
+{%- endif %}
+
+ return true;
+ }
+{%- endfor %}
+{%- endfor %}
+
+ default:
+ return false;
+ }
+}
+
+bool GstCameraControls::setProperty(guint propId, const GValue *value,
+ [[maybe_unused]] GParamSpec *pspec)
+{
+ /*
+ * Check whether the camera capabilities are already available.
+ * They might not be available if the pipeline has not started yet.
+ */
+ if (!capabilities_.empty()) {
+ /* If so, check that the control is supported by the camera. */
+ const ControlId *cid = capabilities_.idmap().at(propId);
+ auto info = capabilities_.find(cid);
+
+ if (info == capabilities_.end()) {
+ GST_WARNING("Control '%s' is not supported by the "
+ "camera and will be ignored",
+ cid->name().c_str());
+ return true;
+ }
+ }
+
+ switch (propId) {
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+ case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: {
+ ControlValue control;
+{%- if ctrl.is_array %}
+ size_t size = gst_value_array_get_size(value);
+{%- if ctrl.size != 0 %}
+ if (size != {{ ctrl.size }}) {
+ GST_ERROR("Incorrect array size for control "
+ "'{{ ctrl.name|kebab_case }}', must be of "
+ "size {{ ctrl.size }}");
+ return true;
+ }
+{%- endif %}
+
+ std::vector<{{ ctrl.element_type }}> values(size);
+ for (size_t i = 0; i < size; ++i) {
+ const GValue *element =
+ gst_value_array_get_value(value, i);
+{%- if ctrl.is_rectangle %}
+ if (gst_value_array_get_size(element) != 4) {
+ GST_ERROR("Rectangle in control "
+ "'{{ ctrl.name|kebab_case }}' at"
+ "index %zu must be an array of size 4",
+ i);
+ return true;
+ }
+ values[i] = value_get_rectangle(element);
+{%- else %}
+ values[i] = g_value_get_{{ ctrl.gtype }}(element);
+{%- endif %}
+ }
+
+{%- if ctrl.size == 0 %}
+ control.set(Span<const {{ ctrl.element_type }}>(values.data(),
+ size));
+{%- else %}
+ control.set(Span<const {{ ctrl.element_type }},
+ {{ ctrl.size }}>(values.data(),
+ {{ ctrl.size }}));
+{%- endif %}
+{%- else %}
+{%- if ctrl.is_rectangle %}
+ if (gst_value_array_get_size(value) != 4) {
+ GST_ERROR("Rectangle in control "
+ "'{{ ctrl.name|kebab_case }}' must be an "
+ "array of size 4");
+ return true;
+ }
+ Rectangle val = value_get_rectangle(value);
+{%- else %}
+ auto val = g_value_get_{{ ctrl.gtype }}(value);
+{%- endif %}
+ control.set(val);
+{%- endif %}
+ controls_.set(propId, control);
+ controls_acc_.set(propId, control);
+ return true;
+ }
+{%- endfor %}
+{%- endfor %}
+
+ default:
+ return false;
+ }
+}
+
+void GstCameraControls::setCamera(const std::shared_ptr<libcamera::Camera> &cam)
+{
+ capabilities_ = cam->controls();
+
+ /*
+ * Check the controls which were set before the camera capabilities were
+ * known. This is required because GStreamer may set properties before
+ * the pipeline has started and thus before the camera was known.
+ */
+ ControlList new_controls;
+ for (auto control = controls_acc_.begin();
+ control != controls_acc_.end();
+ ++control) {
+ unsigned int id = control->first;
+ ControlValue value = control->second;
+
+ const ControlId *cid = capabilities_.idmap().at(id);
+ auto info = capabilities_.find(cid);
+
+ /* Only add controls which are supported. */
+ if (info != capabilities_.end())
+ new_controls.set(id, value);
+ else
+ GST_WARNING("Control '%s' is not supported by the "
+ "camera and will be ignored",
+ cid->name().c_str());
+ }
+
+ controls_acc_ = new_controls;
+ controls_ = new_controls;
+}
+
+void GstCameraControls::applyControls(std::unique_ptr<libcamera::Request> &request)
+{
+ request->controls().merge(controls_);
+ controls_.clear();
+}
+
+void GstCameraControls::readMetadata(libcamera::Request *request)
+{
+ controls_acc_.merge(request->metadata(),
+ ControlList::MergePolicy::OverwriteExisting);
+}
diff --git a/src/gstreamer/gstlibcamera-controls.h b/src/gstreamer/gstlibcamera-controls.h
new file mode 100644
index 00000000..749220b5
--- /dev/null
+++ b/src/gstreamer/gstlibcamera-controls.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Collabora Ltd.
+ * Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+ *
+ * GStreamer Camera Controls
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/camera.h>
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "gstlibcamerasrc.h"
+
+namespace libcamera {
+
+class GstCameraControls
+{
+public:
+ static void installProperties(GObjectClass *klass, int lastProp);
+
+ bool getProperty(guint propId, GValue *value, GParamSpec *pspec);
+ bool setProperty(guint propId, const GValue *value, GParamSpec *pspec);
+
+ void setCamera(const std::shared_ptr<libcamera::Camera> &cam);
+
+ void applyControls(std::unique_ptr<libcamera::Request> &request);
+ void readMetadata(libcamera::Request *request);
+
+private:
+	/* Supported controls and limits of the camera. */
+	ControlInfoMap capabilities_;
+	/* Set of user-modified controls. */
+	ControlList controls_;
+	/* Accumulator of all controls ever set and of metadata returned by the camera. */
+	ControlList controls_acc_;
+};
+
+} /* namespace libcamera */
diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp
index 469ac810..a466b305 100644
--- a/src/gstreamer/gstlibcamera-utils.cpp
+++ b/src/gstreamer/gstlibcamera-utils.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.c - GStreamer libcamera Utility Function
+ * GStreamer libcamera Utility Functions
*/
#include "gstlibcamera-utils.h"
@@ -85,7 +85,7 @@ static struct {
};
static GstVideoColorimetry
-colorimetry_from_colorspace(const ColorSpace &colorSpace)
+colorimetry_from_colorspace(const ColorSpace &colorSpace, GstVideoTransferFunction transfer)
{
GstVideoColorimetry colorimetry;
@@ -113,6 +113,8 @@ colorimetry_from_colorspace(const ColorSpace &colorSpace)
break;
case ColorSpace::TransferFunction::Rec709:
colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
+ if (transfer != GST_VIDEO_TRANSFER_UNKNOWN)
+ colorimetry.transfer = transfer;
break;
}
@@ -144,7 +146,8 @@ colorimetry_from_colorspace(const ColorSpace &colorSpace)
}
static std::optional<ColorSpace>
-colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry)
+colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry,
+ GstVideoTransferFunction *transfer)
{
std::optional<ColorSpace> colorspace = ColorSpace::Raw;
@@ -188,6 +191,7 @@ colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry)
case GST_VIDEO_TRANSFER_BT2020_12:
case GST_VIDEO_TRANSFER_BT709:
colorspace->transferFunction = ColorSpace::TransferFunction::Rec709;
+ *transfer = colorimetry.transfer;
break;
default:
GST_WARNING("Colorimetry transfer function %d not mapped in gstlibcamera",
@@ -254,52 +258,50 @@ gst_format_to_pixel_format(GstVideoFormat gst_format)
return PixelFormat{};
}
+static const struct {
+ PixelFormat format;
+ const gchar *name;
+} bayer_map[]{
+ { formats::SBGGR8, "bggr" },
+ { formats::SGBRG8, "gbrg" },
+ { formats::SGRBG8, "grbg" },
+ { formats::SRGGB8, "rggb" },
+ { formats::SBGGR10, "bggr10le" },
+ { formats::SGBRG10, "gbrg10le" },
+ { formats::SGRBG10, "grbg10le" },
+ { formats::SRGGB10, "rggb10le" },
+ { formats::SBGGR12, "bggr12le" },
+ { formats::SGBRG12, "gbrg12le" },
+ { formats::SGRBG12, "grbg12le" },
+ { formats::SRGGB12, "rggb12le" },
+ { formats::SBGGR14, "bggr14le" },
+ { formats::SGBRG14, "gbrg14le" },
+ { formats::SGRBG14, "grbg14le" },
+ { formats::SRGGB14, "rggb14le" },
+ { formats::SBGGR16, "bggr16le" },
+ { formats::SGBRG16, "gbrg16le" },
+ { formats::SGRBG16, "grbg16le" },
+ { formats::SRGGB16, "rggb16le" },
+};
+
static const gchar *
-bayer_format_to_string(int format)
+bayer_format_to_string(PixelFormat format)
{
- switch (format) {
- case formats::SBGGR8:
- return "bggr";
- case formats::SGBRG8:
- return "gbrg";
- case formats::SGRBG8:
- return "grbg";
- case formats::SRGGB8:
- return "rggb";
- case formats::SBGGR10:
- return "bggr10le";
- case formats::SGBRG10:
- return "gbrg10le";
- case formats::SGRBG10:
- return "grbg10le";
- case formats::SRGGB10:
- return "rggb10le";
- case formats::SBGGR12:
- return "bggr12le";
- case formats::SGBRG12:
- return "gbrg12le";
- case formats::SGRBG12:
- return "grbg12le";
- case formats::SRGGB12:
- return "rggb12le";
- case formats::SBGGR14:
- return "bggr14le";
- case formats::SGBRG14:
- return "gbrg14le";
- case formats::SGRBG14:
- return "grbg14le";
- case formats::SRGGB14:
- return "rggb14le";
- case formats::SBGGR16:
- return "bggr16le";
- case formats::SGBRG16:
- return "gbrg16le";
- case formats::SGRBG16:
- return "grbg16le";
- case formats::SRGGB16:
- return "rggb16le";
+ for (auto &b : bayer_map) {
+ if (b.format == format)
+ return b.name;
+ }
+ return nullptr;
+}
+
+static PixelFormat
+bayer_format_from_string(const gchar *name)
+{
+ for (auto &b : bayer_map) {
+ if (strcmp(b.name, name) == 0)
+ return b.format;
}
- return NULL;
+ return PixelFormat{};
}
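Replacing the twin switch statements with a single table keeps the two directions of the mapping from drifting apart; the names follow the GStreamer video/x-bayer format convention. Usage is symmetric (sketch):

	const gchar *name = bayer_format_to_string(formats::SRGGB10); /* "rggb10le" */
	PixelFormat fmt = bayer_format_from_string("rggb10le");       /* formats::SRGGB10 */

	/* Unknown inputs fail soft: nullptr, or an invalid PixelFormat{}. */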
static GstStructure *
@@ -359,13 +361,21 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
GValue val = G_VALUE_INIT;
g_value_init(&val, GST_TYPE_INT_RANGE);
- gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep);
- gst_structure_set_value(s, "width", &val);
- gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep);
- gst_structure_set_value(s, "height", &val);
+ if (range.min.width == range.max.width) {
+ gst_structure_set(s, "width", G_TYPE_INT, range.min.width, nullptr);
+ } else {
+ gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep);
+ gst_structure_set_value(s, "width", &val);
+ }
+ if (range.min.height == range.max.height) {
+ gst_structure_set(s, "height", G_TYPE_INT, range.min.height, nullptr);
+ } else {
+ gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep);
+ gst_structure_set_value(s, "height", &val);
+ }
g_value_unset(&val);
- gst_caps_append_structure(caps, s);
+ caps = gst_caps_merge_structure(caps, s);
}
}
@@ -373,7 +383,8 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
}
GstCaps *
-gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg)
+gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg,
+ GstVideoTransferFunction transfer)
{
GstCaps *caps = gst_caps_new_empty();
GstStructure *s = bare_structure_from_format(stream_cfg.pixelFormat);
@@ -384,8 +395,8 @@ gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg
nullptr);
if (stream_cfg.colorSpace) {
- GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value());
- gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry);
+ GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value(), transfer);
+ g_autofree gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry);
if (colorimetry_str)
gst_structure_set(s, "colorimetry", G_TYPE_STRING, colorimetry_str, nullptr);
@@ -399,9 +410,8 @@ gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg
return caps;
}
-void
-gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
- GstCaps *caps)
+void gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
+ GstCaps *caps, GstVideoTransferFunction *transfer)
{
GstVideoFormat gst_format = pixel_format_to_gst_format(stream_cfg.pixelFormat);
guint i;
@@ -466,6 +476,9 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
const gchar *format = gst_structure_get_string(s, "format");
gst_format = gst_video_format_from_string(format);
stream_cfg.pixelFormat = gst_format_to_pixel_format(gst_format);
+ } else if (gst_structure_has_name(s, "video/x-bayer")) {
+ const gchar *format = gst_structure_get_string(s, "format");
+ stream_cfg.pixelFormat = bayer_format_from_string(format);
} else if (gst_structure_has_name(s, "image/jpeg")) {
stream_cfg.pixelFormat = formats::MJPEG;
} else {
@@ -486,7 +499,7 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str))
g_critical("Invalid colorimetry %s", colorimetry_str);
- stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry);
+ stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry, transfer);
}
}
diff --git a/src/gstreamer/gstlibcamera-utils.h b/src/gstreamer/gstlibcamera-utils.h
index 6adeab0e..4978987c 100644
--- a/src/gstreamer/gstlibcamera-utils.h
+++ b/src/gstreamer/gstlibcamera-utils.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.h - GStreamer libcamera Utility Functions
+ * GStreamer libcamera Utility Functions
*/
#pragma once
@@ -16,9 +16,10 @@
#include <gst/video/video.h>
GstCaps *gst_libcamera_stream_formats_to_caps(const libcamera::StreamFormats &formats);
-GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg);
+GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg,
+ GstVideoTransferFunction transfer);
void gst_libcamera_configure_stream_from_caps(libcamera::StreamConfiguration &stream_cfg,
- GstCaps *caps);
+ GstCaps *caps, GstVideoTransferFunction *transfer);
void gst_libcamera_get_framerate_from_caps(GstCaps *caps, GstStructure *element_caps);
void gst_libcamera_clamp_and_set_frameduration(libcamera::ControlList &controls,
const libcamera::ControlInfoMap &camera_controls,
diff --git a/src/gstreamer/gstlibcamera.cpp b/src/gstreamer/gstlibcamera.cpp
index 52388b5e..bff98979 100644
--- a/src/gstreamer/gstlibcamera.cpp
+++ b/src/gstreamer/gstlibcamera.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera.c - GStreamer plugin
+ * GStreamer plugin
*/
#include "gstlibcameraprovider.h"
diff --git a/src/gstreamer/gstlibcameraallocator.cpp b/src/gstreamer/gstlibcameraallocator.cpp
index c740b8fc..d4492d99 100644
--- a/src/gstreamer/gstlibcameraallocator.cpp
+++ b/src/gstreamer/gstlibcameraallocator.cpp
@@ -3,11 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.cpp - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
#include "gstlibcameraallocator.h"
+#include <utility>
+
#include <libcamera/camera.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/stream.h>
@@ -100,6 +102,11 @@ struct _GstLibcameraAllocator {
* FrameWrap.
*/
GHashTable *pools;
+ /*
+ * The camera manager represents the library, which needs to be kept
+ * alive until all the memory has been released.
+ */
+ std::shared_ptr<CameraManager> *cm_ptr;
};
G_DEFINE_TYPE(GstLibcameraAllocator, gst_libcamera_allocator,
@@ -173,6 +180,9 @@ gst_libcamera_allocator_finalize(GObject *object)
delete self->fb_allocator;
+ /* Keep last. */
+ delete self->cm_ptr;
+
G_OBJECT_CLASS(gst_libcamera_allocator_parent_class)->finalize(object);
}
@@ -191,16 +201,20 @@ GstLibcameraAllocator *
gst_libcamera_allocator_new(std::shared_ptr<Camera> camera,
CameraConfiguration *config_)
{
- auto *self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
- nullptr));
+ g_autoptr(GstLibcameraAllocator) self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
+ nullptr));
+ gint ret;
+
+ self->cm_ptr = new std::shared_ptr<CameraManager>(gst_libcamera_get_camera_manager(ret));
+ if (ret)
+ return nullptr;
self->fb_allocator = new FrameBufferAllocator(camera);
for (StreamConfiguration &streamCfg : *config_) {
Stream *stream = streamCfg.stream();
- gint ret;
ret = self->fb_allocator->allocate(stream);
- if (ret == 0)
+ if (ret <= 0)
return nullptr;
GQueue *pool = g_queue_new();
@@ -214,7 +228,7 @@ gst_libcamera_allocator_new(std::shared_ptr<Camera> camera,
g_hash_table_insert(self->pools, stream, pool);
}
- return self;
+ return std::exchange(self, nullptr);
}
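Two things happen in this constructor rewrite. First, the allocator now pins the CameraManager through a heap-allocated shared_ptr, deleted last in finalize(), so the library outlives any exported memory. Second, error handling moves to the g_autoptr/std::exchange idiom: the cleanup attribute owns the object across every early return, and the success path steals the reference. The idiom in isolation, as a generic sketch:

	#include <utility>

	#include <glib-object.h>

	static GObject *create_configured(bool fail)
	{
		/* g_autoptr unrefs on every early return... */
		g_autoptr(GObject) obj = G_OBJECT(g_object_new(G_TYPE_OBJECT, nullptr));

		if (fail)
			return nullptr; /* obj released here automatically */

		/* ...while std::exchange hands the reference to the caller
		 * and leaves the autoptr holding nullptr. */
		return std::exchange(obj, nullptr);
	}

Also note the allocate() return check: FrameBufferAllocator::allocate() returns the number of buffers allocated, so the old "== 0" test let genuine errors (negative returns) through; "<= 0" catches both.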
bool
diff --git a/src/gstreamer/gstlibcameraallocator.h b/src/gstreamer/gstlibcameraallocator.h
index 0a08c3bb..1a6ba346 100644
--- a/src/gstreamer/gstlibcameraallocator.h
+++ b/src/gstreamer/gstlibcameraallocator.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.h - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
#pragma once
diff --git a/src/gstreamer/gstlibcamerapad.cpp b/src/gstreamer/gstlibcamerapad.cpp
index 9e710a47..7b22aebe 100644
--- a/src/gstreamer/gstlibcamerapad.cpp
+++ b/src/gstreamer/gstlibcamerapad.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.cpp - GStreamer Capture Pad
+ * GStreamer Capture Pad
*/
#include "gstlibcamerapad.h"
diff --git a/src/gstreamer/gstlibcamerapad.h b/src/gstreamer/gstlibcamerapad.h
index 103ee57a..630c168a 100644
--- a/src/gstreamer/gstlibcamerapad.h
+++ b/src/gstreamer/gstlibcamerapad.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.h - GStreamer Capture Element
+ * GStreamer Capture Pad
*/
#pragma once
diff --git a/src/gstreamer/gstlibcamerapool.cpp b/src/gstreamer/gstlibcamerapool.cpp
index 0c2be43c..9cd7eccb 100644
--- a/src/gstreamer/gstlibcamerapool.cpp
+++ b/src/gstreamer/gstlibcamerapool.cpp
@@ -3,11 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.cpp - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*/
#include "gstlibcamerapool.h"
+#include <deque>
+
#include <libcamera/stream.h>
#include "gstlibcamera-utils.h"
@@ -24,24 +26,41 @@ static guint signals[N_SIGNALS];
struct _GstLibcameraPool {
GstBufferPool parent;
- GstAtomicQueue *queue;
+ std::deque<GstBuffer *> *queue;
GstLibcameraAllocator *allocator;
Stream *stream;
};
G_DEFINE_TYPE(GstLibcameraPool, gst_libcamera_pool, GST_TYPE_BUFFER_POOL)
+static GstBuffer *
+gst_libcamera_pool_pop_buffer(GstLibcameraPool *self)
+{
+ GLibLocker lock(GST_OBJECT(self));
+ GstBuffer *buf;
+
+ if (self->queue->empty())
+ return nullptr;
+
+ buf = self->queue->front();
+ self->queue->pop_front();
+
+ return buf;
+}
+
static GstFlowReturn
gst_libcamera_pool_acquire_buffer(GstBufferPool *pool, GstBuffer **buffer,
[[maybe_unused]] GstBufferPoolAcquireParams *params)
{
GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool);
- GstBuffer *buf = GST_BUFFER(gst_atomic_queue_pop(self->queue));
+ GstBuffer *buf = gst_libcamera_pool_pop_buffer(self);
+
if (!buf)
return GST_FLOW_ERROR;
if (!gst_libcamera_allocator_prepare_buffer(self->allocator, self->stream, buf)) {
- gst_atomic_queue_push(self->queue, buf);
+ GLibLocker lock(GST_OBJECT(self));
+ self->queue->push_back(buf);
return GST_FLOW_ERROR;
}
@@ -64,9 +83,13 @@ static void
gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer)
{
GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool);
- bool do_notify = gst_atomic_queue_length(self->queue) == 0;
+ bool do_notify;
- gst_atomic_queue_push(self->queue, buffer);
+ {
+ GLibLocker lock(GST_OBJECT(self));
+ do_notify = self->queue->empty();
+ self->queue->push_back(buffer);
+ }
if (do_notify)
g_signal_emit(self, signals[SIGNAL_BUFFER_NOTIFY], 0);
@@ -75,7 +98,7 @@ gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer)
static void
gst_libcamera_pool_init(GstLibcameraPool *self)
{
- self->queue = gst_atomic_queue_new(4);
+ self->queue = new std::deque<GstBuffer *>();
}
static void
@@ -84,10 +107,10 @@ gst_libcamera_pool_finalize(GObject *object)
GstLibcameraPool *self = GST_LIBCAMERA_POOL(object);
GstBuffer *buf;
- while ((buf = GST_BUFFER(gst_atomic_queue_pop(self->queue))))
+ while ((buf = gst_libcamera_pool_pop_buffer(self)))
gst_buffer_unref(buf);
- gst_atomic_queue_unref(self->queue);
+ delete self->queue;
g_object_unref(self->allocator);
G_OBJECT_CLASS(gst_libcamera_pool_parent_class)->finalize(object);
@@ -122,7 +145,7 @@ gst_libcamera_pool_new(GstLibcameraAllocator *allocator, Stream *stream)
gsize pool_size = gst_libcamera_allocator_get_pool_size(allocator, stream);
for (gsize i = 0; i < pool_size; i++) {
GstBuffer *buffer = gst_buffer_new();
- gst_atomic_queue_push(pool->queue, buffer);
+ pool->queue->push_back(buffer);
}
return pool;
diff --git a/src/gstreamer/gstlibcamerapool.h b/src/gstreamer/gstlibcamerapool.h
index ce3bf60b..2a7a9c77 100644
--- a/src/gstreamer/gstlibcamerapool.h
+++ b/src/gstreamer/gstlibcamerapool.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.h - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*
* This is a partial implementation of GstBufferPool intended for internal use
* only. This pool cannot be configured or activated.
diff --git a/src/gstreamer/gstlibcameraprovider.cpp b/src/gstreamer/gstlibcameraprovider.cpp
index ce3e0a08..5da96ea3 100644
--- a/src/gstreamer/gstlibcameraprovider.cpp
+++ b/src/gstreamer/gstlibcameraprovider.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.c - GStreamer Device Provider
+ * GStreamer Device Provider
*/
#include <array>
@@ -33,7 +33,6 @@ GST_DEBUG_CATEGORY_STATIC(provider_debug);
enum {
PROP_DEVICE_NAME = 1,
- PROP_AUTO_FOCUS_MODE = 2,
};
#define GST_TYPE_LIBCAMERA_DEVICE gst_libcamera_device_get_type()
@@ -43,7 +42,6 @@ G_DECLARE_FINAL_TYPE(GstLibcameraDevice, gst_libcamera_device,
struct _GstLibcameraDevice {
GstDevice parent;
gchar *name;
- controls::AfModeEnum auto_focus_mode = controls::AfModeManual;
};
G_DEFINE_TYPE(GstLibcameraDevice, gst_libcamera_device, GST_TYPE_DEVICE)
@@ -60,7 +58,6 @@ gst_libcamera_device_create_element(GstDevice *device, const gchar *name)
g_assert(source);
g_object_set(source, "camera-name", GST_LIBCAMERA_DEVICE(device)->name, nullptr);
- g_object_set(source, "auto-focus-mode", GST_LIBCAMERA_DEVICE(device)->auto_focus_mode, nullptr);
return source;
}
@@ -87,9 +84,6 @@ gst_libcamera_device_set_property(GObject *object, guint prop_id,
case PROP_DEVICE_NAME:
device->name = g_value_dup_string(value);
break;
- case PROP_AUTO_FOCUS_MODE:
- device->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value));
- break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
@@ -129,15 +123,6 @@ gst_libcamera_device_class_init(GstLibcameraDeviceClass *klass)
(GParamFlags)(G_PARAM_STATIC_STRINGS | G_PARAM_WRITABLE |
G_PARAM_CONSTRUCT_ONLY));
g_object_class_install_property(object_class, PROP_DEVICE_NAME, pspec);
-
- pspec = g_param_spec_enum("auto-focus-mode",
- "Set auto-focus mode",
- "Available options: AfModeManual, "
- "AfModeAuto or AfModeContinuous.",
- gst_libcamera_auto_focus_get_type(),
- static_cast<gint>(controls::AfModeManual),
- G_PARAM_WRITABLE);
- g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, pspec);
}
static GstDevice *
diff --git a/src/gstreamer/gstlibcameraprovider.h b/src/gstreamer/gstlibcameraprovider.h
index aaceabf5..19708b9d 100644
--- a/src/gstreamer/gstlibcameraprovider.h
+++ b/src/gstreamer/gstlibcameraprovider.h
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.h - GStreamer Device Provider
+ * GStreamer Device Provider
*/
#pragma once
diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp
index f015c6d2..5e9e843d 100644
--- a/src/gstreamer/gstlibcamerasrc.cpp
+++ b/src/gstreamer/gstlibcamerasrc.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.cpp - GStreamer Capture Element
+ * GStreamer Capture Element
*/
/**
@@ -37,10 +37,11 @@
#include <gst/base/base.h>
+#include "gstlibcamera-controls.h"
+#include "gstlibcamera-utils.h"
#include "gstlibcameraallocator.h"
#include "gstlibcamerapad.h"
#include "gstlibcamerapool.h"
-#include "gstlibcamera-utils.h"
using namespace libcamera;
@@ -128,6 +129,7 @@ struct GstLibcameraSrcState {
ControlList initControls_;
guint group_id_;
+ GstCameraControls controls_;
int queueRequest();
void requestCompleted(Request *request);
@@ -142,7 +144,6 @@ struct _GstLibcameraSrc {
GstTask *task;
gchar *camera_name;
- controls::AfModeEnum auto_focus_mode = controls::AfModeManual;
std::atomic<GstEvent *> pending_eos;
@@ -154,10 +155,15 @@ struct _GstLibcameraSrc {
enum {
PROP_0,
PROP_CAMERA_NAME,
- PROP_AUTO_FOCUS_MODE,
+ PROP_LAST
};
+static void gst_libcamera_src_child_proxy_init(gpointer g_iface,
+ gpointer iface_data);
+
G_DEFINE_TYPE_WITH_CODE(GstLibcameraSrc, gst_libcamera_src, GST_TYPE_ELEMENT,
+ G_IMPLEMENT_INTERFACE(GST_TYPE_CHILD_PROXY,
+ gst_libcamera_src_child_proxy_init)
GST_DEBUG_CATEGORY_INIT(source_debug, "libcamerasrc", 0,
"libcamera Source"))
@@ -180,6 +186,9 @@ int GstLibcameraSrcState::queueRequest()
if (!request)
return -ENOMEM;
+ /* Apply controls */
+ controls_.applyControls(request);
+
std::unique_ptr<RequestWrap> wrap =
std::make_unique<RequestWrap>(std::move(request));
@@ -223,6 +232,9 @@ GstLibcameraSrcState::requestCompleted(Request *request)
{
GLibLocker locker(&lock_);
+
+ controls_.readMetadata(request);
+
wrap = std::move(queuedRequests_.front());
queuedRequests_.pop();
}
@@ -377,21 +389,22 @@ gst_libcamera_src_open(GstLibcameraSrc *self)
}
if (camera_name) {
- cam = cm->get(self->camera_name);
+ cam = cm->get(camera_name);
if (!cam) {
GST_ELEMENT_ERROR(self, RESOURCE, NOT_FOUND,
- ("Could not find a camera named '%s'.", self->camera_name),
+ ("Could not find a camera named '%s'.", camera_name),
("libcamera::CameraMananger::get() returned nullptr"));
return false;
}
} else {
- if (cm->cameras().empty()) {
+ auto cameras = cm->cameras();
+ if (cameras.empty()) {
GST_ELEMENT_ERROR(self, RESOURCE, NOT_FOUND,
("Could not find any supported camera on this system."),
("libcamera::CameraMananger::cameras() is empty"));
return false;
}
- cam = cm->cameras()[0];
+ cam = cameras[0];
}
GST_INFO_OBJECT(self, "Using camera '%s'", cam->id().c_str());
@@ -404,6 +417,8 @@ gst_libcamera_src_open(GstLibcameraSrc *self)
return false;
}
+ self->state->controls_.setCamera(cam);
+
cam->requestCompleted.connect(self->state, &GstLibcameraSrcState::requestCompleted);
/* No need to lock here, we didn't start our threads yet. */
@@ -418,6 +433,8 @@ static bool
gst_libcamera_src_negotiate(GstLibcameraSrc *self)
{
GstLibcameraSrcState *state = self->state;
+ std::vector<GstVideoTransferFunction> transfer(state->srcpads_.size(),
+ GST_VIDEO_TRANSFER_UNKNOWN);
g_autoptr(GstStructure) element_caps = gst_structure_new_empty("caps");
@@ -433,7 +450,7 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self)
/* Fixate caps and configure the stream. */
caps = gst_caps_make_writable(caps);
- gst_libcamera_configure_stream_from_caps(stream_cfg, caps);
+ gst_libcamera_configure_stream_from_caps(stream_cfg, caps, &transfer[i]);
gst_libcamera_get_framerate_from_caps(caps, element_caps);
}
@@ -461,7 +478,7 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self)
GstPad *srcpad = state->srcpads_[i];
const StreamConfiguration &stream_cfg = state->config_->at(i);
- g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg);
+ g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg, transfer[i]);
gst_libcamera_framerate_to_caps(caps, element_caps);
if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps)))
@@ -657,18 +674,6 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
gst_pad_push_event(srcpad, gst_event_new_segment(&segment));
}
- if (self->auto_focus_mode != controls::AfModeManual) {
- const ControlInfoMap &infoMap = state->cam_->controls();
- if (infoMap.find(&controls::AfMode) != infoMap.end()) {
- state->initControls_.set(controls::AfMode, self->auto_focus_mode);
- } else {
- GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
- ("Failed to enable auto focus"),
- ("AfMode not supported by this camera, "
- "please retry with 'auto-focus-mode=AfModeManual'"));
- }
- }
-
ret = state->cam_->start(&state->initControls_);
if (ret) {
GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
@@ -730,17 +735,16 @@ gst_libcamera_src_set_property(GObject *object, guint prop_id,
{
GLibLocker lock(GST_OBJECT(object));
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(object);
+ GstLibcameraSrcState *state = self->state;
switch (prop_id) {
case PROP_CAMERA_NAME:
g_free(self->camera_name);
self->camera_name = g_value_dup_string(value);
break;
- case PROP_AUTO_FOCUS_MODE:
- self->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value));
- break;
default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ if (!state->controls_.setProperty(prop_id - PROP_LAST, value, pspec))
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
@@ -751,16 +755,15 @@ gst_libcamera_src_get_property(GObject *object, guint prop_id, GValue *value,
{
GLibLocker lock(GST_OBJECT(object));
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(object);
+ GstLibcameraSrcState *state = self->state;
switch (prop_id) {
case PROP_CAMERA_NAME:
g_value_set_string(value, self->camera_name);
break;
- case PROP_AUTO_FOCUS_MODE:
- g_value_set_enum(value, static_cast<gint>(self->auto_focus_mode));
- break;
default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ if (!state->controls_.getProperty(prop_id - PROP_LAST, value, pspec))
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
@@ -864,8 +867,10 @@ gst_libcamera_src_init(GstLibcameraSrc *self)
g_mutex_init(&state->lock_);
- state->srcpads_.push_back(gst_pad_new_from_template(templ, "src"));
- gst_element_add_pad(GST_ELEMENT(self), state->srcpads_.back());
+ GstPad *pad = gst_pad_new_from_template(templ, "src");
+ state->srcpads_.push_back(pad);
+ gst_element_add_pad(GST_ELEMENT(self), pad);
+ gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
GST_OBJECT_FLAG_SET(self, GST_ELEMENT_FLAG_SOURCE);
@@ -896,6 +901,8 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ,
return NULL;
}
+ gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
+
return reinterpret_cast<GstPad *>(g_steal_pointer(&pad));
}
@@ -904,6 +911,8 @@ gst_libcamera_src_release_pad(GstElement *element, GstPad *pad)
{
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element);
+ gst_child_proxy_child_removed(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
+
GST_DEBUG_OBJECT(self, "Pad %" GST_PTR_FORMAT " being released", pad);
{
@@ -939,7 +948,7 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
gst_element_class_set_metadata(element_class,
"libcamera Source", "Source/Video",
"Linux Camera source using libcamera",
- "Nicolas Dufresne <nicolas.dufresne@collabora.com");
+ "Nicolas Dufresne <nicolas.dufresne@collabora.com>");
gst_element_class_add_static_pad_template_with_gtype(element_class,
&src_template,
GST_TYPE_LIBCAMERA_PAD);
@@ -955,12 +964,35 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
| G_PARAM_STATIC_STRINGS));
g_object_class_install_property(object_class, PROP_CAMERA_NAME, spec);
- spec = g_param_spec_enum("auto-focus-mode",
- "Set auto-focus mode",
- "Available options: AfModeManual, "
- "AfModeAuto or AfModeContinuous.",
- gst_libcamera_auto_focus_get_type(),
- static_cast<gint>(controls::AfModeManual),
- G_PARAM_WRITABLE);
- g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, spec);
+ GstCameraControls::installProperties(object_class, PROP_LAST);
+}
+
+/* GstChildProxy implementation */
+static GObject *
+gst_libcamera_src_child_proxy_get_child_by_index(GstChildProxy *child_proxy,
+ guint index)
+{
+ GLibLocker lock(GST_OBJECT(child_proxy));
+ GObject *obj = nullptr;
+
+ obj = reinterpret_cast<GObject *>(g_list_nth_data(GST_ELEMENT(child_proxy)->srcpads, index));
+ if (obj)
+ gst_object_ref(obj);
+
+ return obj;
+}
+
+static guint
+gst_libcamera_src_child_proxy_get_children_count(GstChildProxy *child_proxy)
+{
+ GLibLocker lock(GST_OBJECT(child_proxy));
+ return GST_ELEMENT_CAST(child_proxy)->numsrcpads;
+}
+
+static void
+gst_libcamera_src_child_proxy_init(gpointer g_iface, [[maybe_unused]] gpointer iface_data)
+{
+ GstChildProxyInterface *iface = reinterpret_cast<GstChildProxyInterface *>(g_iface);
+ iface->get_child_by_index = gst_libcamera_src_child_proxy_get_child_by_index;
+ iface->get_children_count = gst_libcamera_src_child_proxy_get_children_count;
}
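Implementing GstChildProxy over the source pads makes the pads, and any properties they expose, reachable by name and index through the standard GStreamer tooling (including the parse-launch child-property syntax, assuming the pad declares properties as GstLibcameraPad does). Programmatic access is a straight iteration (sketch):

	GstChildProxy *proxy = GST_CHILD_PROXY(src);

	guint count = gst_child_proxy_get_children_count(proxy);
	for (guint i = 0; i < count; i++) {
		GObject *pad = gst_child_proxy_get_child_by_index(proxy, i);
		/* ... inspect or configure the pad ... */
		g_object_unref(pad); /* get_child_by_index returns a ref */
	}

Both vfuncs take the object lock, which is why the implementation above reads srcpads/numsrcpads under GLibLocker rather than walking the element's pad list unprotected.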
diff --git a/src/gstreamer/gstlibcamerasrc.h b/src/gstreamer/gstlibcamerasrc.h
index 0a88ba02..a27db9ca 100644
--- a/src/gstreamer/gstlibcamerasrc.h
+++ b/src/gstreamer/gstlibcamerasrc.h
@@ -3,13 +3,11 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.h - GStreamer Capture Element
+ * GStreamer Capture Element
*/
#pragma once
-#include <libcamera/control_ids.h>
-
#include <gst/gst.h>
G_BEGIN_DECLS
@@ -19,32 +17,3 @@ G_DECLARE_FINAL_TYPE(GstLibcameraSrc, gst_libcamera_src,
GST_LIBCAMERA, SRC, GstElement)
G_END_DECLS
-
-inline GType
-gst_libcamera_auto_focus_get_type()
-{
- static GType type = 0;
- static const GEnumValue values[] = {
- {
- static_cast<gint>(libcamera::controls::AfModeManual),
- "AfModeManual",
- "manual-focus",
- },
- {
- static_cast<gint>(libcamera::controls::AfModeAuto),
- "AfModeAuto",
- "automatic-auto-focus",
- },
- {
- static_cast<gint>(libcamera::controls::AfModeContinuous),
- "AfModeContinuous",
- "continuous-auto-focus",
- },
- { 0, NULL, NULL }
- };
-
- if (!type)
- type = g_enum_register_static("GstLibcameraAutoFocus", values);
-
- return type;
-}
diff --git a/src/gstreamer/meson.build b/src/gstreamer/meson.build
index c2a01e7b..6b7e53b5 100644
--- a/src/gstreamer/meson.build
+++ b/src/gstreamer/meson.build
@@ -25,6 +25,16 @@ libcamera_gst_sources = [
'gstlibcamerasrc.cpp',
]
+# Generate GStreamer control properties
+
+gen_gst_controls_template = files('gstlibcamera-controls.cpp.in')
+libcamera_gst_sources += custom_target('gstlibcamera-controls.cpp',
+ input : controls_files,
+ output : 'gstlibcamera-controls.cpp',
+ command : [gen_gst_controls, '-o', '@OUTPUT@',
+ '-t', gen_gst_controls_template, '@INPUT@'],
+ env : py_build_env)
+
libcamera_gst_cpp_args = [
'-DVERSION="@0@"'.format(libcamera_git_version),
'-DPACKAGE="@0@"'.format(meson.project_name()),
diff --git a/src/ipa/ipa-sign-install.sh b/src/ipa/ipa-sign-install.sh
index bcedb8b5..71696d5a 100755
--- a/src/ipa/ipa-sign-install.sh
+++ b/src/ipa/ipa-sign-install.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-sign-install.sh - Regenerate IPA module signatures when installing
+# Regenerate IPA module signatures when installing
key=$1
shift
diff --git a/src/ipa/ipa-sign.sh b/src/ipa/ipa-sign.sh
index 8673dad1..69024213 100755
--- a/src/ipa/ipa-sign.sh
+++ b/src/ipa/ipa-sign.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-sign.sh - Generate a signature for an IPA module
+# Generate a signature for an IPA module
key="$1"
input="$2"
diff --git a/src/ipa/ipu3/algorithms/af.cpp b/src/ipa/ipu3/algorithms/af.cpp
index 12927eec..cf68fb59 100644
--- a/src/ipa/ipu3/algorithms/af.cpp
+++ b/src/ipa/ipu3/algorithms/af.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Red Hat
*
- * af.cpp - IPU3 auto focus algorithm
+ * IPU3 auto focus algorithm
*/
#include "af.h"
@@ -11,7 +11,6 @@
#include <chrono>
#include <cmath>
#include <fcntl.h>
-#include <numeric>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
@@ -23,8 +22,6 @@
#include <libcamera/ipa/core_ipa_interface.h>
-#include "libipa/histogram.h"
-
/**
* \file af.h
*/
diff --git a/src/ipa/ipu3/algorithms/af.h b/src/ipa/ipu3/algorithms/af.h
index c6168e30..68126d46 100644
--- a/src/ipa/ipu3/algorithms/af.h
+++ b/src/ipa/ipu3/algorithms/af.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Red Hat
*
- * af.h - IPU3 Af algorithm
+ * IPU3 Af algorithm
*/
#pragma once
diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp
index 606a237a..39d0aebb 100644
--- a/src/ipa/ipu3/algorithms/agc.cpp
+++ b/src/ipa/ipu3/algorithms/agc.cpp
@@ -2,21 +2,22 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * ipu3_agc.cpp - AGC/AEC mean-based control algorithm
+ * AGC/AEC mean-based control algorithm
*/
#include "agc.h"
#include <algorithm>
#include <chrono>
-#include <cmath>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/control_ids.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
+#include "libipa/colours.h"
#include "libipa/histogram.h"
/**
@@ -33,7 +34,7 @@ namespace ipa::ipu3::algorithms {
* \class Agc
* \brief A mean-based auto-exposure algorithm
*
- * This algorithm calculates a shutter time and an analogue gain so that the
+ * This algorithm calculates an exposure time and an analogue gain so that the
* average value of the green channel of the brightest 2% of pixels approaches
* 0.5. The AWB gains are not used here, and all cells in the grid have the same
* weight, like an average-metering case. In this metering mode, the camera uses
@@ -51,29 +52,37 @@ LOG_DEFINE_CATEGORY(IPU3Agc)
static constexpr double kMinAnalogueGain = 1.0;
/* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */
-static constexpr utils::Duration kMaxShutterSpeed = 60ms;
+static constexpr utils::Duration kMaxExposureTime = 60ms;
/* Histogram constants */
static constexpr uint32_t knumHistogramBins = 256;
-/* Target value to reach for the top 2% of the histogram */
-static constexpr double kEvGainTarget = 0.5;
-
-/* Number of frames to wait before calculating stats on minimum exposure */
-static constexpr uint32_t kNumStartupFrames = 10;
+Agc::Agc()
+ : minExposureTime_(0s), maxExposureTime_(0s)
+{
+}
-/*
- * Relative luminance target.
+/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
+ *
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
*
- * It's a number that's chosen so that, when the camera points at a grey
- * target, the resulting image brightness is considered right.
+ * \return 0 on success or errors from the base class
*/
-static constexpr double kRelativeLuminanceTarget = 0.16;
-
-Agc::Agc()
- : frameCount_(0), minShutterSpeed_(0s),
- maxShutterSpeed_(0s), filteredExposure_(0s)
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap.merge(controls());
+
+ return 0;
}
/**
@@ -90,10 +99,11 @@ int Agc::configure(IPAContext &context,
IPAActiveState &activeState = context.activeState;
stride_ = configuration.grid.stride;
+ bdsGrid_ = configuration.grid.bdsGrid;
- minShutterSpeed_ = configuration.agc.minShutterSpeed;
- maxShutterSpeed_ = std::min(configuration.agc.maxShutterSpeed,
- kMaxShutterSpeed);
+ minExposureTime_ = configuration.agc.minExposureTime;
+ maxExposureTime_ = std::min(configuration.agc.maxExposureTime,
+ kMaxExposureTime);
minAnalogueGain_ = std::max(configuration.agc.minAnalogueGain, kMinAnalogueGain);
maxAnalogueGain_ = configuration.agc.maxAnalogueGain;
@@ -102,168 +112,53 @@ int Agc::configure(IPAContext &context,
activeState.agc.gain = minAnalogueGain_;
activeState.agc.exposure = 10ms / configuration.sensor.lineDuration;
- frameCount_ = 0;
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
+
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(minExposureTime_, maxExposureTime_, minAnalogueGain_,
+ maxAnalogueGain_);
+ resetFrameCount();
+
return 0;
}
-/**
- * \brief Estimate the mean value of the top 2% of the histogram
- * \param[in] stats The statistics computed by the ImgU
- * \param[in] grid The grid used to store the statistics in the IPU3
- * \return The mean value of the top 2% of the histogram
- */
-double Agc::measureBrightness(const ipu3_uapi_stats_3a *stats,
- const ipu3_uapi_grid_config &grid) const
+Histogram Agc::parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid)
{
- /* Initialise the histogram array */
uint32_t hist[knumHistogramBins] = { 0 };
+ rgbTriples_.clear();
+
for (unsigned int cellY = 0; cellY < grid.height; cellY++) {
for (unsigned int cellX = 0; cellX < grid.width; cellX++) {
uint32_t cellPosition = cellY * stride_ + cellX;
const ipu3_uapi_awb_set_item *cell =
reinterpret_cast<const ipu3_uapi_awb_set_item *>(
- &stats->awb_raw_buffer.meta_data[cellPosition]
- );
+ &stats->awb_raw_buffer.meta_data[cellPosition]);
+
+ rgbTriples_.push_back({
+ cell->R_avg,
+ (cell->Gr_avg + cell->Gb_avg) / 2,
+ cell->B_avg
+ });
- uint8_t gr = cell->Gr_avg;
- uint8_t gb = cell->Gb_avg;
/*
* Store the average green value to estimate the
* brightness. Even the overexposed pixels are
* taken into account.
*/
- hist[(gr + gb) / 2]++;
+ hist[(cell->Gr_avg + cell->Gb_avg) / 2]++;
}
}
- /* Estimate the quantile mean of the top 2% of the histogram. */
- return Histogram(Span<uint32_t>(hist)).interQuantileMean(0.98, 1.0);
-}
-
-/**
- * \brief Apply a filter on the exposure value to limit the speed of changes
- * \param[in] exposureValue The target exposure from the AGC algorithm
- *
- * The speed of the filter is adaptive, and will produce the target quicker
- * during startup, or when the target exposure is within 20% of the most recent
- * filter output.
- *
- * \return The filtered exposure
- */
-utils::Duration Agc::filterExposure(utils::Duration exposureValue)
-{
- double speed = 0.2;
-
- /* Adapt instantly if we are in startup phase. */
- if (frameCount_ < kNumStartupFrames)
- speed = 1.0;
-
- /*
- * If we are close to the desired result, go faster to avoid making
- * multiple micro-adjustments.
- * \todo Make this customisable?
- */
- if (filteredExposure_ < 1.2 * exposureValue &&
- filteredExposure_ > 0.8 * exposureValue)
- speed = sqrt(speed);
-
- filteredExposure_ = speed * exposureValue +
- filteredExposure_ * (1.0 - speed);
-
- LOG(IPU3Agc, Debug) << "After filtering, exposure " << filteredExposure_;
-
- return filteredExposure_;
-}
-
-/**
- * \brief Estimate the new exposure and gain values
- * \param[inout] frameContext The shared IPA frame Context
- * \param[in] yGain The gain calculated based on the relative luminance target
- * \param[in] iqMeanGain The gain calculated based on the relative luminance target
- */
-void Agc::computeExposure(IPAContext &context, IPAFrameContext &frameContext,
- double yGain, double iqMeanGain)
-{
- const IPASessionConfiguration &configuration = context.configuration;
- /* Get the effective exposure and gain applied on the sensor. */
- uint32_t exposure = frameContext.sensor.exposure;
- double analogueGain = frameContext.sensor.gain;
-
- /* Use the highest of the two gain estimates. */
- double evGain = std::max(yGain, iqMeanGain);
-
- /* Consider within 1% of the target as correctly exposed */
- if (utils::abs_diff(evGain, 1.0) < 0.01)
- LOG(IPU3Agc, Debug) << "We are well exposed (evGain = "
- << evGain << ")";
-
- /* extracted from Rpi::Agc::computeTargetExposure */
-
- /* Calculate the shutter time in seconds */
- utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
-
- /*
- * Update the exposure value for the next computation using the values
- * of exposure and gain really used by the sensor.
- */
- utils::Duration effectiveExposureValue = currentShutter * analogueGain;
-
- LOG(IPU3Agc, Debug) << "Actual total exposure " << currentShutter * analogueGain
- << " Shutter speed " << currentShutter
- << " Gain " << analogueGain
- << " Needed ev gain " << evGain;
-
- /*
- * Calculate the current exposure value for the scene as the latest
- * exposure value applied multiplied by the new estimated gain.
- */
- utils::Duration exposureValue = effectiveExposureValue * evGain;
-
- /* Clamp the exposure value to the min and max authorized */
- utils::Duration maxTotalExposure = maxShutterSpeed_ * maxAnalogueGain_;
- exposureValue = std::min(exposureValue, maxTotalExposure);
- LOG(IPU3Agc, Debug) << "Target total exposure " << exposureValue
- << ", maximum is " << maxTotalExposure;
-
- /*
- * Filter the exposure.
- * \todo estimate if we need to desaturate
- */
- exposureValue = filterExposure(exposureValue);
-
- /*
- * Divide the exposure value as new exposure and gain values.
- *
- * Push the shutter time up to the maximum first, and only then
- * increase the gain.
- */
- utils::Duration shutterTime =
- std::clamp<utils::Duration>(exposureValue / minAnalogueGain_,
- minShutterSpeed_, maxShutterSpeed_);
- double stepGain = std::clamp(exposureValue / shutterTime,
- minAnalogueGain_, maxAnalogueGain_);
- LOG(IPU3Agc, Debug) << "Divided up shutter and gain are "
- << shutterTime << " and "
- << stepGain;
-
- IPAActiveState &activeState = context.activeState;
- /* Update the estimated exposure and gain. */
- activeState.agc.exposure = shutterTime / configuration.sensor.lineDuration;
- activeState.agc.gain = stepGain;
+ return Histogram(Span<uint32_t>(hist));
}
/**
* \brief Estimate the relative luminance of the frame with a given gain
- * \param[in] frameContext The shared IPA frame context
- * \param[in] grid The grid used to store the statistics in the IPU3
- * \param[in] stats The IPU3 statistics and ISP results
- * \param[in] gain The gain to apply to the frame
- * \return The relative luminance
- *
- * This function estimates the average relative luminance of the frame that
- * would be output by the sensor if an additional \a gain was applied.
+ * \param[in] gain The gain to apply in estimating luminance
*
* The estimation is based on the AWB statistics for the current frame. Red,
* green and blue averages for all cells are first multiplied by the gain, and
@@ -278,40 +173,22 @@ void Agc::computeExposure(IPAContext &context, IPAFrameContext &frameContext,
*
* More detailed information can be found in:
* https://en.wikipedia.org/wiki/Relative_luminance
+ *
+ * \return The relative luminance of the frame
*/
-double Agc::estimateLuminance(IPAActiveState &activeState,
- const ipu3_uapi_grid_config &grid,
- const ipu3_uapi_stats_3a *stats,
- double gain)
+double Agc::estimateLuminance(double gain) const
{
- double redSum = 0, greenSum = 0, blueSum = 0;
-
- /* Sum the per-channel averages, saturated to 255. */
- for (unsigned int cellY = 0; cellY < grid.height; cellY++) {
- for (unsigned int cellX = 0; cellX < grid.width; cellX++) {
- uint32_t cellPosition = cellY * stride_ + cellX;
-
- const ipu3_uapi_awb_set_item *cell =
- reinterpret_cast<const ipu3_uapi_awb_set_item *>(
- &stats->awb_raw_buffer.meta_data[cellPosition]
- );
- const uint8_t G_avg = (cell->Gr_avg + cell->Gb_avg) / 2;
+ RGB<double> sum{ 0.0 };
- redSum += std::min(cell->R_avg * gain, 255.0);
- greenSum += std::min(G_avg * gain, 255.0);
- blueSum += std::min(cell->B_avg * gain, 255.0);
- }
+ for (unsigned int i = 0; i < rgbTriples_.size(); i++) {
+ sum.r() += std::min(std::get<0>(rgbTriples_[i]) * gain, 255.0);
+ sum.g() += std::min(std::get<1>(rgbTriples_[i]) * gain, 255.0);
+ sum.b() += std::min(std::get<2>(rgbTriples_[i]) * gain, 255.0);
}
- /*
- * Apply the AWB gains to approximate colours correctly, use the Rec.
- * 601 formula to calculate the relative luminance, and normalize it.
- */
- double ySum = redSum * activeState.awb.gains.red * 0.299
- + greenSum * activeState.awb.gains.green * 0.587
- + blueSum * activeState.awb.gains.blue * 0.114;
-
- return ySum / (grid.height * grid.width) / 255;
+ RGB<double> gains{{ rGain_, gGain_, bGain_ }};
+ double ySum = rec601LuminanceFromRGB(sum * gains);
+ return ySum / (bdsGrid_.height * bdsGrid_.width) / 255;
}
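
The rec601LuminanceFromRGB() helper now comes from libipa/colours.h; judging by the inline arithmetic it replaces above, a minimal equivalent sketch would be:

    /*
     * Minimal sketch of the Rec. 601 weighted sum that the removed inline
     * code computed; the real helper lives in src/ipa/libipa/colours.h,
     * and RGB<double> comes from libcamera/internal/vector.h.
     */
    static double rec601Luminance(const RGB<double> &rgb)
    {
            return 0.299 * rgb.r() + 0.587 * rgb.g() + 0.114 * rgb.b();
    }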
/**
@@ -330,44 +207,36 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
const ipu3_uapi_stats_3a *stats,
ControlList &metadata)
{
- /*
- * Estimate the gain needed to have the proportion of pixels in a given
- * desired range. iqMean is the mean value of the top 2% of the
- * cumulative histogram, and we want it to be as close as possible to a
- * configured target.
- */
- double iqMean = measureBrightness(stats, context.configuration.grid.bdsGrid);
- double iqMeanGain = kEvGainTarget * knumHistogramBins / iqMean;
+ Histogram hist = parseStatistics(stats, context.configuration.grid.bdsGrid);
+ rGain_ = context.activeState.awb.gains.red;
+ gGain_ = context.activeState.awb.gains.green;
+ bGain_ = context.activeState.awb.gains.blue;
/*
- * Estimate the gain needed to achieve a relative luminance target. To
- * account for non-linearity caused by saturation, the value needs to be
- * estimated in an iterative process, as multiplying by a gain will not
- * increase the relative luminance by the same factor if some image
- * regions are saturated.
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
*/
- double yGain = 1.0;
- double yTarget = kRelativeLuminanceTarget;
-
- for (unsigned int i = 0; i < 8; i++) {
- double yValue = estimateLuminance(context.activeState,
- context.configuration.grid.bdsGrid,
- stats, yGain);
- double extraGain = std::min(10.0, yTarget / (yValue + .001));
-
- yGain *= extraGain;
- LOG(IPU3Agc, Debug) << "Y value: " << yValue
- << ", Y target: " << yTarget
- << ", gives gain " << yGain;
- if (extraGain < 1.01)
- break;
- }
-
- computeExposure(context, frameContext, yGain, iqMeanGain);
- frameCount_++;
-
utils::Duration exposureTime = context.configuration.sensor.lineDuration
* frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
+
+ utils::Duration newExposureTime;
+ double aGain, dGain;
+ std::tie(newExposureTime, aGain, dGain) =
+ calculateNewEv(context.activeState.agc.constraintMode,
+ context.activeState.agc.exposureMode, hist,
+ effectiveExposureValue);
+
+ LOG(IPU3Agc, Debug)
+ << "Divided up exposure time, analogue gain and digital gain are "
+ << newExposureTime << ", " << aGain << " and " << dGain;
+
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure time and gain. */
+ activeState.agc.exposure = newExposureTime / context.configuration.sensor.lineDuration;
+ activeState.agc.gain = aGain;
+
metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
@@ -377,7 +246,6 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
utils::Duration frameDuration = context.configuration.sensor.lineDuration
* vTotal;
metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());
-
}
REGISTER_IPA_ALGORITHM(Agc, "Agc")
diff --git a/src/ipa/ipu3/algorithms/agc.h b/src/ipa/ipu3/algorithms/agc.h
index 9d6e3ff1..890c271b 100644
--- a/src/ipa/ipu3/algorithms/agc.h
+++ b/src/ipa/ipu3/algorithms/agc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * agc.h - IPU3 AGC/AEC mean-based control algorithm
+ * IPU3 AGC/AEC mean-based control algorithm
*/
#pragma once
@@ -13,6 +13,9 @@
#include <libcamera/geometry.h>
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
#include "algorithm.h"
namespace libcamera {
@@ -21,12 +24,13 @@ struct IPACameraSensorInfo;
namespace ipa::ipu3::algorithms {
-class Agc : public Algorithm
+class Agc : public Algorithm, public AgcMeanLuminance
{
public:
Agc();
~Agc() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
void process(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
@@ -34,27 +38,22 @@ public:
ControlList &metadata) override;
private:
- double measureBrightness(const ipu3_uapi_stats_3a *stats,
- const ipu3_uapi_grid_config &grid) const;
- utils::Duration filterExposure(utils::Duration currentExposure);
- void computeExposure(IPAContext &context, IPAFrameContext &frameContext,
- double yGain, double iqMeanGain);
- double estimateLuminance(IPAActiveState &activeState,
- const ipu3_uapi_grid_config &grid,
- const ipu3_uapi_stats_3a *stats,
- double gain);
-
- uint64_t frameCount_;
+ double estimateLuminance(double gain) const override;
+ Histogram parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid);
- utils::Duration minShutterSpeed_;
- utils::Duration maxShutterSpeed_;
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
double minAnalogueGain_;
double maxAnalogueGain_;
- utils::Duration filteredExposure_;
-
uint32_t stride_;
+ double rGain_;
+ double gGain_;
+ double bGain_;
+ ipu3_uapi_grid_config bdsGrid_;
+ std::vector<std::tuple<uint8_t, uint8_t, uint8_t>> rgbTriples_;
};
} /* namespace ipa::ipu3::algorithms */
diff --git a/src/ipa/ipu3/algorithms/algorithm.h b/src/ipa/ipu3/algorithms/algorithm.h
index ae134a94..c7801f93 100644
--- a/src/ipa/ipu3/algorithms/algorithm.h
+++ b/src/ipa/ipu3/algorithms/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - IPU3 control algorithm interface
+ * IPU3 control algorithm interface
*/
#pragma once
diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp
index 5abd4621..55de05d9 100644
--- a/src/ipa/ipu3/algorithms/awb.cpp
+++ b/src/ipa/ipu3/algorithms/awb.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * awb.cpp - AWB control algorithm
+ * AWB control algorithm
*/
#include "awb.h"
@@ -13,6 +13,8 @@
#include <libcamera/control_ids.h>
+#include "libipa/colours.h"
+
/**
* \file awb.h
*/
@@ -301,51 +303,24 @@ void Awb::prepare(IPAContext &context,
params->use.acc_ccm = 1;
}
-/**
- * The function estimates the correlated color temperature using
- * from RGB color space input.
- * In physics and color science, the Planckian locus or black body locus is
- * the path or locus that the color of an incandescent black body would take
- * in a particular chromaticity space as the blackbody temperature changes.
- *
- * If a narrow range of color temperatures is considered (those encapsulating
- * daylight being the most practical case) one can approximate the Planckian
- * locus in order to calculate the CCT in terms of chromaticity coordinates.
- *
- * More detailed information can be found in:
- * https://en.wikipedia.org/wiki/Color_temperature#Approximation
- */
-uint32_t Awb::estimateCCT(double red, double green, double blue)
-{
- /* Convert the RGB values to CIE tristimulus values (XYZ) */
- double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue);
- double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue);
- double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue);
-
- /* Calculate the normalized chromaticity values */
- double x = X / (X + Y + Z);
- double y = Y / (X + Y + Z);
-
- /* Calculate CCT */
- double n = (x - 0.3320) / (0.1858 - y);
- return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
-}
-
/* Generate an RGB vector with the average values for each zone */
void Awb::generateZones()
{
zones_.clear();
for (unsigned int i = 0; i < kAwbStatsSizeX * kAwbStatsSizeY; i++) {
- RGB zone;
double counted = awbStats_[i].counted;
if (counted >= cellsPerZoneThreshold_) {
- zone.G = awbStats_[i].sum.green / counted;
- if (zone.G >= kMinGreenLevelInZone) {
- zone.R = awbStats_[i].sum.red / counted;
- zone.B = awbStats_[i].sum.blue / counted;
+ RGB<double> zone{{
+ static_cast<double>(awbStats_[i].sum.red),
+ static_cast<double>(awbStats_[i].sum.green),
+ static_cast<double>(awbStats_[i].sum.blue)
+ }};
+
+ zone /= counted;
+
+ if (zone.g() >= kMinGreenLevelInZone)
zones_.push_back(zone);
- }
}
}
}
@@ -412,32 +387,32 @@ void Awb::awbGreyWorld()
* consider some variations, such as normalising all the zones first, or
* doing an L2 average etc.
*/
- std::vector<RGB> &redDerivative(zones_);
- std::vector<RGB> blueDerivative(redDerivative);
+ std::vector<RGB<double>> &redDerivative(zones_);
+ std::vector<RGB<double>> blueDerivative(redDerivative);
std::sort(redDerivative.begin(), redDerivative.end(),
- [](RGB const &a, RGB const &b) {
- return a.G * b.R < b.G * a.R;
+ [](RGB<double> const &a, RGB<double> const &b) {
+ return a.g() * b.r() < b.g() * a.r();
});
std::sort(blueDerivative.begin(), blueDerivative.end(),
- [](RGB const &a, RGB const &b) {
- return a.G * b.B < b.G * a.B;
+ [](RGB<double> const &a, RGB<double> const &b) {
+ return a.g() * b.b() < b.g() * a.b();
});
/* Average the middle half of the values. */
int discard = redDerivative.size() / 4;
- RGB sumRed(0, 0, 0);
- RGB sumBlue(0, 0, 0);
+ RGB<double> sumRed{ 0.0 };
+ RGB<double> sumBlue{ 0.0 };
for (auto ri = redDerivative.begin() + discard,
bi = blueDerivative.begin() + discard;
ri != redDerivative.end() - discard; ri++, bi++)
sumRed += *ri, sumBlue += *bi;
- double redGain = sumRed.G / (sumRed.R + 1),
- blueGain = sumBlue.G / (sumBlue.B + 1);
+ double redGain = sumRed.g() / (sumRed.r() + 1),
+ blueGain = sumBlue.g() / (sumBlue.b() + 1);
/* Color temperature is not relevant in Grey world but still useful to estimate it :-) */
- asyncResults_.temperatureK = estimateCCT(sumRed.R, sumRed.G, sumBlue.B);
+ asyncResults_.temperatureK = estimateCCT({{ sumRed.r(), sumRed.g(), sumBlue.b() }});
/*
* Gain values are unsigned integer value ranging [0, 8) with 13 bit
diff --git a/src/ipa/ipu3/algorithms/awb.h b/src/ipa/ipu3/algorithms/awb.h
index 7a70854e..dbf69c90 100644
--- a/src/ipa/ipu3/algorithms/awb.h
+++ b/src/ipa/ipu3/algorithms/awb.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * awb.h - IPU3 AWB control algorithm
+ * IPU3 AWB control algorithm
*/
#pragma once
@@ -13,6 +13,8 @@
#include <libcamera/geometry.h>
+#include "libcamera/internal/vector.h"
+
#include "algorithm.h"
namespace libcamera {
@@ -48,20 +50,6 @@ public:
ControlList &metadata) override;
private:
- /* \todo Make these structs available to all the ISPs ? */
- struct RGB {
- RGB(double _R = 0, double _G = 0, double _B = 0)
- : R(_R), G(_G), B(_B)
- {
- }
- double R, G, B;
- RGB &operator+=(RGB const &other)
- {
- R += other.R, G += other.G, B += other.B;
- return *this;
- }
- };
-
struct AwbStatus {
double temperatureK;
double redGain;
@@ -75,11 +63,10 @@ private:
void generateAwbStats(const ipu3_uapi_stats_3a *stats);
void clearAwbStats();
void awbGreyWorld();
- uint32_t estimateCCT(double red, double green, double blue);
static constexpr uint16_t threshold(float value);
static constexpr uint16_t gainValue(double gain);
- std::vector<RGB> zones_;
+ std::vector<RGB<double>> zones_;
Accumulator awbStats_[kAwbStatsSizeX * kAwbStatsSizeY];
AwbStatus asyncResults_;
diff --git a/src/ipa/ipu3/algorithms/blc.cpp b/src/ipa/ipu3/algorithms/blc.cpp
index e838072a..35748fb2 100644
--- a/src/ipa/ipu3/algorithms/blc.cpp
+++ b/src/ipa/ipu3/algorithms/blc.cpp
@@ -2,13 +2,11 @@
/*
* Copyright (C) 2021, Google inc.
*
- * blc.cpp - IPU3 Black Level Correction control
+ * IPU3 Black Level Correction control
*/
#include "blc.h"
-#include <string.h>
-
/**
* \file blc.h
* \brief IPU3 Black Level Correction control
@@ -57,8 +55,8 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
* tuning processes. This is a first rough approximation.
*/
params->obgrid_param.gr = 64;
- params->obgrid_param.r = 64;
- params->obgrid_param.b = 64;
+ params->obgrid_param.r = 64;
+ params->obgrid_param.b = 64;
params->obgrid_param.gb = 64;
/* Enable the custom black level correction processing */
diff --git a/src/ipa/ipu3/algorithms/blc.h b/src/ipa/ipu3/algorithms/blc.h
index 292bf67b..62748045 100644
--- a/src/ipa/ipu3/algorithms/blc.h
+++ b/src/ipa/ipu3/algorithms/blc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * black_correction.h - IPU3 Black Level Correction control
+ * IPU3 Black Level Correction control
*/
#pragma once
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.cpp b/src/ipa/ipu3/algorithms/tone_mapping.cpp
index a169894c..160338c1 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.cpp
+++ b/src/ipa/ipu3/algorithms/tone_mapping.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * tone_mapping.cpp - IPU3 ToneMapping and Gamma control
+ * IPU3 ToneMapping and Gamma control
*/
#include "tone_mapping.h"
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.h b/src/ipa/ipu3/algorithms/tone_mapping.h
index 5ae35da5..b2b38010 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.h
+++ b/src/ipa/ipu3/algorithms/tone_mapping.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google inc.
*
- * tone_mapping.h - IPU3 ToneMapping and Gamma control
+ * IPU3 ToneMapping and Gamma control
*/
#pragma once
diff --git a/src/ipa/ipu3/ipa_context.cpp b/src/ipa/ipu3/ipa_context.cpp
index 959f314f..3b22f791 100644
--- a/src/ipa/ipu3/ipa_context.cpp
+++ b/src/ipa/ipu3/ipa_context.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * ipa_context.cpp - IPU3 IPA Context
+ * IPU3 IPA Context
*/
#include "ipa_context.h"
@@ -39,6 +39,10 @@ namespace libcamera::ipa::ipu3 {
* \struct IPAContext
* \brief Global IPA context data shared between all algorithms
*
+ * \fn IPAContext::IPAContext
+ * \brief Initialize the instance with the given number of frame contexts
+ * \param[in] frameContextSize Size of the frame context ring buffer
+ *
* \var IPAContext::configuration
* \brief The IPA session configuration, immutable during the session
*
@@ -47,6 +51,9 @@ namespace libcamera::ipa::ipu3 {
*
* \var IPAContext::activeState
* \brief The current state of IPA algorithms
+ *
+ * \var IPAContext::ctrlMap
+ * \brief A ControlInfoMap::Map of controls populated by the algorithms
*/
/**
@@ -89,11 +96,11 @@ namespace libcamera::ipa::ipu3 {
* \var IPASessionConfiguration::agc
* \brief AGC parameters configuration of the IPA
*
- * \var IPASessionConfiguration::agc.minShutterSpeed
- * \brief Minimum shutter speed supported with the configured sensor
+ * \var IPASessionConfiguration::agc.minExposureTime
+ * \brief Minimum exposure time supported with the configured sensor
*
- * \var IPASessionConfiguration::agc.maxShutterSpeed
- * \brief Maximum shutter speed supported with the configured sensor
+ * \var IPASessionConfiguration::agc.maxExposureTime
+ * \brief Maximum exposure time supported with the configured sensor
*
* \var IPASessionConfiguration::agc.minAnalogueGain
* \brief Minimum analogue gain supported with the configured sensor
diff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h
index e9a3863b..97fcf06c 100644
--- a/src/ipa/ipu3/ipa_context.h
+++ b/src/ipa/ipu3/ipa_context.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * ipa_context.h - IPU3 IPA Context
+ * IPU3 IPA Context
*
*/
@@ -12,6 +12,7 @@
#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
#include <libcamera/geometry.h>
#include <libipa/fc_queue.h>
@@ -32,8 +33,8 @@ struct IPASessionConfiguration {
} af;
struct {
- utils::Duration minShutterSpeed;
- utils::Duration maxShutterSpeed;
+ utils::Duration minExposureTime;
+ utils::Duration maxExposureTime;
double minAnalogueGain;
double maxAnalogueGain;
} agc;
@@ -55,6 +56,8 @@ struct IPAActiveState {
struct {
uint32_t exposure;
double gain;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
} agc;
struct {
@@ -81,10 +84,17 @@ struct IPAFrameContext : public FrameContext {
};
struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
IPASessionConfiguration configuration;
IPAActiveState activeState;
FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
};
} /* namespace ipa::ipu3 */
diff --git a/src/ipa/ipu3/ipu3-ipa-design-guide.rst b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
index 72506397..85d735c6 100644
--- a/src/ipa/ipu3/ipu3-ipa-design-guide.rst
+++ b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
@@ -27,8 +27,8 @@ from applications, and managing events from the pipeline handler.
└─┬───┬───┬──────┬────┬────┬────┬─┴────▼─┬──┘ 1: init()
│ │ │ │ ▲ │ ▲ │ ▲ │ ▲ │ 2: configure()
│1 │2 │3 │4│ │4│ │4│ │4│ │5 3: mapBuffers(), start()
- │ │ │ │ │ │ │ │ │ │ │ │ 4: (▼) queueRequest(), fillParamsBuffer(), processStatsBuffer()
- ▼ ▼ ▼ ▼ │ ▼ │ ▼ │ ▼ │ ▼ (▲) setSensorControls, paramsBufferReady, metadataReady Signals
+ │ │ │ │ │ │ │ │ │ │ │ │ 4: (▼) queueRequest(), computeParams(), processStats()
+ ▼ ▼ ▼ ▼ │ ▼ │ ▼ │ ▼ │ ▼ (▲) setSensorControls, paramsComputed, metadataReady Signals
┌──────────────────┴────┴────┴────┴─────────┐ 5: stop(), unmapBuffers()
│ IPU3 IPA │
│ ┌───────────────────────┐ │
@@ -104,8 +104,8 @@ to operate when running:
- configure()
- queueRequest()
-- fillParamsBuffer()
-- processStatsBuffer()
+- computeParams()
+- processStats()
The configuration phase allows the pipeline-handler to inform the IPA of
the current stream configurations, which is then passed into each
@@ -119,7 +119,7 @@ When configured, the IPA is notified by the pipeline handler of the
Camera ``start()`` event, after which incoming requests will be queued
for processing, requiring a parameter buffer (``ipu3_uapi_params``) to
be populated for the ImgU. This is given to the IPA through
-``fillParamsBuffer()``, and then passed directly to each algorithm
+``computeParams()``, and then passed directly to each algorithm
through the ``prepare()`` call allowing the ISP configuration to be
updated for the needs of each component that the algorithm is
responsible for.
@@ -129,7 +129,7 @@ structure that it modifies, and it should take care to ensure that any
structure set by a use flag is fully initialised to suitable values.
The parameter buffer is returned to the pipeline handler through the
-``paramsBufferReady`` signal, and from there queued to the ImgU along
+``paramsComputed`` signal, and from there queued to the ImgU along
with a raw frame captured with the CIO2.
Post-frame completion
@@ -138,7 +138,7 @@ Post-frame completion
When the capture of an image is completed, and successfully processed
through the ImgU, the generated statistics buffer
(``ipu3_uapi_stats_3a``) is given to the IPA through
-``processStatsBuffer()``. This provides the IPA with an opportunity to
+``processStats()``. This provides the IPA with an opportunity to
examine the results of the ISP and run the calculations required by each
algorithm on the new data. The algorithms may require context from the
operations of other algorithms, for example, the AWB might choose to use
diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp
index 08ee6eb3..1cae08bf 100644
--- a/src/ipa/ipu3/ipu3.cpp
+++ b/src/ipa/ipu3/ipu3.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipu3.cpp - IPU3 Image Processing Algorithms
+ * IPU3 Image Processing Algorithms
*/
#include <algorithm>
@@ -23,24 +23,22 @@
#include <libcamera/base/utils.h>
#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/request.h>
+
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include <libcamera/ipa/ipu3_ipa_interface.h>
-#include <libcamera/request.h>
#include "libcamera/internal/mapped_framebuffer.h"
#include "libcamera/internal/yaml_parser.h"
-#include "algorithms/af.h"
-#include "algorithms/agc.h"
-#include "algorithms/algorithm.h"
-#include "algorithms/awb.h"
-#include "algorithms/blc.h"
-#include "algorithms/tone_mapping.h"
#include "libipa/camera_sensor_helper.h"
#include "ipa_context.h"
+#include "module.h"
/* Minimum grid width, expressed as a number of cells */
static constexpr uint32_t kMinGridWidth = 16;
@@ -89,14 +87,14 @@ namespace ipa::ipu3 {
* parameter buffer, and adapting the settings of the sensor attached to the
* IPU3 CIO2 through sensor-specific V4L2 controls.
*
- * In fillParamsBuffer(), we populate the ImgU parameter buffer with
+ * In computeParams(), we populate the ImgU parameter buffer with
* settings to configure the device in preparation for handling the frame
* queued in the Request.
*
* When the frame has completed processing, the ImgU will generate a statistics
- * buffer which is given to the IPA with processStatsBuffer(). In this we run the
+ * buffer which is given to the IPA with processStats(). In this we run the
* algorithms to parse the statistics and cache any results for the next
- * fillParamsBuffer() call.
+ * computeParams() call.
*
* The individual algorithms are split into modular components that are called
* iteratively to allow them to process statistics from the ImgU in the order
@@ -114,7 +112,7 @@ namespace ipa::ipu3 {
* blue gains to apply to generate a neutral grey frame overall.
*
* AGC is handled by calculating a histogram of the green channel to estimate an
- * analogue gain and shutter time which will provide a well exposed frame. A
+ * analogue gain and exposure time which will provide a well exposed frame. A
* low-pass IIR filter is used to smooth the changes to the sensor to reduce
* perceivable steps.
*
@@ -157,10 +155,10 @@ public:
void unmapBuffers(const std::vector<unsigned int> &ids) override;
void queueRequest(const uint32_t frame, const ControlList &controls) override;
- void fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) override;
- void processStatsBuffer(const uint32_t frame, const int64_t frameTimestamp,
- const uint32_t bufferId,
- const ControlList &sensorControls) override;
+ void computeParams(const uint32_t frame, const uint32_t bufferId) override;
+ void processStats(const uint32_t frame, const int64_t frameTimestamp,
+ const uint32_t bufferId,
+ const ControlList &sensorControls) override;
protected:
std::string logPrefix() const override;
@@ -189,7 +187,7 @@ private:
};
IPAIPU3::IPAIPU3()
- : context_({ {}, {}, { kMaxFrameContexts } })
+ : context_(kMaxFrameContexts)
{
}
@@ -217,13 +215,13 @@ void IPAIPU3::updateSessionConfiguration(const ControlInfoMap &sensorControls)
/*
* When the AGC computes the new exposure values for a frame, it needs
- * to know the limits for shutter speed and analogue gain.
+ * to know the limits for exposure time and analogue gain.
* As it depends on the sensor, update it with the controls.
*
- * \todo take VBLANK into account for maximum shutter speed
+ * \todo take VBLANK into account for maximum exposure time
*/
- context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.minExposureTime = minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.maxExposureTime = maxExposure * context_.configuration.sensor.lineDuration;
context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain);
context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain);
}
@@ -287,6 +285,7 @@ void IPAIPU3::updateControls(const IPACameraSensorInfo &sensorInfo,
frameDurations[1],
frameDurations[2]);
+ controls.merge(context_.ctrlMap);
*ipaControls = ControlInfoMap(std::move(controls), controls::controls);
}
@@ -312,8 +311,8 @@ int IPAIPU3::init(const IPASettings &settings,
/* Clean context */
context_.configuration = {};
- context_.configuration.sensor.lineDuration = sensorInfo.minLineLength
- * 1.0s / sensorInfo.pixelRate;
+ context_.configuration.sensor.lineDuration =
+ sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate;
/* Load the tuning data file. */
File file(settings.configurationFile);
@@ -476,8 +475,8 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
context_.frameContexts.clear();
/* Initialise the sensor configuration. */
- context_.configuration.sensor.lineDuration = sensorInfo_.minLineLength
- * 1.0s / sensorInfo_.pixelRate;
+ context_.configuration.sensor.lineDuration =
+ sensorInfo_.minLineLength * 1.0s / sensorInfo_.pixelRate;
context_.configuration.sensor.size = sensorInfo_.outputSize;
/*
@@ -539,7 +538,7 @@ void IPAIPU3::unmapBuffers(const std::vector<unsigned int> &ids)
* Algorithms are expected to fill the IPU3 parameter buffer for the next
* frame given their most recent processing of the ImgU statistics.
*/
-void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
+void IPAIPU3::computeParams(const uint32_t frame, const uint32_t bufferId)
{
auto it = buffers_.find(bufferId);
if (it == buffers_.end()) {
@@ -567,7 +566,7 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
for (auto const &algo : algorithms())
algo->prepare(context_, frame, frameContext, params);
- paramsBufferReady.emit(frame);
+ paramsComputed.emit(frame);
}
/**
@@ -581,9 +580,9 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
* statistics are passed to each algorithm module to run their calculations and
* update their state accordingly.
*/
-void IPAIPU3::processStatsBuffer(const uint32_t frame,
- [[maybe_unused]] const int64_t frameTimestamp,
- const uint32_t bufferId, const ControlList &sensorControls)
+void IPAIPU3::processStats(const uint32_t frame,
+ [[maybe_unused]] const int64_t frameTimestamp,
+ const uint32_t bufferId, const ControlList &sensorControls)
{
auto it = buffers_.find(bufferId);
if (it == buffers_.end()) {
@@ -672,7 +671,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerIPU3",
+ "ipu3",
"ipu3",
};
diff --git a/src/ipa/ipu3/meson.build b/src/ipa/ipu3/meson.build
index 66c39843..34de6213 100644
--- a/src/ipa/ipu3/meson.build
+++ b/src/ipa/ipu3/meson.build
@@ -12,12 +12,10 @@ ipu3_ipa_sources = files([
ipu3_ipa_sources += ipu3_ipa_algorithms
-mod = shared_module(ipa_name,
- [ipu3_ipa_sources, libcamera_generated_ipa_headers],
+mod = shared_module(ipa_name, ipu3_ipa_sources,
name_prefix : '',
- include_directories : [ipa_includes, libipa_includes],
- dependencies : libcamera_private,
- link_with : libipa,
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
install : true,
install_dir : ipa_install_dir)
diff --git a/src/ipa/ipu3/module.h b/src/ipa/ipu3/module.h
index d94fc459..60f65cc4 100644
--- a/src/ipa/ipu3/module.h
+++ b/src/ipa/ipu3/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - IPU3 IPA Module
+ * IPU3 IPA Module
*/
#pragma once
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
new file mode 100644
index 00000000..02555a44
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#include "agc_mean_luminance.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "exposure_mode_helper.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file agc_mean_luminance.h
+ * \brief Base class implementing mean luminance AEGC
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(AgcMeanLuminance)
+
+namespace ipa {
+
+/*
+ * Number of frames for which to run the algorithm at full speed, before slowing
+ * down to prevent large and jarring changes in exposure from frame to frame.
+ */
+static constexpr uint32_t kNumStartupFrames = 10;
+
+/*
+ * Default relative luminance target
+ *
+ * This value should be chosen so that when the camera points at a grey target,
+ * the resulting image brightness looks "right". Custom values can be passed
+ * as the relativeLuminanceTarget value in sensor tuning files.
+ */
+static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
+
+/**
+ * \struct AgcMeanLuminance::AgcConstraint
+ * \brief The boundaries and target for an AeConstraintMode constraint
+ *
+ * This structure describes an AeConstraintMode constraint for the purposes of
+ * this algorithm. These constraints are expressed as a pair of quantile
+ * boundaries for a histogram, along with a luminance target and a bounds-type.
+ * The algorithm uses the constraints by ensuring that the defined portion of a
+ * luminance histogram (i.e. lying between the two quantiles) is above or below
+ * the given luminance value.
+ */
+
+/**
+ * \enum AgcMeanLuminance::AgcConstraint::Bound
+ * \brief Specify whether the constraint defines a lower or upper bound
+ * \var AgcMeanLuminance::AgcConstraint::Lower
+ * \brief The constraint defines a lower bound
+ * \var AgcMeanLuminance::AgcConstraint::Upper
+ * \brief The constraint defines an upper bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::bound
+ * \brief The type of constraint bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qLo
+ * \brief The lower quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qHi
+ * \brief The upper quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::yTarget
+ * \brief The luminance target for the constraint
+ */
+
+/**
+ * \class AgcMeanLuminance
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates an exposure time, analogue and digital gain such
+ * that the normalised mean luminance value of an image is driven towards a
+ * target, which itself is discovered from tuning data. The algorithm is a
+ * two-stage process.
+ *
+ * In the first stage, an initial gain value is derived by iteratively comparing
+ * the gain-adjusted mean luminance across the entire image against a target,
+ * and selecting a value which pushes it as closely as possible towards the
+ * target.
+ *
+ * In the second stage we calculate the gain required to drive the average of a
+ * section of a histogram to a target value, where the target and the boundaries
+ * of the section of the histogram used in the calculation are taken from the
+ * values defined for the currently configured AeConstraintMode within the
+ * tuning data. This class provides a helper function to parse those tuning data
+ * to discover the constraints, and so requires a specific format for those
+ * data which is described in \ref parseTuningData(). The gain from the first
+ * stage is then clamped to the gain from this stage.
+ *
+ * The final gain is used to adjust the effective exposure value of the image,
+ * and that new exposure value is divided into exposure time, analogue gain and
+ * digital gain according to the selected AeExposureMode. This class uses the
+ * \ref ExposureModeHelper class to assist in that division, and expects the
+ * data needed to initialise that class to be present in tuning data in a
+ * format described in \ref parseTuningData().
+ *
+ * In order to be able to use this algorithm an IPA module needs to be able to
+ * do the following:
+ *
+ * 1. Provide a luminance estimation across an entire image.
+ * 2. Provide a luminance Histogram for the image to use in calculating
+ * constraint compliance. The precision of the Histogram that is available
+ * will determine the supportable precision of the constraints.
+ *
+ * IPA modules that want to use this class to implement their AEGC algorithm
+ * should derive it and provide an overriding estimateLuminance() function for
+ * this class to use. They must call parseTuningData() in init(), and must also
+ * call setLimits() and resetFrameCount() in configure(). They may then use
+ * calculateNewEv() in process(). If the limits passed to setLimits() change for
+ * any reason (for example, in response to a FrameDurationLimits control being
+ * passed in queueRequest()) then setLimits() must be called again with the new
+ * values.
+ */
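
To make the call sequence described above concrete, a minimal sketch of a derived platform AGC follows. PlatformAgc is a hypothetical name and the context layout is assumed to match the IPU3 one; the IPU3 Agc rework in this same patch follows this exact pattern:

    class PlatformAgc : public Algorithm, public AgcMeanLuminance
    {
    public:
            int init(IPAContext &context, const YamlObject &tuningData) override
            {
                    /* Discover constraint and exposure modes from the tuning file. */
                    int ret = parseTuningData(tuningData);
                    if (ret)
                            return ret;

                    /* Expose AeConstraintMode/AeExposureMode to applications. */
                    context.ctrlMap.merge(controls());

                    return 0;
            }

            int configure(IPAContext &context,
                          [[maybe_unused]] const IPAConfigInfo &configInfo) override
            {
                    const auto &agc = context.configuration.agc;

                    /* Propagate the sensor limits to every ExposureModeHelper. */
                    setLimits(agc.minExposureTime, agc.maxExposureTime,
                              agc.minAnalogueGain, agc.maxAnalogueGain);
                    resetFrameCount();

                    return 0;
            }

    private:
            /* Platform-specific: normalised mean luminance with \a gain applied. */
            double estimateLuminance(double gain) const override;
    };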
+
+AgcMeanLuminance::AgcMeanLuminance()
+ : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0)
+{
+}
+
+AgcMeanLuminance::~AgcMeanLuminance() = default;
+
+void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData)
+{
+ relativeLuminanceTarget_ =
+ tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget);
+}
+
+void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id)
+{
+ for (const auto &[boundName, content] : modeDict.asDict()) {
+ if (boundName != "upper" && boundName != "lower") {
+ LOG(AgcMeanLuminance, Warning)
+ << "Ignoring unknown constraint bound '" << boundName << "'";
+ continue;
+ }
+
+ unsigned int idx = static_cast<unsigned int>(boundName == "upper");
+ AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx);
+ double qLo = content["qLo"].get<double>().value_or(0.98);
+ double qHi = content["qHi"].get<double>().value_or(1.0);
+ double yTarget =
+ content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0);
+
+ AgcConstraint constraint = { bound, qLo, qHi, yTarget };
+
+ if (!constraintModes_.count(id))
+ constraintModes_[id] = {};
+
+ if (idx)
+ constraintModes_[id].push_back(constraint);
+ else
+ constraintModes_[id].insert(constraintModes_[id].begin(), constraint);
+ }
+}
+
+int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableConstraintModes;
+
+ const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()];
+ if (yamlConstraintModes.isDictionary()) {
+ for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) {
+ if (AeConstraintModeNameValueMap.find(modeName) ==
+ AeConstraintModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown constraint mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid constraint mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ parseConstraint(modeDict,
+ AeConstraintModeNameValueMap.at(modeName));
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If the tuning data file contains no constraints then we use the
+ * default constraint that the IPU3/RkISP1 Agc algorithms were adhering
+ * to anyway before centralisation; this constraint forces the top 2% of
+ * the histogram to be at least 0.5.
+ */
+ if (constraintModes_.empty()) {
+ AgcConstraint constraint = {
+ AgcConstraint::Bound::Lower,
+ 0.98,
+ 1.0,
+ 0.5
+ };
+
+ constraintModes_[controls::ConstraintNormal].insert(
+ constraintModes_[controls::ConstraintNormal].begin(),
+ constraint);
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at("ConstraintNormal"));
+ }
+
+ controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes);
+
+ return 0;
+}
+
+int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableExposureModes;
+
+ const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()];
+ if (yamlExposureModes.isDictionary()) {
+ for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) {
+ if (AeExposureModeNameValueMap.find(modeName) ==
+ AeExposureModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown exposure mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeValues.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid exposure mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> exposureTimes =
+ modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> gains =
+ modeValues["gain"].getList<double>().value_or(std::vector<double>{});
+
+ if (exposureTimes.size() != gains.size()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain array sizes unequal";
+ return -EINVAL;
+ }
+
+ if (exposureTimes.empty()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain arrays are empty";
+ return -EINVAL;
+ }
+
+ std::vector<std::pair<utils::Duration, double>> stages;
+ for (unsigned int i = 0; i < exposureTimes.size(); i++) {
+ stages.push_back({
+ std::chrono::microseconds(exposureTimes[i]),
+ gains[i]
+ });
+ }
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper;
+ availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If we don't have any exposure modes in the tuning data we create an
+ * ExposureModeHelper using an empty vector of stages. This will result
+ * in the ExposureModeHelper simply driving the exposure time as high as
+ * possible before touching gain.
+ */
+ if (availableExposureModes.empty()) {
+ int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal");
+ std::vector<std::pair<utils::Duration, double>> stages = { };
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[exposureModeId] = helper;
+ availableExposureModes.push_back(exposureModeId);
+ }
+
+ controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes);
+
+ return 0;
+}
+
+/**
+ * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls
+ * \param[in] tuningData the YamlObject representing the tuning data
+ *
+ * This function parses tuning data to build the list of allowed values for the
+ * AeConstraintMode and AeExposureMode controls. Those tuning data must provide
+ * the data in a specific format; the Agc algorithm's tuning data should contain
+ * a dictionary called AeConstraintMode containing per-mode setting dictionaries
+ * with the key being a value from \ref controls::AeConstraintModeNameValueMap.
+ * Each mode dict may contain either a "lower" or "upper" key or both, for
+ * example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeConstraintMode:
+ * ConstraintNormal:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * ConstraintHighlight:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * upper:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.8
+ *
+ * \endcode
+ *
+ * For the AeExposureMode control the data should contain a dictionary called
+ * AeExposureMode containing per-mode setting dictionaries with the key being a
+ * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should
+ * contain an array of exposure times with the key "exposureTime" and an array
+ * of gain values with the key "gain", in this format:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeExposureMode:
+ * ExposureNormal:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ * ExposureShort:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ *
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
+{
+ int ret;
+
+ parseRelativeLuminanceTarget(tuningData);
+
+ ret = parseConstraintModes(tuningData);
+ if (ret)
+ return ret;
+
+ return parseExposureModes(tuningData);
+}
+
+/**
+ * \brief Set the ExposureModeHelper limits for this class
+ * \param[in] minExposureTime Minimum exposure time to allow
+ * \param[in] maxExposureTime Maximum exposure time to allow
+ * \param[in] minGain Minimum gain to allow
+ * \param[in] maxGain Maximum gain to allow
+ *
+ * This function calls \ref ExposureModeHelper::setLimits() for each
+ * ExposureModeHelper that has been created for this class.
+ */
+void AgcMeanLuminance::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ for (auto &[id, helper] : exposureModeHelpers_)
+ helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain);
+}
+
+/**
+ * \fn AgcMeanLuminance::constraintModes()
+ * \brief Get the constraint modes that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::exposureModeHelpers()
+ * \brief Get the ExposureModeHelpers that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::controls()
+ * \brief Get the controls that have been generated after parsing tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::estimateLuminance(const double gain)
+ * \brief Estimate the luminance of an image, adjusted by a given gain
+ * \param[in] gain The gain with which to adjust the luminance estimate
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied. It is a
+ * pure virtual function because estimation of luminance is a hardware-specific
+ * operation, which depends wholly on the format of the stats that are delivered
+ * to libcamera from the ISP. Derived classes must override this function with
+ * one that calculates the normalised mean luminance value across the entire
+ * image.
+ *
+ * \return The normalised relative luminance of the image
+ */
+
+/**
+ * \brief Estimate the initial gain needed to achieve a relative luminance
+ * target
+ * \return The calculated initial gain
+ */
+double AgcMeanLuminance::estimateInitialGain() const
+{
+ double yTarget = relativeLuminanceTarget_;
+ double yGain = 1.0;
+
+ /*
+ * To account for non-linearity caused by saturation, the value needs to
+ * be estimated in an iterative process, as multiplying by a gain will
+ * not increase the relative luminance by the same factor if some image
+ * regions are saturated.
+ */
+ for (unsigned int i = 0; i < 8; i++) {
+ double yValue = estimateLuminance(yGain);
+ double extraGain = std::min(10.0, yTarget / (yValue + .001));
+
+ yGain *= extraGain;
+ LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue
+ << ", Y target: " << yTarget
+ << ", gives gain " << yGain;
+
+ if (utils::abs_diff(extraGain, 1.0) < 0.01)
+ break;
+ }
+
+ return yGain;
+}
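
As an illustrative run of this loop: starting from yGain = 1.0 with the default target of 0.16, a first luminance estimate of 0.08 gives extraGain = 0.16 / 0.081, roughly 1.98, and yGain of about 1.98; if saturation then limits the next estimate to 0.15, extraGain is about 1.06 and yGain about 2.1, and the loop terminates once extraGain settles within 0.01 of 1.0, or after eight iterations at most.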
+
+/**
+ * \brief Clamp gain within the bounds of a defined constraint
+ * \param[in] constraintModeIndex The index of the constraint to adhere to
+ * \param[in] hist A histogram over which to calculate inter-quantile means
+ * \param[in] gain The gain to clamp
+ *
+ * \return The gain clamped within the constraint bounds
+ */
+double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain)
+{
+ std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex];
+ for (const AgcConstraint &constraint : constraints) {
+ double newGain = constraint.yTarget * hist.bins() /
+ hist.interQuantileMean(constraint.qLo, constraint.qHi);
+
+ if (constraint.bound == AgcConstraint::Bound::Lower &&
+ newGain > gain)
+ gain = newGain;
+
+ if (constraint.bound == AgcConstraint::Bound::Upper &&
+ newGain < gain)
+ gain = newGain;
+ }
+
+ return gain;
+}
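
As a concrete illustration of the clamp above: with the default lower-bound constraint (qLo 0.98, qHi 1.0, yTarget 0.5) and a 256-bin histogram whose top-2% inter-quantile mean is 200, newGain = 0.5 * 256 / 200 = 0.64, so a first-stage gain below 0.64 is raised to it; an upper-bound constraint would instead cap the gain at that value.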
+
+/**
+ * \brief Apply a filter on the exposure value to limit the speed of changes
+ * \param[in] exposureValue The target exposure from the AGC algorithm
+ *
+ * The speed of the filter is adaptive, and will produce the target quicker
+ * during startup, or when the target exposure is within 20% of the most recent
+ * filter output.
+ *
+ * \return The filtered exposure
+ */
+utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
+{
+ double speed = 0.2;
+
+ /* Adapt instantly if we are in startup phase. */
+ if (frameCount_ < kNumStartupFrames)
+ speed = 1.0;
+
+ /*
+ * If we are close to the desired result, go faster to avoid making
+ * multiple micro-adjustments.
+ * \todo Make this customisable?
+ */
+ if (filteredExposure_ < 1.2 * exposureValue &&
+ filteredExposure_ > 0.8 * exposureValue)
+ speed = sqrt(speed);
+
+ filteredExposure_ = speed * exposureValue +
+ filteredExposure_ * (1.0 - speed);
+
+ return filteredExposure_;
+}
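
For example, at the steady-state speed of 0.2, a previous filter output of 10ms and a new target of 20ms (outside the 20% window, so no speed-up applies) give 0.2 * 20ms + 0.8 * 10ms = 12ms; during the first kNumStartupFrames frames the speed is 1.0 and the output jumps straight to the target.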
+
+/**
+ * \brief Calculate the new exposure value and split it between exposure time
+ * and gain
+ * \param[in] constraintModeIndex The index of the current constraint mode
+ * \param[in] exposureModeIndex The index of the current exposure mode
+ * \param[in] yHist A Histogram from the ISP statistics to use in constraining
+ * the calculated gain
+ * \param[in] effectiveExposureValue The EV applied to the frame from which the
+ * statistics in use derive
+ *
+ * Calculate a new exposure value to try to obtain the target. The calculated
+ * exposure value is filtered to prevent rapid changes from frame to frame, and
+ * divided into exposure time, analogue and digital gain.
+ *
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
+ uint32_t exposureModeIndex,
+ const Histogram &yHist,
+ utils::Duration effectiveExposureValue)
+{
+ /*
+ * The pipeline handler should validate that we have received an allowed
+ * value for AeExposureMode.
+ */
+ std::shared_ptr<ExposureModeHelper> exposureModeHelper =
+ exposureModeHelpers_.at(exposureModeIndex);
+
+ double gain = estimateInitialGain();
+ gain = constraintClampGain(constraintModeIndex, yHist, gain);
+
+ /*
+ * We don't check whether we're already close to the target, because
+ * even if the effective exposure value is the same as the last frame's
+ * we could have switched to an exposure mode that would require a new
+ * pass through the splitExposure() function.
+ */
+
+ utils::Duration newExposureValue = effectiveExposureValue * gain;
+
+ /*
+ * We filter the exposure value to make sure changes are not too jarring
+ * from frame to frame.
+ */
+ newExposureValue = filterExposure(newExposureValue);
+
+ frameCount_++;
+ return exposureModeHelper->splitExposure(newExposureValue);
+}
+
+/**
+ * \fn AgcMeanLuminance::resetFrameCount()
+ * \brief Reset the frame counter
+ *
+ * This function resets the internal frame counter, which exists to help the
+ * algorithm decide whether it should respond instantly or not. The expectation
+ * is for derived classes to call this function before each camera start call in
+ * their configure() function.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
new file mode 100644
index 00000000..c41391cb
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "exposure_mode_helper.h"
+#include "histogram.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AgcMeanLuminance
+{
+public:
+ AgcMeanLuminance();
+ virtual ~AgcMeanLuminance();
+
+ struct AgcConstraint {
+ enum class Bound {
+ Lower = 0,
+ Upper = 1
+ };
+ Bound bound;
+ double qLo;
+ double qHi;
+ double yTarget;
+ };
+
+ int parseTuningData(const YamlObject &tuningData);
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
+ {
+ return constraintModes_;
+ }
+
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers()
+ {
+ return exposureModeHelpers_;
+ }
+
+ ControlInfoMap::Map controls()
+ {
+ return controls_;
+ }
+
+ std::tuple<utils::Duration, double, double>
+ calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex,
+ const Histogram &yHist, utils::Duration effectiveExposureValue);
+
+ void resetFrameCount()
+ {
+ frameCount_ = 0;
+ }
+
+private:
+ virtual double estimateLuminance(const double gain) const = 0;
+
+ void parseRelativeLuminanceTarget(const YamlObject &tuningData);
+ void parseConstraint(const YamlObject &modeDict, int32_t id);
+ int parseConstraintModes(const YamlObject &tuningData);
+ int parseExposureModes(const YamlObject &tuningData);
+ double estimateInitialGain() const;
+ double constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain);
+ utils::Duration filterExposure(utils::Duration exposureValue);
+
+ uint64_t frameCount_;
+ utils::Duration filteredExposure_;
+ double relativeLuminanceTarget_;
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes_;
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_;
+ ControlInfoMap::Map controls_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
index bc1c29a6..201efdfd 100644
--- a/src/ipa/libipa/algorithm.cpp
+++ b/src/ipa/libipa/algorithm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.cpp - IPA control algorithm interface
+ * IPA control algorithm interface
*/
#include "algorithm.h"
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
index 987e3e4c..9a19dbd6 100644
--- a/src/ipa/libipa/algorithm.h
+++ b/src/ipa/libipa/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - ISP control algorithm interface
+ * ISP control algorithm interface
*/
#pragma once
diff --git a/src/ipa/libipa/awb.cpp b/src/ipa/libipa/awb.cpp
new file mode 100644
index 00000000..925fac23
--- /dev/null
+++ b/src/ipa/libipa/awb.cpp
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Generic AWB algorithms
+ */
+
+#include "awb.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file awb.h
+ * \brief Base classes for AWB algorithms
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Awb)
+
+namespace ipa {
+
+/**
+ * \class AwbResult
+ * \brief The result of an AWB calculation
+ *
+ * This class holds the result of an auto white balance calculation.
+ */
+
+/**
+ * \var AwbResult::gains
+ * \brief The calculated white balance gains
+ */
+
+/**
+ * \var AwbResult::colourTemperature
+ * \brief The calculated colour temperature in Kelvin
+ */
+
+/**
+ * \class AwbStats
+ * \brief An abstraction class wrapping hardware-specific AWB statistics
+ *
+ * IPA modules using an AWB algorithm based on the AwbAlgorithm class need to
+ * implement this class to give the algorithm access to the hardware-specific
+ * statistics data.
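+ *
+ * A minimal sketch of a possible implementation, assuming the hardware
+ * statistics have been reduced to per-zone RGB means (MyAwbStats and zones_
+ * are illustrative names, not part of libipa):
+ *
+ * \code{.cpp}
+ * class MyAwbStats : public AwbStats
+ * {
+ * public:
+ * 	double computeColourError(const RGB<double> &gains) const override
+ * 	{
+ * 		double sum = 0.0;
+ * 		for (const RGB<double> &zone : zones_) {
+ * 			double r = zone.r() * gains.r();
+ * 			double g = zone.g() * gains.g();
+ * 			double b = zone.b() * gains.b();
+ * 			// Squared non-greyness of this zone.
+ * 			sum += (r - g) * (r - g) + (b - g) * (b - g);
+ * 		}
+ * 		// Average so that values are comparable across platforms.
+ * 		return sum / zones_.size();
+ * 	}
+ *
+ * 	RGB<double> rgbMeans() const override
+ * 	{
+ * 		RGB<double> sum({ 0.0, 0.0, 0.0 });
+ * 		for (const RGB<double> &zone : zones_)
+ * 			sum = sum + zone;
+ * 		return sum / static_cast<double>(zones_.size());
+ * 	}
+ *
+ * private:
+ * 	std::vector<RGB<double>> zones_;
+ * };
+ * \endcode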
+ */
+
+/**
+ * \fn AwbStats::computeColourError()
+ * \brief Compute an error value for when the given gains would be applied
+ * \param[in] gains The gains to apply
+ *
+ * Compute an error value (non-greyness) assuming the given \a gains would be
+ * applied. To keep the actual implementations computationally inexpensive,
+ * the squared colour error shall be returned.
+ *
+ * If the AWB statistics provide multiple zones, the average of the individual
+ * squared errors shall be returned. Averaging/normalizing is necessary so that
+ * the error values are comparable across all hardware platforms.
+ *
+ * \return The computed error value
+ */
+
+/**
+ * \fn AwbStats::rgbMeans()
+ * \brief Get RGB means of the statistics
+ *
+ * Fetch the RGB means from the statistics. The values of each channel are
+ * dimensionless and only the ratios are used for further calculations. This is
+ * used by the simple grey world model to calculate the gains to apply.
+ *
+ * \return The RGB means
+ */
+
+/**
+ * \class AwbAlgorithm
+ * \brief A base class for auto white balance algorithms
+ *
+ * This class is a base class for auto white balance algorithms. It defines an
+ * interface for the algorithms to implement, and is used by the IPAs to
+ * interact with the concrete implementation.
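+ *
+ * A hypothetical usage sketch from an IPA module, using the AwbGrey
+ * implementation and the MyAwbStats class sketched above (tuningData and lux
+ * are assumed to be provided by the caller):
+ *
+ * \code{.cpp}
+ * std::unique_ptr<AwbAlgorithm> awb = std::make_unique<AwbGrey>();
+ * if (awb->init(tuningData))
+ * 	return -EINVAL;
+ *
+ * MyAwbStats stats;
+ * AwbResult result = awb->calculateAwb(stats, lux);
+ * RGB<double> gains = result.gains;
+ * \endcode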
+ */
+
+/**
+ * \fn AwbAlgorithm::init()
+ * \brief Initialize the algorithm with the given tuning data
+ * \param[in] tuningData The tuning data to use for the algorithm
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+
+/**
+ * \fn AwbAlgorithm::calculateAwb()
+ * \brief Calculate AWB data from the given statistics
+ * \param[in] stats The statistics to use for the calculation
+ * \param[in] lux The lux value of the scene
+ *
+ * Calculate an AwbResult object from the given statistics and lux value.
+ * A \a lux value of 0 means it is unknown or invalid and the algorithm shall
+ * ignore it.
+ *
+ * \return The AWB result
+ */
+
+/**
+ * \fn AwbAlgorithm::gainsFromColourTemperature()
+ * \brief Compute white balance gains from a colour temperature
+ * \param[in] colourTemperature The colour temperature in Kelvin
+ *
+ * Compute the white balance gains from a \a colourTemperature. This function
+ * does not take any statistics into account. It is used to compute the colour
+ * gains when the user manually specifies a colour temperature.
+ *
+ * \return The colour gains
+ */
+
+/**
+ * \fn AwbAlgorithm::controls()
+ * \brief Get the controls info map for this algorithm
+ *
+ * \return The controls info map
+ */
+
+/**
+ * \fn AwbAlgorithm::handleControls()
+ * \brief Handle the controls supplied in a request
+ * \param[in] controls The controls to handle
+ */
+
+/**
+ * \brief Parse the mode configurations from the tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ * \param[in] def The default value for the AwbMode control
+ *
+ * Utility function to parse the tuning data for an AwbMode entry and read all
+ * provided modes. It adds controls::AwbMode to AwbAlgorithm::controls_ and
+ * populates AwbAlgorithm::modes_. For a list of possible modes see \ref
+ * controls::AwbModeEnum.
+ *
+ * Each mode entry must contain a "lo" and "hi" key to specify the lower and
+ * upper colour temperature of that mode. For example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Awb:
+ * AwbMode:
+ * AwbAuto:
+ * lo: 2500
+ * hi: 8000
+ * AwbIncandescent:
+ * lo: 2500
+ * hi: 3000
+ * ...
+ * \endcode
+ *
+ * If \a def is supplied but not contained in the \a tuningData, -EINVAL is
+ * returned.
+ *
+ * \sa controls::AwbModeEnum
+ * \return Zero on success, negative error code otherwise
+ */
+int AwbAlgorithm::parseModeConfigs(const YamlObject &tuningData,
+ const ControlValue &def)
+{
+ std::vector<ControlValue> availableModes;
+
+ const YamlObject &yamlModes = tuningData[controls::AwbMode.name()];
+ if (!yamlModes.isDictionary()) {
+ LOG(Awb, Error)
+ << "AwbModes must be a dictionary.";
+ return -EINVAL;
+ }
+
+ for (const auto &[modeName, modeDict] : yamlModes.asDict()) {
+ if (controls::AwbModeNameValueMap.find(modeName) ==
+ controls::AwbModeNameValueMap.end()) {
+ LOG(Awb, Warning)
+ << "Skipping unknown AWB mode '"
+ << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(Awb, Error)
+ << "Invalid AWB mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ const auto &modeValue = static_cast<controls::AwbModeEnum>(
+ controls::AwbModeNameValueMap.at(modeName));
+
+ ModeConfig &config = modes_[modeValue];
+
+ auto hi = modeDict["hi"].get<double>();
+ if (!hi) {
+ LOG(Awb, Error) << "Failed to read hi param of mode "
+ << modeName;
+ return -EINVAL;
+ }
+ config.ctHi = *hi;
+
+ auto lo = modeDict["lo"].get<double>();
+ if (!lo) {
+ LOG(Awb, Error) << "Failed to read low param of mode "
+ << modeName;
+ return -EINVAL;
+ }
+ config.ctLo = *lo;
+
+ availableModes.push_back(modeValue);
+ }
+
+ if (modes_.empty()) {
+ LOG(Awb, Error) << "No AWB modes configured";
+ return -EINVAL;
+ }
+
+ if (!def.isNone() &&
+ modes_.find(def.get<controls::AwbModeEnum>()) == modes_.end()) {
+ const auto &names = controls::AwbMode.enumerators();
+ LOG(Awb, Error) << names.at(def.get<controls::AwbModeEnum>())
+ << " mode is missing in the configuration.";
+ return -EINVAL;
+ }
+
+ controls_[&controls::AwbMode] = ControlInfo(availableModes, def);
+
+ return 0;
+}
+
+/**
+ * \class AwbAlgorithm::ModeConfig
+ * \brief Holds the configuration of a single AWB mode
+ *
+ * AWB modes limit the regulation of the AWB algorithm to a specific range of
+ * colour temperatures.
+ */
+
+/**
+ * \var AwbAlgorithm::ModeConfig::ctLo
+ * \brief The lowest valid colour temperature of that mode
+ */
+
+/**
+ * \var AwbAlgorithm::ModeConfig::ctHi
+ * \brief The highest valid colour temperature of that mode
+ */
+
+/**
+ * \var AwbAlgorithm::controls_
+ * \brief Controls info map for the controls provided by the algorithm
+ */
+
+/**
+ * \var AwbAlgorithm::modes_
+ * \brief Map of all configured modes
+ * \sa AwbAlgorithm::parseModeConfigs
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb.h b/src/ipa/libipa/awb.h
new file mode 100644
index 00000000..4bab7a45
--- /dev/null
+++ b/src/ipa/libipa/awb.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Generic AWB algorithms
+ */
+
+#pragma once
+
+#include <map>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/vector.h"
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+struct AwbResult {
+ RGB<double> gains;
+ double colourTemperature;
+};
+
+struct AwbStats {
+ virtual double computeColourError(const RGB<double> &gains) const = 0;
+ virtual RGB<double> rgbMeans() const = 0;
+
+protected:
+ ~AwbStats() = default;
+};
+
+class AwbAlgorithm
+{
+public:
+ virtual ~AwbAlgorithm() = default;
+
+ virtual int init(const YamlObject &tuningData) = 0;
+ virtual AwbResult calculateAwb(const AwbStats &stats, unsigned int lux) = 0;
+ virtual RGB<double> gainsFromColourTemperature(double colourTemperature) = 0;
+
+ const ControlInfoMap::Map &controls() const
+ {
+ return controls_;
+ }
+
+ virtual void handleControls([[maybe_unused]] const ControlList &controls) {}
+
+protected:
+ int parseModeConfigs(const YamlObject &tuningData,
+ const ControlValue &def = {});
+
+ struct ModeConfig {
+ double ctHi;
+ double ctLo;
+ };
+
+ ControlInfoMap::Map controls_;
+ std::map<controls::AwbModeEnum, AwbAlgorithm::ModeConfig> modes_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb_bayes.cpp b/src/ipa/libipa/awb_bayes.cpp
new file mode 100644
index 00000000..d1d0eaf0
--- /dev/null
+++ b/src/ipa/libipa/awb_bayes.cpp
@@ -0,0 +1,505 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Implementation of a Bayesian AWB algorithm
+ */
+
+#include "awb_bayes.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <map>
+#include <ostream>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "colours.h"
+
+/**
+ * \file awb_bayes.h
+ * \brief Implementation of a Bayesian auto white balance algorithm
+ *
+ * This implementation is based on the initial implementation done by
+ * Raspberry Pi.
+ *
+ * \todo Documentation
+ *
+ * \todo Not all the features implemented by Raspberry Pi were ported over to
+ * this algorithm because they either rely on hardware features not generally
+ * available or were considered not important enough at the moment.
+ *
+ * The following parts are not implemented:
+ *
+ * - min_pixels: minimum proportion of pixels counted within AWB region for it
+ * to be "useful"
+ * - min_g: minimum G value of those pixels, to be regarded as "useful"
+ * - min_regions: number of AWB regions that must be "useful" in order to do the
+ * AWB calculation
+ * - deltaLimit: clamp on colour error term (so as not to penalize non-grey
+ * excessively)
+ * - bias_proportion: The biasProportion parameter adds a small proportion of
+ * the counted pixels to a region biased to the biasCT colour temperature.
+ * A typical value for biasProportion would be between 0.05 to 0.1.
+ * - bias_ct: CT target for the search bias
+ * - sensitivityR: red sensitivity ratio (set to canonical sensor's R/G divided
+ * by this sensor's R/G)
+ * - sensitivityB: blue sensitivity ratio (set to canonical sensor's B/G divided
+ * by this sensor's B/G)
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Awb)
+
+namespace {
+
+template<typename T>
+class LimitsRecorder
+{
+public:
+ LimitsRecorder()
+ : min_(std::numeric_limits<T>::max()),
+ max_(std::numeric_limits<T>::min())
+ {
+ }
+
+ void record(const T &value)
+ {
+ min_ = std::min(min_, value);
+ max_ = std::max(max_, value);
+ }
+
+ const T &min() const { return min_; }
+ const T &max() const { return max_; }
+
+private:
+ T min_;
+ T max_;
+};
+
+#ifndef __DOXYGEN__
+template<typename T>
+std::ostream &operator<<(std::ostream &out, const LimitsRecorder<T> &v)
+{
+ out << "[ " << v.min() << ", " << v.max() << " ]";
+ return out;
+}
+#endif
+
+} /* namespace */
+
+namespace ipa {
+
+/**
+ * \brief Step size control for CT search
+ */
+constexpr double kSearchStep = 0.2;
+
+/**
+ * \copydoc libcamera::ipa::Interpolator::interpolate()
+ */
+template<>
+void Interpolator<Pwl>::interpolate(const Pwl &a, const Pwl &b, Pwl &dest, double lambda)
+{
+ dest = Pwl::combine(a, b,
+ [=](double /*x*/, double y0, double y1) -> double {
+ return y0 * (1.0 - lambda) + y1 * lambda;
+ });
+}
+
+/**
+ * \class AwbBayes
+ * \brief Implementation of a Bayesian auto white balance algorithm
+ *
+ * In a Bayesian AWB algorithm the auto white balance estimation is improved by
+ * taking into account the likelihood of a given light source based on the
+ * estimated lux level. For example, if the scene is very bright we can assume
+ * that we are outdoors and that colour temperatures around 6500K are preferred.
+ *
+ * The second part of this algorithm is the search for the most likely colour
+ * temperature. It is implemented in AwbBayes::coarseSearch() and
+ * AwbBayes::fineSearch(). The search itself works very well without prior
+ * likelihoods, so the algorithm provides good results even when no priors are
+ * available.
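+ *
+ * The prior likelihoods are supplied through the tuning data. A hypothetical
+ * priors section, following the structure parsed by readPriors() (the values
+ * are illustrative only), might look like:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ *   - Awb:
+ *       priors:
+ *         - lux: 20
+ *           ct: [ 2000, 3000, 13000 ]
+ *           probability: [ 1.0, 1.0, 1.0 ]
+ *         - lux: 800
+ *           ct: [ 2000, 6000, 13000 ]
+ *           probability: [ 0.1, 1.0, 0.1 ]
+ * \endcode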
+ */
+
+/**
+ * \var AwbBayes::transversePos_
+ * \brief How far to wander off CT curve towards "more purple"
+ */
+
+/**
+ * \var AwbBayes::transverseNeg_
+ * \brief How far to wander off CT curve towards "more green"
+ */
+
+/**
+ * \var AwbBayes::currentMode_
+ * \brief The currently selected mode
+ */
+
+int AwbBayes::init(const YamlObject &tuningData)
+{
+ int ret = colourGainCurve_.readYaml(tuningData["colourGains"], "ct", "gains");
+ if (ret) {
+ LOG(Awb, Error)
+ << "Failed to parse 'colourGains' "
+ << "parameter from tuning file";
+ return ret;
+ }
+
+ ctR_.clear();
+ ctB_.clear();
+ for (const auto &[ct, g] : colourGainCurve_.data()) {
+ ctR_.append(ct, 1.0 / g[0]);
+ ctB_.append(ct, 1.0 / g[1]);
+ }
+
+ /* We will want the inverse functions of these too. */
+ ctRInverse_ = ctR_.inverse().first;
+ ctBInverse_ = ctB_.inverse().first;
+
+ ret = readPriors(tuningData);
+ if (ret) {
+ LOG(Awb, Error) << "Failed to read priors";
+ return ret;
+ }
+
+ ret = parseModeConfigs(tuningData, controls::AwbAuto);
+ if (ret) {
+ LOG(Awb, Error)
+ << "Failed to parse mode parameter from tuning file";
+ return ret;
+ }
+ currentMode_ = &modes_[controls::AwbAuto];
+
+ transversePos_ = tuningData["transversePos"].get<double>(0.01);
+ transverseNeg_ = tuningData["transverseNeg"].get<double>(0.01);
+ if (transversePos_ <= 0 || transverseNeg_ <= 0) {
+ LOG(Awb, Error) << "AwbConfig: transversePos/Neg must be > 0";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int AwbBayes::readPriors(const YamlObject &tuningData)
+{
+ const auto &priorsList = tuningData["priors"];
+ std::map<uint32_t, Pwl> priors;
+
+ if (!priorsList) {
+ LOG(Awb, Error) << "No priors specified";
+ return -EINVAL;
+ }
+
+ for (const auto &p : priorsList.asList()) {
+ if (!p.contains("lux")) {
+ LOG(Awb, Error) << "Missing lux value";
+ return -EINVAL;
+ }
+
+ uint32_t lux = p["lux"].get<uint32_t>(0);
+ if (priors.count(lux)) {
+ LOG(Awb, Error) << "Duplicate prior for lux value " << lux;
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> temperatures =
+ p["ct"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> probabilities =
+ p["probability"].getList<double>().value_or(std::vector<double>{});
+
+ if (temperatures.size() != probabilities.size()) {
+ LOG(Awb, Error)
+ << "Ct and probability array sizes are unequal";
+ return -EINVAL;
+ }
+
+ if (temperatures.empty()) {
+ LOG(Awb, Error)
+ << "Ct and probability arrays are empty";
+ return -EINVAL;
+ }
+
+ std::map<int, double> ctToProbability;
+ for (unsigned int i = 0; i < temperatures.size(); i++) {
+ int t = temperatures[i];
+ if (ctToProbability.count(t)) {
+ LOG(Awb, Error) << "Duplicate ct value";
+ return -EINVAL;
+ }
+
+ ctToProbability[t] = probabilities[i];
+ }
+
+ auto &pwl = priors[lux];
+ for (const auto &[ct, prob] : ctToProbability) {
+ if (prob < 1e-6) {
+ LOG(Awb, Error) << "Prior probability must be larger than 1e-6";
+ return -EINVAL;
+ }
+ pwl.append(ct, prob);
+ }
+ }
+
+ if (priors.empty()) {
+ LOG(Awb, Error) << "No priors specified";
+ return -EINVAL;
+ }
+
+ priors_.setData(std::move(priors));
+
+ return 0;
+}
+
+void AwbBayes::handleControls(const ControlList &controls)
+{
+ auto mode = controls.get(controls::AwbMode);
+ if (mode) {
+ auto it = modes_.find(static_cast<controls::AwbModeEnum>(*mode));
+ if (it != modes_.end())
+ currentMode_ = &it->second;
+ else
+ LOG(Awb, Error) << "Unsupported AWB mode " << *mode;
+ }
+}
+
+RGB<double> AwbBayes::gainsFromColourTemperature(double colourTemperature)
+{
+ /*
+ * \todo In the Raspberry Pi code, the ct curve was interpolated in
+ * the white point space (1/x), not in gain space. This feels
+ * counterintuitive, as the gains are in linear space, but I can't prove it.
+ */
+ const auto &gains = colourGainCurve_.getInterpolated(colourTemperature);
+ return { { gains[0], 1.0, gains[1] } };
+}
+
+AwbResult AwbBayes::calculateAwb(const AwbStats &stats, unsigned int lux)
+{
+ ipa::Pwl prior;
+ if (lux > 0) {
+ prior = priors_.getInterpolated(lux);
+ prior.map([](double x, double y) {
+ LOG(Awb, Debug) << "(" << x << "," << y << ")";
+ });
+ } else {
+ prior.append(0, 1.0);
+ }
+
+ double t = coarseSearch(prior, stats);
+ double r = ctR_.eval(t);
+ double b = ctB_.eval(t);
+ LOG(Awb, Debug)
+ << "After coarse search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+
+ /*
+ * Original comment from Raspberry Pi:
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+ * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
+ fineSearch(t, r, b, prior, stats);
+ LOG(Awb, Debug)
+ << "After fine search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+
+ return { { { 1.0 / r, 1.0, 1.0 / b } }, t };
+}
+
+double AwbBayes::coarseSearch(const ipa::Pwl &prior, const AwbStats &stats) const
+{
+ std::vector<Pwl::Point> points;
+ size_t bestPoint = 0;
+ double t = currentMode_->ctLo;
+ int spanR = -1;
+ int spanB = -1;
+ LimitsRecorder<double> errorLimits;
+ LimitsRecorder<double> priorLogLikelihoodLimits;
+
+ /* Step down the CT curve evaluating log likelihood. */
+ while (true) {
+ double r = ctR_.eval(t, &spanR);
+ double b = ctB_.eval(t, &spanB);
+ RGB<double> gains({ 1 / r, 1.0, 1 / b });
+ double delta2Sum = stats.computeColourError(gains);
+ double priorLogLikelihood = log(prior.eval(prior.domain().clamp(t)));
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+
+ errorLimits.record(delta2Sum);
+ priorLogLikelihoodLimits.record(priorLogLikelihood);
+
+ points.push_back({ { t, finalLogLikelihood } });
+ if (points.back().y() < points[bestPoint].y())
+ bestPoint = points.size() - 1;
+
+ if (t == currentMode_->ctHi)
+ break;
+
+ /*
+ * Ensure even steps along the r/b curve by scaling them by the
+ * current t.
+ */
+ t = std::min(t + t / 10 * kSearchStep, currentMode_->ctHi);
+ }
+
+ t = points[bestPoint].x();
+ LOG(Awb, Debug) << "Coarse search found CT " << t
+ << " error limits:" << errorLimits
+ << " prior log likelihood limits:" << priorLogLikelihoodLimits;
+
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbors.
+ */
+ if (points.size() > 2) {
+ bestPoint = std::clamp(bestPoint, std::size_t{ 1 }, points.size() - 2);
+ t = interpolateQuadratic(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ LOG(Awb, Debug)
+ << "After quadratic refinement, coarse search has CT "
+ << t;
+ }
+
+ return t;
+}
+
+void AwbBayes::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior, const AwbStats &stats) const
+{
+ int spanR = -1;
+ int spanB = -1;
+ double step = t / 10 * kSearchStep * 0.1;
+ int nsteps = 5;
+ ctR_.eval(t, &spanR);
+ ctB_.eval(t, &spanB);
+ double rDiff = ctR_.eval(t + nsteps * step, &spanR) -
+ ctR_.eval(t - nsteps * step, &spanR);
+ double bDiff = ctB_.eval(t + nsteps * step, &spanB) -
+ ctB_.eval(t - nsteps * step, &spanB);
+ Pwl::Point transverse({ bDiff, -rDiff });
+ if (transverse.length2() < 1e-6)
+ return;
+ /*
+ * Normalize transverse into a unit vector orthogonal to the b vs. r
+ * function (pointing outwards with r and b increasing).
+ */
+ transverse = transverse / transverse.length();
+ double bestLogLikelihood = 0;
+ double bestT = 0;
+ Pwl::Point bestRB(0);
+ double transverseRange = transverseNeg_ + transversePos_;
+ const int maxNumDeltas = 12;
+ LimitsRecorder<double> errorLimits;
+ LimitsRecorder<double> priorLogLikelihoodLimits;
+
+ /* A transverse step approximately every 0.01 r/b units. */
+ int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
+ numDeltas = std::clamp(numDeltas, 3, maxNumDeltas);
+
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
+ nsteps += numDeltas;
+ for (int i = -nsteps; i <= nsteps; i++) {
+ double tTest = t + i * step;
+ double priorLogLikelihood =
+ log(prior.eval(prior.domain().clamp(tTest)));
+ priorLogLikelihoodLimits.record(priorLogLikelihood);
+ Pwl::Point rbStart{ { ctR_.eval(tTest, &spanR),
+ ctB_.eval(tTest, &spanB) } };
+ Pwl::Point samples[maxNumDeltas];
+ int bestPoint = 0;
+
+ /*
+ * Sample numDeltas points transversely *off* the CT curve
+ * in the range [-transverseNeg, transversePos].
+ * The x value of a sample contains the distance and the y
+ * value contains the corresponding log likelihood.
+ */
+ double transverseStep = transverseRange / (numDeltas - 1);
+ for (int j = 0; j < numDeltas; j++) {
+ auto &p = samples[j];
+ p.x() = -transverseNeg_ + transverseStep * j;
+ Pwl::Point rbTest = rbStart + transverse * p.x();
+ RGB<double> gains({ 1 / rbTest[0], 1.0, 1 / rbTest[1] });
+ double delta2Sum = stats.computeColourError(gains);
+ errorLimits.record(delta2Sum);
+ p.y() = delta2Sum - priorLogLikelihood;
+
+ if (p.y() < samples[bestPoint].y())
+ bestPoint = j;
+ }
+
+ /*
+ * We have all samples transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
+ bestPoint = std::clamp(bestPoint, 1, numDeltas - 2);
+ double bestOffset = interpolateQuadratic(samples[bestPoint - 1],
+ samples[bestPoint],
+ samples[bestPoint + 1]);
+ Pwl::Point rbTest = rbStart + transverse * bestOffset;
+ RGB<double> gains({ 1 / rbTest[0], 1.0, 1 / rbTest[1] });
+ double delta2Sum = stats.computeColourError(gains);
+ errorLimits.record(delta2Sum);
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+
+ if (bestT == 0 || finalLogLikelihood < bestLogLikelihood) {
+ bestLogLikelihood = finalLogLikelihood;
+ bestT = tTest;
+ bestRB = rbTest;
+ }
+ }
+
+ t = bestT;
+ r = bestRB[0];
+ b = bestRB[1];
+ LOG(Awb, Debug)
+ << "Fine search found t " << t << " r " << r << " b " << b
+ << " error limits: " << errorLimits
+ << " prior log likelihood limits: " << priorLogLikelihoodLimits;
+}
+
+/**
+ * \brief Find the extremum of a function
+ * \param[in] a First point
+ * \param[in] b Second point
+ * \param[in] c Third point
+ *
+ * Given 3 points on a curve, find the extremum of the function in that interval
+ * by fitting a quadratic.
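+ *
+ * Writing \f$ba = b - a\f$ and \f$ca = c - a\f$ in relative coordinates, the
+ * extremum of the fitted parabola lies at
+ * \f$x = a_x + \frac{ba_y ca_x^2 - ca_y ba_x^2}{2 (ba_y ca_x - ca_y ba_x)}\f$,
+ * which is the expression evaluated below before clamping.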
+ *
+ * \return The x value of the extremum clamped to the interval [a.x(), c.x()]
+ */
+double AwbBayes::interpolateQuadratic(Pwl::Point const &a, Pwl::Point const &b,
+ Pwl::Point const &c) const
+{
+ const double eps = 1e-3;
+ Pwl::Point ca = c - a;
+ Pwl::Point ba = b - a;
+ double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x());
+ if (std::abs(denominator) > eps) {
+ double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x();
+ double result = numerator / denominator + a.x();
+ return std::max(a.x(), std::min(c.x(), result));
+ }
+ /* The curve has degenerated into a straight line segment. */
+ return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x());
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb_bayes.h b/src/ipa/libipa/awb_bayes.h
new file mode 100644
index 00000000..bb933038
--- /dev/null
+++ b/src/ipa/libipa/awb_bayes.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for Bayesian AWB algorithms
+ */
+
+#pragma once
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/vector.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "awb.h"
+#include "interpolator.h"
+#include "pwl.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AwbBayes : public AwbAlgorithm
+{
+public:
+ AwbBayes() = default;
+
+ int init(const YamlObject &tuningData) override;
+ AwbResult calculateAwb(const AwbStats &stats, unsigned int lux) override;
+ RGB<double> gainsFromColourTemperature(double temperatureK) override;
+ void handleControls(const ControlList &controls) override;
+
+private:
+ int readPriors(const YamlObject &tuningData);
+
+ void fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior,
+ const AwbStats &stats) const;
+ double coarseSearch(const ipa::Pwl &prior, const AwbStats &stats) const;
+ double interpolateQuadratic(ipa::Pwl::Point const &a,
+ ipa::Pwl::Point const &b,
+ ipa::Pwl::Point const &c) const;
+
+ Interpolator<Pwl> priors_;
+ Interpolator<Vector<double, 2>> colourGainCurve_;
+
+ ipa::Pwl ctR_;
+ ipa::Pwl ctB_;
+ ipa::Pwl ctRInverse_;
+ ipa::Pwl ctBInverse_;
+
+ double transversePos_;
+ double transverseNeg_;
+
+ ModeConfig *currentMode_ = nullptr;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb_grey.cpp b/src/ipa/libipa/awb_grey.cpp
new file mode 100644
index 00000000..d3d2132b
--- /dev/null
+++ b/src/ipa/libipa/awb_grey.cpp
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Implementation of grey world AWB algorithm
+ */
+
+#include "awb_grey.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "colours.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file awb_grey.h
+ * \brief Implementation of a grey world AWB algorithm
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Awb)
+namespace ipa {
+
+/**
+ * \class AwbGrey
+ * \brief A grey world auto white balance algorithm
+ */
+
+/**
+ * \brief Initialize the algorithm with the given tuning data
+ * \param[in] tuningData The tuning data for the algorithm
+ *
+ * Load the colour temperature curve from the tuning data. If there is no tuning
+ * data available, continue with a warning. Manual colour temperature will not
+ * work in that case.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+int AwbGrey::init(const YamlObject &tuningData)
+{
+ Interpolator<Vector<double, 2>> gains;
+ int ret = gains.readYaml(tuningData["colourGains"], "ct", "gains");
+ if (ret < 0)
+ LOG(Awb, Warning)
+ << "Failed to parse 'colourGains' "
+ << "parameter from tuning file; "
+ << "manual colour temperature will not work properly";
+ else
+ colourGainCurve_ = gains;
+
+ return 0;
+}
+
+/**
+ * \brief Calculate AWB data from the given statistics
+ * \param[in] stats The statistics to use for the calculation
+ * \param[in] lux The lux value of the scene
+ *
+ * The colour temperature is estimated based on the colours::estimateCCT()
+ * function. The gains are calculated purely based on the RGB means provided by
+ * the \a stats. The colour temperature is not taken into account when
+ * calculating the gains.
+ *
+ * The \a lux parameter is not used in this algorithm.
+ *
+ * \return The AWB result
+ */
+AwbResult AwbGrey::calculateAwb(const AwbStats &stats, [[maybe_unused]] unsigned int lux)
+{
+ AwbResult result;
+ auto means = stats.rgbMeans();
+ result.colourTemperature = estimateCCT(means);
+
+ /*
+ * Estimate the red and blue gains to apply in a grey world. The green
+ * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
+ * divisor to a minimum value of 1.0.
+ */
+ result.gains.r() = means.g() / std::max(means.r(), 1.0);
+ result.gains.g() = 1.0;
+ result.gains.b() = means.g() / std::max(means.b(), 1.0);
+ return result;
+}
+
+/**
+ * \brief Compute white balance gains from a colour temperature
+ * \param[in] colourTemperature The colour temperature in Kelvin
+ *
+ * Compute the white balance gains from a \a colourTemperature. This function
+ * does not take any statistics into account. It simply interpolates the colour
+ * gains configured in the colour temperature curve.
+ *
+ * \return The colour gains if a colour temperature curve is available,
+ * [1, 1, 1] otherwise.
+ */
+RGB<double> AwbGrey::gainsFromColourTemperature(double colourTemperature)
+{
+ if (!colourGainCurve_) {
+ LOG(Awb, Error) << "No gains defined";
+ return RGB<double>({ 1.0, 1.0, 1.0 });
+ }
+
+ auto gains = colourGainCurve_->getInterpolated(colourTemperature);
+ return { { gains[0], 1.0, gains[1] } };
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/awb_grey.h b/src/ipa/libipa/awb_grey.h
new file mode 100644
index 00000000..7ec7bfa5
--- /dev/null
+++ b/src/ipa/libipa/awb_grey.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * AWB grey world algorithm
+ */
+
+#pragma once
+
+#include <optional>
+
+#include "libcamera/internal/vector.h"
+
+#include "awb.h"
+#include "interpolator.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AwbGrey : public AwbAlgorithm
+{
+public:
+ AwbGrey() = default;
+
+ int init(const YamlObject &tuningData) override;
+ AwbResult calculateAwb(const AwbStats &stats, unsigned int lux) override;
+ RGB<double> gainsFromColourTemperature(double colourTemperature) override;
+
+private:
+ std::optional<Interpolator<Vector<double, 2>>> colourGainCurve_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index ce29f423..7c66cd57 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.cpp - Helper class that performs sensor-specific
+ * Helper class that performs sensor-specific
* parameter computations
*/
#include "camera_sensor_helper.h"
#include <cmath>
+#include <limits>
#include <libcamera/base/log.h>
@@ -40,6 +41,7 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::CameraSensorHelper()
* \brief Construct a CameraSensorHelper instance
*
* CameraSensorHelper derived class instances shall never be constructed
@@ -48,6 +50,33 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor scaled to a 16bit pixel
+ * width. If it is unknown an empty optional is returned.
+ *
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - Pedestal added by the sensor to pixel values. They are typically fixed,
+ * sometimes programmable and should be reported in datasheets (but
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depend on the
+ * sensor gains.
+ *
+ * The pedestal is usually the value with the biggest contribution to the
+ * overall black level. In most cases it is known in advance; in rare cases it
+ * could be queried from the sensor, but there is currently not a single driver
+ * in the Linux kernel that exposes such a control. This function provides that
+ * fixed, known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
* \brief Compute gain code from the analogue gain absolute value
* \param[in] gain The real gain to pass
*
@@ -58,21 +87,16 @@ namespace ipa {
*/
uint32_t CameraSensorHelper::gainCode(double gain) const
{
- const AnalogueGainConstants &k = gainConstants_;
-
- switch (gainType_) {
- case AnalogueGainLinear:
- ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0);
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
- return (k.linear.c0 - k.linear.c1 * gain) /
- (k.linear.m1 * gain - k.linear.m0);
+ return (l->c0 - l->c1 * gain) /
+ (l->m1 * gain - l->m0);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
- case AnalogueGainExponential:
- ASSERT(k.exp.a != 0 && k.exp.m != 0);
-
- return std::log2(gain / k.exp.a) / k.exp.m;
-
- default:
+ return std::log2(gain / e->a) / e->m;
+ } else {
ASSERT(false);
return 0;
}
@@ -90,38 +114,26 @@ uint32_t CameraSensorHelper::gainCode(double gain) const
*/
double CameraSensorHelper::gain(uint32_t gainCode) const
{
- const AnalogueGainConstants &k = gainConstants_;
double gain = static_cast<double>(gainCode);
- switch (gainType_) {
- case AnalogueGainLinear:
- ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0);
-
- return (k.linear.m0 * gain + k.linear.c0) /
- (k.linear.m1 * gain + k.linear.c1);
-
- case AnalogueGainExponential:
- ASSERT(k.exp.a != 0 && k.exp.m != 0);
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
- return k.exp.a * std::exp2(k.exp.m * gain);
+ return (l->m0 * gain + l->c0) /
+ (l->m1 * gain + l->c1);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
- default:
+ return e->a * std::exp2(e->m * gain);
+ } else {
ASSERT(false);
return 0.0;
}
}
/**
- * \enum CameraSensorHelper::AnalogueGainType
- * \brief The gain calculation modes as defined by the MIPI CCS
- *
- * Describes the image sensor analogue gain capabilities.
- * Two modes are possible, depending on the sensor: Linear and Exponential.
- */
-
-/**
- * \var CameraSensorHelper::AnalogueGainLinear
- * \brief Gain is computed using linear gain estimation
+ * \struct CameraSensorHelper::AnalogueGainLinear
+ * \brief Analogue gain constants for the linear gain model
*
* The relationship between the integer gain parameter and the resulting gain
* multiplier is given by the following equation:
@@ -136,11 +148,27 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
* The full Gain equation therefore reduces to either:
*
* \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$
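+ *
+ * For example, the IMX219 helper below uses \f$m0=0, c0=256, m1=-1,
+ * c1=256\f$, which reduces to \f$gain=\frac{256}{256-x}\f$.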
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m1
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c1
+ * \brief Constant used in the linear gain coding/decoding
*/
/**
- * \var CameraSensorHelper::AnalogueGainExponential
- * \brief Gain is expressed using an exponential model
+ * \struct CameraSensorHelper::AnalogueGainExp
+ * \brief Analogue gain constants for the exponential gain model
*
* The relationship between the integer gain parameter and the resulting gain
* multiplier is given by the following equation:
@@ -156,61 +184,22 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*
* When the gain is expressed in dB, 'a' is equal to 1 and 'm' to
* \f$log_{2}{10^{\frac{1}{20}}}\f$.
- */
-
-/**
- * \struct CameraSensorHelper::AnalogueGainLinearConstants
- * \brief Analogue gain constants for the linear gain model
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::m0
- * \brief Constant used in the linear gain coding/decoding
- *
- * \note Either m0 or m1 shall be zero.
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::c0
- * \brief Constant used in the linear gain coding/decoding
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::m1
- * \brief Constant used in the linear gain coding/decoding
- *
- * \note Either m0 or m1 shall be zero.
- *
- * \var CameraSensorHelper::AnalogueGainLinearConstants::c1
- * \brief Constant used in the linear gain coding/decoding
- */
-
-/**
- * \struct CameraSensorHelper::AnalogueGainExpConstants
- * \brief Analogue gain constants for the exponential gain model
*
- * \var CameraSensorHelper::AnalogueGainExpConstants::a
+ * \var CameraSensorHelper::AnalogueGainExp::a
* \brief Constant used in the exponential gain coding/decoding
*
- * \var CameraSensorHelper::AnalogueGainExpConstants::m
+ * \var CameraSensorHelper::AnalogueGainExp::m
* \brief Constant used in the exponential gain coding/decoding
*/
/**
- * \struct CameraSensorHelper::AnalogueGainConstants
- * \brief Analogue gain model constants
- *
- * This union stores the constants used to calculate the analogue gain. The
- * CameraSensorHelper::gainType_ variable selects which union member is valid.
- *
- * \var CameraSensorHelper::AnalogueGainConstants::linear
- * \brief Constants for the linear gain model
- *
- * \var CameraSensorHelper::AnalogueGainConstants::exp
- * \brief Constants for the exponential gain model
- */
-
-/**
- * \var CameraSensorHelper::gainType_
- * \brief The analogue gain model type
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ * \sa CameraSensorHelper::blackLevel()
*/
/**
- * \var CameraSensorHelper::gainConstants_
+ * \var CameraSensorHelper::gain_
* \brief The analogue gain parameters used for calculation
*
* The analogue gain is calculated through a formula, and its parameters are
@@ -366,42 +355,168 @@ static constexpr double expGainDb(double step)
return log2_10 * step / 20;
}
-class CameraSensorHelperAr0521 : public CameraSensorHelper
+class CameraSensorHelperAr0144 : public CameraSensorHelper
{
public:
- uint32_t gainCode(double gain) const override;
- double gain(uint32_t gainCode) const override;
+ CameraSensorHelperAr0144()
+ {
+ /* Power-on default value: 168 at 12bits. */
+ blackLevel_ = 2688;
+ }
+
+ uint32_t gainCode(double gain) const override
+ {
+ /* The recommended minimum gain is 1.6842 to avoid artifacts. */
+ gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45);
+
+ /*
+ * The analogue gain is made of a coarse exponential gain in
+ * the range [2^0, 2^4] and a fine inversely linear gain in the
+ * range [1.0, 2.0[. There is an additional fixed 1.153125
+ * multiplier when the coarse gain reaches 2^2.
+ */
+
+ if (gain > 4.0)
+ gain /= 1.153125;
+
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (1 - (1 << coarse) / gain) * 32;
+
+ /* The fine gain rounding depends on the coarse gain. */
+ if (coarse == 1 || coarse == 3)
+ fine &= ~1;
+ else if (coarse == 4)
+ fine &= ~3;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+ unsigned int d1;
+ double d2, m;
+
+ switch (coarse) {
+ default:
+ case 0:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.0;
+ break;
+ case 1:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.0;
+ break;
+ case 2:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.153125;
+ break;
+ case 3:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.153125;
+ break;
+ case 4:
+ d1 = 4;
+ d2 = 8.0;
+ m = 1.153125;
+ break;
+ }
+
+ /*
+ * With infinite precision, the calculated gain would be exact,
+ * and the reverse conversion with gainCode() would produce the
+ * same gain code. In the real world, rounding errors may cause
+ * the calculated gain to be lower by an amount negligible for
+ * all purposes, except for the reverse conversion. Converting
+ * the gain to a gain code could then return the quantized value
+ * just lower than the original gain code. To avoid this, tests
+ * showed that adding the machine epsilon to the multiplier m is
+ * sufficient.
+ */
+ m += std::numeric_limits<decltype(m)>::epsilon();
+
+ return m * (1 << coarse) / (1.0 - (fine / d1) / d2);
+ }
private:
static constexpr double kStep_ = 16;
};
+REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144)
-uint32_t CameraSensorHelperAr0521::gainCode(double gain) const
+class CameraSensorHelperAr0521 : public CameraSensorHelper
{
- gain = std::clamp(gain, 1.0, 15.5);
- unsigned int coarse = std::log2(gain);
- unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
+public:
+ uint32_t gainCode(double gain) const override
+ {
+ gain = std::clamp(gain, 1.0, 15.5);
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
- return (coarse << 4) | (fine & 0xf);
-}
+ return (coarse << 4) | (fine & 0xf);
+ }
-double CameraSensorHelperAr0521::gain(uint32_t gainCode) const
-{
- unsigned int coarse = gainCode >> 4;
- unsigned int fine = gainCode & 0xf;
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
- return (1 << coarse) * (1 + fine / kStep_);
-}
+ return (1 << coarse) * (1 + fine / kStep_);
+ }
+private:
+ static constexpr double kStep_ = 16;
+};
REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+class CameraSensorHelperGc05a2 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc05a2()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2)
+
+class CameraSensorHelperGc08a3 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc08a3()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3)
+
+class CameraSensorHelperImx214 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx214()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214)
+
class CameraSensorHelperImx219 : public CameraSensorHelper
{
public:
CameraSensorHelperImx219()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 256, -1, 256 };
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 256, -1, 256 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219)
@@ -411,19 +526,33 @@ class CameraSensorHelperImx258 : public CameraSensorHelper
public:
CameraSensorHelperImx258()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 512, -1, 512 };
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
+class CameraSensorHelperImx283 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx283()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
+
class CameraSensorHelperImx290 : public CameraSensorHelper
{
public:
CameraSensorHelperImx290()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.3) };
+ /* From datasheet: 0xf0 at 12bits. */
+ blackLevel_ = 3840;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290)
@@ -433,8 +562,7 @@ class CameraSensorHelperImx296 : public CameraSensorHelper
public:
CameraSensorHelperImx296()
{
- gainType_ = AnalogueGainExponential;
- gainConstants_.exp = { 1.0, expGainDb(0.1) };
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
@@ -444,13 +572,39 @@ class CameraSensorHelperImx327 : public CameraSensorHelperImx290
};
REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327)
+class CameraSensorHelperImx335 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx335()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
+
+class CameraSensorHelperImx415 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx415()
+ {
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+
+class CameraSensorHelperImx462 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462)
+
class CameraSensorHelperImx477 : public CameraSensorHelper
{
public:
CameraSensorHelperImx477()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 0, 1024, -1, 1024 };
+ gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
@@ -464,8 +618,7 @@ public:
* The Sensor Manual doesn't appear to document the gain model.
* This has been validated with some empirical testing only.
*/
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
@@ -475,8 +628,7 @@ class CameraSensorHelperOv2740 : public CameraSensorHelper
public:
CameraSensorHelperOv2740()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
@@ -486,8 +638,9 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper
public:
CameraSensorHelperOv4689()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
@@ -497,8 +650,9 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper
public:
CameraSensorHelperOv5640()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
@@ -508,8 +662,7 @@ class CameraSensorHelperOv5647 : public CameraSensorHelper
public:
CameraSensorHelperOv5647()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
@@ -519,8 +672,7 @@ class CameraSensorHelperOv5670 : public CameraSensorHelper
public:
CameraSensorHelperOv5670()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670)
@@ -530,8 +682,9 @@ class CameraSensorHelperOv5675 : public CameraSensorHelper
public:
CameraSensorHelperOv5675()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ /* From Linux kernel driver: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675)
@@ -541,8 +694,7 @@ class CameraSensorHelperOv5693 : public CameraSensorHelper
public:
CameraSensorHelperOv5693()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 16 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
@@ -552,8 +704,7 @@ class CameraSensorHelperOv64a40 : public CameraSensorHelper
public:
CameraSensorHelperOv64a40()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
@@ -563,15 +714,13 @@ class CameraSensorHelperOv8858 : public CameraSensorHelper
public:
CameraSensorHelperOv8858()
{
- gainType_ = AnalogueGainLinear;
-
/*
* \todo Validate the selected 1/128 step value as it differs
* from what the sensor manual describes.
*
* See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
*/
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
@@ -581,8 +730,7 @@ class CameraSensorHelperOv8865 : public CameraSensorHelper
public:
CameraSensorHelperOv8865()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865)
@@ -592,8 +740,7 @@ class CameraSensorHelperOv13858 : public CameraSensorHelper
public:
CameraSensorHelperOv13858()
{
- gainType_ = AnalogueGainLinear;
- gainConstants_.linear = { 1, 0, 0, 128 };
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
}
};
REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858)
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 1ca9371b..a9300a64 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -2,15 +2,16 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_helper.h - Helper class that performs sensor-specific parameter computations
+ * Helper class that performs sensor-specific parameter computations
*/
#pragma once
-#include <stdint.h>
-
#include <memory>
+#include <optional>
+#include <stdint.h>
#include <string>
+#include <variant>
#include <vector>
#include <libcamera/base/class.h>
@@ -25,34 +26,25 @@ public:
CameraSensorHelper() = default;
virtual ~CameraSensorHelper() = default;
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
virtual uint32_t gainCode(double gain) const;
virtual double gain(uint32_t gainCode) const;
protected:
- enum AnalogueGainType {
- AnalogueGainLinear,
- AnalogueGainExponential,
- };
-
- struct AnalogueGainLinearConstants {
+ struct AnalogueGainLinear {
int16_t m0;
int16_t c0;
int16_t m1;
int16_t c1;
};
- struct AnalogueGainExpConstants {
+ struct AnalogueGainExp {
double a;
double m;
};
- union AnalogueGainConstants {
- AnalogueGainLinearConstants linear;
- AnalogueGainExpConstants exp;
- };
-
- AnalogueGainType gainType_;
- AnalogueGainConstants gainConstants_;
+ std::optional<int16_t> blackLevel_;
+ std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp
new file mode 100644
index 00000000..97124cf4
--- /dev/null
+++ b/src/ipa/libipa/colours.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#include "colours.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \file colours.h
+ * \brief Miscellaneous colour helper functions shared between IPA modules
+ */
+
+/**
+ * \brief Estimate luminance from RGB values following ITU-R BT.601
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates a luminance value from a triplet of Red, Green and
+ * Blue values, following the formula defined by ITU-R Recommendation BT.601-7
+ * which can be found at https://www.itu.int/rec/R-REC-BT.601
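+ *
+ * \f$Y = 0.299 R + 0.587 G + 0.114 B\f$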
+ *
+ * \return The estimated luminance value
+ */
+double rec601LuminanceFromRGB(const RGB<double> &rgb)
+{
+ static const Vector<double, 3> rgb2y{{
+ 0.299, 0.587, 0.114
+ }};
+
+ return rgb.dot(rgb2y);
+}
+
+/**
+ * \brief Estimate correlated colour temperature from RGB color space input
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates the correlated color temperature from the RGB color
+ * space input. In physics and color science, the Planckian locus or black body
+ * locus is the path that the color of an incandescent black body would take in
+ * a particular chromaticity space as the black body temperature changes.
+ *
+ * If a narrow range of color temperatures is considered (those encapsulating
+ * daylight being the most practical case) one can approximate the Planckian
+ * locus in order to calculate the CCT in terms of chromaticity coordinates.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Color_temperature#Approximation
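+ *
+ * Concretely, given the CIE 1931 xy chromaticity \f$(x, y)\f$ of the input,
+ * the approximation used below computes
+ * \f$n = \frac{x - 0.3320}{0.1858 - y}\f$ and
+ * \f$CCT = 449 n^3 + 3525 n^2 + 6823.3 n + 5520.33\f$.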
+ *
+ * \return The estimated color temperature
+ */
+uint32_t estimateCCT(const RGB<double> &rgb)
+{
+ /*
+ * Convert the RGB values to CIE tristimulus values (XYZ) and divide by
+ * the sum of X, Y and Z to calculate the CIE xy chromaticity.
+ */
+ static const Matrix<double, 3, 3> rgb2xyz({
+ -0.14282, 1.54924, -0.95641,
+ -0.32466, 1.57837, -0.73191,
+ -0.68202, 0.77073, 0.56332
+ });
+
+ Vector<double, 3> xyz = rgb2xyz * rgb;
+ xyz /= xyz.sum();
+
+ /* Calculate CCT */
+ double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y());
+ return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h
new file mode 100644
index 00000000..d39b2ca8
--- /dev/null
+++ b/src/ipa/libipa/colours.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "libcamera/internal/vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+double rec601LuminanceFromRGB(const RGB<double> &rgb);
+uint32_t estimateCCT(const RGB<double> &rgb);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
new file mode 100644
index 00000000..0c1e99e3
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+#include "exposure_mode_helper.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file exposure_mode_helper.h
+ * \brief Helper class that performs computations relating to exposure
+ *
+ * AEGC algorithms need to split exposure between exposure time, analogue gain
+ * and digital gain. Multiple implementations do so based on paired stages of
+ * exposure time and gain limits; this helper avoids duplicating that code.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(ExposureModeHelper)
+
+namespace ipa {
+
+/**
+ * \class ExposureModeHelper
+ * \brief Class for splitting exposure into exposure time and total gain
+ *
+ * The ExposureModeHelper class provides a standard interface through which an
+ * AEGC algorithm can divide exposure between exposure time and gain. It is
+ * configured with a set of exposure time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing exposure time up to the exposure time value
+ * from the first pair in the set in an attempt to meet the required exposure
+ * value.
+ *
+ * If the required exposure is not achievable by the first exposure time value
+ * alone it ramps gain up to the value from the first pair in the set. If the
+ * required exposure is still not met it then allows exposure time to ramp up to
+ * the exposure time value from the second pair in the set, and continues in this
+ * vein until either the required exposure is met, or else the hardware's
+ * exposure time or gain limits are reached.
+ *
+ * This method allows users to strike a balance between a well-exposed image and
+ * an acceptable frame-rate, as opposed to simply maximising exposure time
+ * followed by gain. The same helpers can be used to perform the latter
+ * operation if needed by passing an empty set of pairs to the initialisation
+ * function.
+ *
+ * The gain values may exceed a camera sensor's analogue gain limits if either
+ * it or the IPA is also capable of digital gain. The configure() function must
+ * be called with the hardware's limits to inform the helper of those
+ * constraints. Any gain that is needed will be applied as analogue gain first
+ * until the hardware's limit is reached, following which digital gain will be
+ * used.
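+ *
+ * A hypothetical usage sketch (the stage and limit values are illustrative
+ * only):
+ *
+ * \code{.cpp}
+ * using namespace std::literals::chrono_literals;
+ *
+ * std::vector<std::pair<utils::Duration, double>> stages = {
+ * 	{ 10ms, 2.0 },
+ * 	{ 30ms, 4.0 },
+ * };
+ * ExposureModeHelper helper(stages);
+ *
+ * // Sensor limits, typically queried from the camera sensor at configure
+ * // time. They must be set before splitExposure() is called.
+ * helper.setLimits(100us, 33ms, 1.0, 8.0);
+ *
+ * auto [exposureTime, aGain, dGain] = helper.splitExposure(40ms);
+ * \endcode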
+ */
+
+/**
+ * \brief Construct an ExposureModeHelper instance
+ * \param[in] stages The vector of paired exposure time and gain limits
+ *
+ * The input stages are exposure time and _total_ gain pairs; the gain
+ * encompasses both analogue and digital gain.
+ *
+ * The vector of stages may be empty. In that case, the helper will simply use
+ * the runtime limits set through setLimits() instead.
+ */
+ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
+{
+ minExposureTime_ = 0us;
+ maxExposureTime_ = 0us;
+ minGain_ = 0;
+ maxGain_ = 0;
+
+ for (const auto &[s, g] : stages) {
+ exposureTimes_.push_back(s);
+ gains_.push_back(g);
+ }
+}
+
+/**
+ * \brief Set the exposure time and gain limits
+ * \param[in] minExposureTime The minimum exposure time supported
+ * \param[in] maxExposureTime The maximum exposure time supported
+ * \param[in] minGain The minimum analogue gain supported
+ * \param[in] maxGain The maximum analogue gain supported
+ *
+ * This function configures the exposure time and analogue gain limits that need
+ * to be adhered to as the helper divides up exposure. Note that this function
+ * *must* be called whenever those limits change and before splitExposure() is
+ * used.
+ *
+ * If the algorithm using the helper needs to fix either exposure time or
+ * analogue gain (or both), it can do so by setting the respective minimum and
+ * maximum to the same value.
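+ *
+ * For example, calling setLimits(10ms, 10ms, 1.0, 8.0) fixes the exposure time
+ * at 10ms while still allowing analogue gain to vary between 1.0 and 8.0.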
+ */
+void ExposureModeHelper::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ minExposureTime_ = minExposureTime;
+ maxExposureTime_ = maxExposureTime;
+ minGain_ = minGain;
+ maxGain_ = maxGain;
+}
+
+utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const
+{
+ return std::clamp(exposureTime, minExposureTime_, maxExposureTime_);
+}
+
+double ExposureModeHelper::clampGain(double gain) const
+{
+ return std::clamp(gain, minGain_, maxGain_);
+}
+
+/**
+ * \brief Split exposure into exposure time and gain
+ * \param[in] exposure Exposure value
+ *
+ * This function divides a given exposure into exposure time, analogue and
+ * digital gain by iterating through stages of exposure time and gain limits.
+ * At each stage the current stage's exposure time limit is multiplied by the
+ * previous stage's gain limit (or 1.0 initially) to see if the combination of
+ * the two can meet the required exposure. If they cannot then the current
+ * stage's exposure time limit is multiplied by the same stage's gain limit to
+ * see if that combination can meet the required exposure. If it cannot, the
+ * function moves on to consider the next stage.
+ *
+ * When a combination of exposure time and gain _stage_ limits is found that
+ * is sufficient to meet the required exposure, the function attempts to reduce
+ * exposure time as much as possible whilst fixing gain and still meeting the
+ * exposure. If a _runtime_ limit prevents exposure time from being lowered
+ * enough to meet the exposure with gain fixed at the stage limit, gain is also
+ * lowered to compensate.
+ *
+ * Once the exposure time and gain values are ascertained, gain is assigned as
+ * analogue gain as much as possible, with digital gain only in use if the
+ * maximum analogue gain runtime limit is unable to accommodate the exposure
+ * value.
+ *
+ * If no combination of exposure time and gain limits is found that meets the
+ * required exposure, the helper falls back to simply maximising the exposure
+ * time first, followed by analogue gain, followed by digital gain.
+ *
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+ExposureModeHelper::splitExposure(utils::Duration exposure) const
+{
+ ASSERT(maxExposureTime_);
+ ASSERT(maxGain_);
+
+ bool gainFixed = minGain_ == maxGain_;
+ bool exposureTimeFixed = minExposureTime_ == maxExposureTime_;
+
+ /*
+	 * There's no point entering the loop if we cannot change either gain
+	 * or exposure time anyway.
+ */
+ if (exposureTimeFixed && gainFixed)
+ return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) };
+
+ utils::Duration exposureTime;
+ double stageGain = 1.0;
+ double gain;
+
+ for (unsigned int stage = 0; stage < gains_.size(); stage++) {
+ double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
+ utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]);
+ stageGain = clampGain(gains_[stage]);
+
+ /*
+	 * We clamp both exposure time and gain in case the helper has had
+	 * limits set that prevent those values being lowered beyond a
+	 * certain minimum. This can happen at runtime for various reasons,
+	 * and so would not be known when the stage limits are initialised.
+ */
+
+ /* Clamp the gain to lastStageGain and regulate exposureTime. */
+ if (stageExposureTime * lastStageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+
+ /* Clamp the exposureTime to stageExposureTime and regulate gain. */
+ if (stageExposureTime * stageGain >= exposure) {
+ exposureTime = clampExposureTime(stageExposureTime);
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+ }
+
+ /*
+ * From here on all we can do is max out the exposure time, followed by
+	 * the analogue gain. If we still haven't achieved the target we send
+	 * the rest of the exposure to digital gain. If we were given no
+ * stages to use then the default stageGain of 1.0 is used so that
+ * exposure time is maxed before gain is touched at all.
+ */
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+}
+
+/**
+ * \fn ExposureModeHelper::minExposureTime()
+ * \brief Retrieve the configured minimum exposure time limit set through
+ * setLimits()
+ * \return The minExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxExposureTime()
+ * \brief Retrieve the configured maximum exposure time set through setLimits()
+ * \return The maxExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::minGain()
+ * \brief Retrieve the configured minimum gain set through setLimits()
+ * \return The minGain_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxGain()
+ * \brief Retrieve the configured maximum gain set through setLimits()
+ * \return The maxGain_ value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
new file mode 100644
index 00000000..c5be1b67
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+
+#pragma once
+
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class ExposureModeHelper
+{
+public:
+ ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
+ ~ExposureModeHelper() = default;
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::tuple<utils::Duration, double, double>
+ splitExposure(utils::Duration exposure) const;
+
+ utils::Duration minExposureTime() const { return minExposureTime_; }
+ utils::Duration maxExposureTime() const { return maxExposureTime_; }
+ double minGain() const { return minGain_; }
+ double maxGain() const { return maxGain_; }
+
+private:
+ utils::Duration clampExposureTime(utils::Duration exposureTime) const;
+ double clampGain(double gain) const;
+
+ std::vector<utils::Duration> exposureTimes_;
+ std::vector<double> gains_;
+
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
+ double minGain_;
+ double maxGain_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp
index e812faa5..0365e919 100644
--- a/src/ipa/libipa/fc_queue.cpp
+++ b/src/ipa/libipa/fc_queue.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * fc_queue.cpp - IPA Frame context queue
+ * IPA Frame context queue
*/
#include "fc_queue.h"
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
index a589e7e1..a1d13652 100644
--- a/src/ipa/libipa/fc_queue.h
+++ b/src/ipa/libipa/fc_queue.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * fc_queue.h - IPA Frame context queue
+ * IPA Frame context queue
*/
#pragma once
@@ -25,6 +25,7 @@ struct FrameContext {
private:
template<typename T> friend class FCQueue;
uint32_t frame;
+ bool initialised = false;
};
template<typename FrameContext>
@@ -38,8 +39,10 @@ public:
void clear()
{
- for (FrameContext &ctx : contexts_)
+ for (FrameContext &ctx : contexts_) {
+ ctx.initialised = false;
ctx.frame = 0;
+ }
}
FrameContext &alloc(const uint32_t frame)
@@ -83,6 +86,21 @@ public:
<< " has been overwritten by "
<< frameContext.frame;
+ if (frame == 0 && !frameContext.initialised) {
+ /*
+			 * If the IPA calls get() at start() time it will get an
+			 * uninitialised FrameContext, as the "frame ==
+			 * frameContext.frame" check below will succeed because
+			 * FrameContexts are zeroed at creation time.
+ *
+ * Make sure the FrameContext gets initialised if get()
+ * is called before alloc() by the IPA for frame#0.
+ */
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
if (frame == frameContext.frame)
return frameContext;
@@ -108,6 +126,7 @@ private:
{
frameContext = {};
frameContext.frame = frame;
+ frameContext.initialised = true;
}
std::vector<FrameContext> contexts_;
diff --git a/src/ipa/libipa/fixedpoint.cpp b/src/ipa/libipa/fixedpoint.cpp
new file mode 100644
index 00000000..6b698fc5
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.cpp
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#include "fixedpoint.h"
+
+/**
+ * \file fixedpoint.h
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \fn R floatingToFixedPoint(T number)
+ * \brief Convert a floating point number to a fixed-point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the fixed-point representation
+ * \tparam T Input type of the floating point representation
+ * \param number The floating point number to convert to fixed point
+ * \return The converted value
+ */
+
+/**
+ * \fn R fixedToFloatingPoint(T number)
+ * \brief Convert a fixed-point number to a floating point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the floating point representation
+ * \tparam T Input type of the fixed-point representation
+ * \param number The fixed point number to convert to floating point
+ * \return The converted value
+ */
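+
+/*
+ * For example (a sketch, assuming a Q5.8 register and an input value of 2.5):
+ *
+ *   uint16_t fixed = floatingToFixedPoint<5, 8, uint16_t, double>(2.5);
+ *   double value = fixedToFloatingPoint<5, 8, double, uint16_t>(fixed);
+ *
+ * yields fixed == 640 (0x280, i.e. 2.5 * 2^8) and value == 2.5.
+ */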
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fixedpoint.h b/src/ipa/libipa/fixedpoint.h
new file mode 100644
index 00000000..709cf50f
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#pragma once
+
+#include <cmath>
+#include <type_traits>
+
+namespace libcamera {
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_integral_v<R> &&
+ std::is_floating_point_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R floatingToFixedPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(R));
+ static_assert(I + F <= sizeof(R) * 8);
+
+ /*
+ * The intermediate cast to int is needed on arm platforms to properly
+ * cast negative values. See
+ * https://embeddeduse.com/2013/08/25/casting-a-negative-float-to-an-unsigned-int/
+ */
+ R mask = (1 << (F + I)) - 1;
+ R frac = static_cast<R>(static_cast<int>(std::round(number * (1 << F)))) & mask;
+
+ return frac;
+}
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_floating_point_v<R> &&
+ std::is_integral_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R fixedToFloatingPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(T));
+ static_assert(I + F <= sizeof(T) * 8);
+
+ /*
+ * Recreate the upper bits in case of a negative number by shifting the sign
+ * bit from the fixed point to the first bit of the unsigned and then right shifting
+ * by the same amount which keeps the sign bit in place.
+ * This can be optimized by the compiler quite well.
+ */
+ int remaining_bits = sizeof(int) * 8 - (I + F);
+ int t = static_cast<int>(static_cast<unsigned>(number) << remaining_bits) >> remaining_bits;
+ return static_cast<R>(t) / static_cast<R>(1 << F);
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
index 6b5cde8e..10e44b54 100644
--- a/src/ipa/libipa/histogram.cpp
+++ b/src/ipa/libipa/histogram.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.cpp - histogram calculations
+ * histogram calculations
*/
#include "histogram.h"
@@ -29,24 +29,46 @@ namespace ipa {
*/
/**
+ * \fn Histogram::Histogram()
+ * \brief Construct an empty Histogram
+ *
+ * This empty constructor exists largely to allow Histograms to be embedded in
+ * other classes which may be created before the contents of the Histogram are
+ * known.
+ */
+
+/**
* \brief Create a cumulative histogram
- * \param[in] data A pre-sorted histogram to be passed
+ * \param[in] data A (non-cumulative) histogram
*/
Histogram::Histogram(Span<const uint32_t> data)
{
- cumulative_.reserve(data.size());
- cumulative_.push_back(0);
- for (const uint32_t &value : data)
- cumulative_.push_back(cumulative_.back() + value);
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + value;
}
/**
+ * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform)
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ * \param[in] transform The transformation function to apply to every bin
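+ *
+ * For instance (a sketch), to accumulate the squares of the bin values:
+ *
+ * \code{.unparsed}
+ * Histogram hist(data, [](uint32_t v) { return static_cast<uint64_t>(v) * v; });
+ * \endcode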
+ */
+
+/**
* \fn Histogram::bins()
* \brief Retrieve the number of bins currently used by the Histogram
* \return Number of bins
*/
/**
+ * \fn Histogram::data()
+ * \brief Retrieve the internal cumulative histogram data
+ * \return The cumulative histogram data
+ */
+
+/**
* \fn Histogram::total()
* \brief Retrieve the total number of values in the data set
* \return Number of values
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
index 05bb4b80..a926002c 100644
--- a/src/ipa/libipa/histogram.h
+++ b/src/ipa/libipa/histogram.h
@@ -2,18 +2,18 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.h - histogram calculation interface
+ * histogram calculation interface
*/
#pragma once
-#include <assert.h>
#include <limits.h>
#include <stdint.h>
-
+#include <type_traits>
#include <vector>
#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
namespace libcamera {
@@ -22,8 +22,21 @@ namespace ipa {
class Histogram
{
public:
+ Histogram() { cumulative_.push_back(0); }
Histogram(Span<const uint32_t> data);
+
+ template<typename Transform,
+ std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr>
+ Histogram(Span<const uint32_t> data, Transform transform)
+ {
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + transform(value);
+ }
+
size_t bins() const { return cumulative_.size() - 1; }
+ const Span<const uint64_t> data() const { return cumulative_; }
uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
uint64_t cumulativeFrequency(double bin) const;
double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const;
diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp
new file mode 100644
index 00000000..f901a86e
--- /dev/null
+++ b/src/ipa/libipa/interpolator.cpp
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating objects
+ */
+#include "interpolator.h"
+
+#include <algorithm>
+#include <string>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+/**
+ * \file interpolator.h
+ * \brief Helper class for linear interpolating a set of objects
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+/**
+ * \class Interpolator
+ * \brief Class for storing, retrieving, and interpolating objects
+ * \tparam T Type of objects stored in the interpolator
+ *
+ * The main use case is to pass a map from color temperatures to corresponding
+ * objects (e.g. matrices for color correction), and then request an
+ * interpolated object for a specific color temperature. This class will
+ * abstract away the interpolation portion.
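+ *
+ * A minimal sketch (the map values are illustrative only):
+ *
+ * \code{.unparsed}
+ * Interpolator<double> gains({ { 2800, 1.0 }, { 6500, 2.0 } });
+ * double gain = gains.getInterpolated(4650); // yields 1.5
+ * \endcode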
+ */
+
+/**
+ * \fn Interpolator::Interpolator()
+ * \brief Construct an empty interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ * \brief Initialize an Interpolator instance from yaml
+ * \tparam T Type of data stored in the interpolator
+ * \param[in] yaml The yaml object that contains the map of unsigned integers to
+ * objects
+ * \param[in] key_name The name of the key in the yaml object
+ * \param[in] value_name The name of the value in the yaml object
+ *
+ * The yaml object is expected to be a list of maps. Each map has two or more
+ * pairs: one of \a key_name to the key value (usually color temperature), and
+ * one or more of \a value_name to the object. This is a bit difficult to
+ * explain, so here is an example (in python, as it is easier to parse than
+ * yaml):
+ * [
+ * {
+ * 'ct': 2860,
+ * 'ccm': [ 2.12089, -0.52461, -0.59629,
+ * -0.85342, 2.80445, -0.95103,
+ * -0.26897, -1.14788, 2.41685 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 2960,
+ * 'ccm': [ 2.26962, -0.54174, -0.72789,
+ * -0.77008, 2.60271, -0.83262,
+ * -0.26036, -1.51254, 2.77289 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 3603,
+ * 'ccm': [ 2.18644, -0.66148, -0.52496,
+ * -0.77828, 2.69474, -0.91645,
+ * -0.25239, -0.83059, 2.08298 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ * ]
+ *
+ * In this case, \a key_name would be 'ct', and \a value_name can be either
+ * 'ccm' or 'offsets'. This way multiple interpolators can be defined in
+ * one set of color temperature ranges in the tuning file, and they can be
+ * retrieved separately with the \a value_name parameter.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \fn void Interpolator<T>::setQuantization(const unsigned int q)
+ * \brief Set the quantization value
+ * \param[in] q The quantization value
+ *
+ * Sets the quantization value. When set, the key is quantized to multiples of
+ * this value before interpolation. This can help in reducing the number of
+ * updates pushed to the hardware.
+ *
+ * Note that normally a threshold needs to be combined with quantization.
+ * Otherwise a value that swings around the edge of the quantization step will
+ * lead to constant updates.
+ */
+
+/**
+ * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data)
+ * \brief Set the internal map
+ *
+ * Overwrites the internal map using move semantics.
+ */
+
+/**
+ * \fn std::map<unsigned int, T> &Interpolator<T>::data() const
+ * \brief Access the internal map
+ *
+ * \return The internal map
+ */
+
+/**
+ * \fn const T& Interpolator<T>::getInterpolated()
+ * \brief Retrieve an interpolated value for the given key
+ * \param[in] key The unsigned integer key of the object to retrieve
+ * \param[out] quantizedKey If provided, the key value after quantization
+ * \return The object corresponding to the key. The object is cached internally,
+ * so on successive calls with the same key (after quantization) interpolation
+ * is not recalculated.
+ */
+
+/**
+ * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double lambda)
+ * \brief Interpolate between two instances of T
+ * \param a The first value to interpolate
+ * \param b The second value to interpolate
+ * \param dest The destination for the interpolated value
+ * \param lambda The interpolation factor (0..1)
+ *
+ * Interpolates between \a a and \a b according to \a lambda. It calculates
+ * dest = a * (1.0 - lambda) + b * lambda;
+ *
+ * If T supports multiplication with double and addition, this function can be
+ * used as is. For other types this function can be overridden with a
+ * template specialization.
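+ *
+ * A sketch of such a specialization, for a hypothetical Matrix type:
+ *
+ * \code{.unparsed}
+ * template<>
+ * void Interpolator<Matrix>::interpolate(const Matrix &a, const Matrix &b,
+ *                                        Matrix &dest, double lambda)
+ * {
+ *         // Blend a and b element-wise into dest.
+ * }
+ * \endcode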
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h
new file mode 100644
index 00000000..7880db69
--- /dev/null
+++ b/src/ipa/libipa/interpolator.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating maps of objects
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <string>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+template<typename T>
+class Interpolator
+{
+public:
+ Interpolator() = default;
+ Interpolator(const std::map<unsigned int, T> &data)
+ : data_(data)
+ {
+ }
+ Interpolator(std::map<unsigned int, T> &&data)
+ : data_(std::move(data))
+ {
+ }
+
+ ~Interpolator() = default;
+
+ int readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ {
+ data_.clear();
+ lastInterpolatedKey_.reset();
+
+ if (!yaml.isList()) {
+ LOG(Interpolator, Error) << "yaml object must be a list";
+ return -EINVAL;
+ }
+
+ for (const auto &value : yaml.asList()) {
+ unsigned int ct = std::stoul(value[key_name].get<std::string>(""));
+ std::optional<T> data =
+ value[value_name].get<T>();
+ if (!data) {
+ return -EINVAL;
+ }
+
+ data_[ct] = *data;
+ }
+
+ if (data_.size() < 1) {
+ LOG(Interpolator, Error) << "Need at least one element";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ void setQuantization(const unsigned int q)
+ {
+ quantization_ = q;
+ }
+
+ void setData(std::map<unsigned int, T> &&data)
+ {
+ data_ = std::move(data);
+ lastInterpolatedKey_.reset();
+ }
+
+ const std::map<unsigned int, T> &data() const
+ {
+ return data_;
+ }
+
+ const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr)
+ {
+ ASSERT(data_.size() > 0);
+
+ if (quantization_ > 0)
+ key = std::lround(key / static_cast<double>(quantization_)) * quantization_;
+
+ if (quantizedKey)
+ *quantizedKey = key;
+
+ if (lastInterpolatedKey_.has_value() &&
+ *lastInterpolatedKey_ == key)
+ return lastInterpolatedValue_;
+
+ auto it = data_.lower_bound(key);
+
+ if (it == data_.begin())
+ return it->second;
+
+ if (it == data_.end())
+ return std::prev(it)->second;
+
+ if (it->first == key)
+ return it->second;
+
+ auto it2 = std::prev(it);
+ double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first);
+ interpolate(it2->second, it->second, lastInterpolatedValue_, lambda);
+ lastInterpolatedKey_ = key;
+
+ return lastInterpolatedValue_;
+ }
+
+ void interpolate(const T &a, const T &b, T &dest, double lambda)
+ {
+ dest = a * (1.0 - lambda) + b * lambda;
+ }
+
+private:
+ std::map<unsigned int, T> data_;
+ T lastInterpolatedValue_;
+ std::optional<unsigned int> lastInterpolatedKey_;
+ unsigned int quantization_ = 0;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp
new file mode 100644
index 00000000..f607d86c
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Polynomial class to represent lens shading correction
+ */
+
+#include "lsc_polynomial.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file lsc_polynomial.h
+ * \brief LscPolynomial class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+/**
+ * \class LscPolynomial
+ * \brief Class for handling even polynomials used in lens shading correction
+ *
+ * Shading artifacts of camera lenses can be modeled using even radial
+ * polynomials. This class implements a polynomial with 5 coefficients which
+ * follows the definition of the FixVignetteRadial opcode in the Adobe DNG
+ * specification.
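+ *
+ * The polynomial is evaluated as per that opcode:
+ *
+ *   g(r) = 1.0 + k0 * r^2 + k1 * r^4 + k2 * r^6 + k3 * r^8 + k4 * r^10
+ *
+ * where r is the Euclidean distance from the normalized optical center.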
+ */
+
+/**
+ * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ * \brief Construct a polynomial using the given coefficients
+ * \param cx Center-x relative to the image in normalized coordinates (0..1)
+ * \param cy Center-y relative to the image in normalized coordinates (0..1)
+ * \param k0 Coefficient of the polynomial
+ * \param k1 Coefficient of the polynomial
+ * \param k2 Coefficient of the polynomial
+ * \param k3 Coefficient of the polynomial
+ * \param k4 Coefficient of the polynomial
+ */
+
+/**
+ * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y)
+ * \brief Sample the polynomial at the given normalized pixel position
+ *
+ * This function samples the polynomial at the given pixel position divided by
+ * the value returned by getM().
+ *
+ * \param x x position in normalized coordinates
+ * \param y y position in normalized coordinates
+ * \return The sampled value
+ */
+
+/**
+ * \fn LscPolynomial::getM()
+ * \brief Get the value m as described in the DNG specification
+ *
+ * Returns m according to the DNG specification: m represents the Euclidean
+ * distance (in pixels) from the optical center to the farthest pixel in the
+ * image.
+ *
+ * \return The value of m
+ */
+
+/**
+ * \fn LscPolynomial::setReferenceImageSize(const Size &size)
+ * \brief Set the reference image size
+ *
+ * Set the reference image size that is used for subsequent calls to getM() and
+ * sampleAtNormalizedPixelPos()
+ *
+ * \param size The size of the reference image
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h
new file mode 100644
index 00000000..c898faeb
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Helper for radial polynomial used in lens shading correction.
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <assert.h>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+class LscPolynomial
+{
+public:
+ LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ : cx_(cx), cy_(cy), cnx_(0), cny_(0),
+ coefficients_({ k0, k1, k2, k3, k4 })
+ {
+ }
+
+ double sampleAtNormalizedPixelPos(double x, double y) const
+ {
+ double dx = x - cnx_;
+ double dy = y - cny_;
+ double r = sqrt(dx * dx + dy * dy);
+ double res = 1.0;
+ for (unsigned int i = 0; i < coefficients_.size(); i++) {
+ res += coefficients_[i] * std::pow(r, (i + 1) * 2);
+ }
+ return res;
+ }
+
+ double getM() const
+ {
+ double cpx = imageSize_.width * cx_;
+ double cpy = imageSize_.height * cy_;
+ double mx = std::max(cpx, std::fabs(imageSize_.width - cpx));
+ double my = std::max(cpy, std::fabs(imageSize_.height - cpy));
+
+ return sqrt(mx * mx + my * my);
+ }
+
+ void setReferenceImageSize(const Size &size)
+ {
+ assert(!size.isNull());
+ imageSize_ = size;
+
+ /* Calculate normalized centers */
+ double m = getM();
+ cnx_ = (size.width * cx_) / m;
+ cny_ = (size.height * cy_) / m;
+ }
+
+private:
+ double cx_;
+ double cy_;
+ double cnx_;
+ double cny_;
+ std::array<double, 5> coefficients_;
+
+ Size imageSize_;
+};
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+
+template<>
+struct YamlObject::Getter<ipa::LscPolynomial> {
+ std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const
+ {
+ std::optional<double> cx = obj["cx"].get<double>();
+ std::optional<double> cy = obj["cy"].get<double>();
+ std::optional<double> k0 = obj["k0"].get<double>();
+ std::optional<double> k1 = obj["k1"].get<double>();
+ std::optional<double> k2 = obj["k2"].get<double>();
+ std::optional<double> k3 = obj["k3"].get<double>();
+ std::optional<double> k4 = obj["k4"].get<double>();
+
+		if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) {
+			LOG(LscPolynomial, Error)
+				<< "Polynomial is missing a parameter";
+			return std::nullopt;
+		}
+
+		return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4);
+ }
+};
+
+#endif
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lux.cpp b/src/ipa/libipa/lux.cpp
new file mode 100644
index 00000000..899e8824
--- /dev/null
+++ b/src/ipa/libipa/lux.cpp
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+#include "lux.h"
+
+#include <algorithm>
+#include <chrono>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "histogram.h"
+
+/**
+ * \file lux.h
+ * \brief Helper class that implements lux estimation
+ *
+ * Estimating the lux level of an image is a common operation that can for
+ * instance be used to adjust the target Y value in AGC or for Bayesian AWB
+ * estimation.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(Lux)
+
+namespace ipa {
+
+/**
+ * \class Lux
+ * \brief Class that implements lux estimation
+ *
+ * IPAs that wish to use lux estimation should create a Lux algorithm module
+ * that lightly wraps this module by providing the platform-specific luminance
+ * histogram. The Lux entry in the tuning file must then precede the algorithms
+ * that depend on the estimated lux value.
+ */
+
+/**
+ * \var Lux::referenceExposureTime_
+ * \brief The exposure time of the reference image, in microseconds
+ */
+
+/**
+ * \var Lux::referenceAnalogueGain_
+ * \brief The analogue gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceDigitalGain_
+ * \brief The digital gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceY_
+ * \brief The measured luminance of the reference image, normalized to 1
+ */
+
+/**
+ * \var Lux::referenceLux_
+ * \brief The estimated lux level of the reference image
+ */
+
+/**
+ * \brief Construct the Lux helper module
+ */
+Lux::Lux()
+{
+}
+
+/**
+ * \brief Parse tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses yaml tuning data for the common Lux module. It requires
+ * reference exposure time, analogue gain, digital gain, and lux values.
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Lux:
+ * referenceExposureTime: 10000
+ * referenceAnalogueGain: 4.0
+ * referenceDigitalGain: 1.0
+ * referenceY: 0.1831
+ * referenceLux: 1000
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int Lux::parseTuningData(const YamlObject &tuningData)
+{
+ auto value = tuningData["referenceExposureTime"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceExposureTime'";
+ return -EINVAL;
+ }
+ referenceExposureTime_ = *value * 1.0us;
+
+ value = tuningData["referenceAnalogueGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceAnalogueGain'";
+ return -EINVAL;
+ }
+ referenceAnalogueGain_ = *value;
+
+ value = tuningData["referenceDigitalGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceDigitalGain'";
+ return -EINVAL;
+ }
+ referenceDigitalGain_ = *value;
+
+ value = tuningData["referenceY"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceY'";
+ return -EINVAL;
+ }
+ referenceY_ = *value;
+
+ value = tuningData["referenceLux"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceLux'";
+ return -EINVAL;
+ }
+ referenceLux_ = *value;
+
+ return 0;
+}
+
+/**
+ * \brief Estimate lux given runtime values
+ * \param[in] exposureTime Exposure time applied to the frame
+ * \param[in] aGain Analogue gain applied to the frame
+ * \param[in] dGain Digital gain applied to the frame
+ * \param[in] yHist Histogram from the ISP statistics
+ *
+ * Estimate the lux given the exposure time, gain, and histogram.
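+ *
+ * The estimate scales the reference lux level by the ratios between the
+ * reference and current exposure parameters and mean luminance:
+ *
+ *   lux = referenceLux * (refExposureTime / exposureTime)
+ *                      * (refAnalogueGain / aGain)
+ *                      * (refDigitalGain / dGain)
+ *                      * (meanY / referenceY)
+ *
+ * where meanY is the inter-quantile mean of \a yHist normalized to the number
+ * of bins.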
+ *
+ * \return Estimated lux value
+ */
+double Lux::estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const
+{
+ double currentY = yHist.interQuantileMean(0, 1);
+ double exposureTimeRatio = referenceExposureTime_ / exposureTime;
+ double aGainRatio = referenceAnalogueGain_ / aGain;
+ double dGainRatio = referenceDigitalGain_ / dGain;
+ double yRatio = (currentY / yHist.bins()) / referenceY_;
+
+ double estimatedLux = exposureTimeRatio * aGainRatio * dGainRatio *
+ yRatio * referenceLux_;
+
+ LOG(Lux, Debug) << "Estimated lux " << estimatedLux;
+ return estimatedLux;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/lux.h b/src/ipa/libipa/lux.h
new file mode 100644
index 00000000..d95bcdaf
--- /dev/null
+++ b/src/ipa/libipa/lux.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+class Histogram;
+
+class Lux
+{
+public:
+ Lux();
+
+ int parseTuningData(const YamlObject &tuningData);
+ double estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const;
+
+private:
+ utils::Duration referenceExposureTime_;
+ double referenceAnalogueGain_;
+ double referenceDigitalGain_;
+ double referenceY_;
+ double referenceLux_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index 016b8e0e..660be940 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -1,19 +1,41 @@
# SPDX-License-Identifier: CC0-1.0
libipa_headers = files([
+ 'agc_mean_luminance.h',
'algorithm.h',
+ 'awb_bayes.h',
+ 'awb_grey.h',
+ 'awb.h',
'camera_sensor_helper.h',
+ 'colours.h',
+ 'exposure_mode_helper.h',
'fc_queue.h',
+ 'fixedpoint.h',
'histogram.h',
+ 'interpolator.h',
+ 'lsc_polynomial.h',
+ 'lux.h',
'module.h',
+ 'pwl.h',
])
libipa_sources = files([
+ 'agc_mean_luminance.cpp',
'algorithm.cpp',
+ 'awb_bayes.cpp',
+ 'awb_grey.cpp',
+ 'awb.cpp',
'camera_sensor_helper.cpp',
+ 'colours.cpp',
+ 'exposure_mode_helper.cpp',
'fc_queue.cpp',
+ 'fixedpoint.cpp',
'histogram.cpp',
+ 'interpolator.cpp',
+ 'lsc_polynomial.cpp',
+ 'lux.cpp',
'module.cpp',
+ 'pwl.cpp',
])
libipa_includes = include_directories('..')
@@ -21,3 +43,7 @@ libipa_includes = include_directories('..')
libipa = static_library('ipa', [libipa_sources, libipa_headers],
include_directories : ipa_includes,
dependencies : libcamera_private)
+
+libipa_dep = declare_dependency(sources : libipa_headers,
+ include_directories : libipa_includes,
+ link_with : libipa)
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
index ee01f12a..64ca9141 100644
--- a/src/ipa/libipa/module.cpp
+++ b/src/ipa/libipa/module.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.cpp - IPA Module
+ * IPA Module
*/
#include "module.h"
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
index 4149a353..0fb51916 100644
--- a/src/ipa/libipa/module.h
+++ b/src/ipa/libipa/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - IPA module
+ * IPA module
*/
#pragma once
diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp
new file mode 100644
index 00000000..3fa005ba
--- /dev/null
+++ b/src/ipa/libipa/pwl.cpp
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Piecewise linear functions
+ */
+
+#include "pwl.h"
+
+#include <cmath>
+#include <sstream>
+
+/**
+ * \file pwl.h
+ * \brief Piecewise linear functions
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Pwl
+ * \brief Describe a univariate piecewise linear function in two-dimensional
+ * real space
+ *
+ * A piecewise linear function is a univariate function that maps reals to
+ * reals, and it is composed of multiple straight-line segments.
+ *
+ * While a mathematical piecewise linear function would usually be defined by
+ * a list of linear functions and for which values of the domain they apply,
+ * this Pwl class is instead defined by a list of points at which these line
+ * segments intersect. These intersecting points are known as knots.
+ *
+ * https://en.wikipedia.org/wiki/Piecewise_linear_function
+ *
+ * A consequence of the Pwl class being defined by knots instead of linear
+ * functions is that the values of the piecewise linear function past the ends
+ * of the function are constants as opposed to linear functions. In a
+ * mathematical piecewise linear function that is defined by multiple linear
+ * functions, the ends of the function are also linear functions and hence grow
+ * to infinity (or negative infinity). However, since this Pwl class is defined
+ * by knots, the y-value of the leftmost and rightmost knots will hold for all
+ * x values to negative infinity and positive infinity, respectively.
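+ *
+ * A small sketch (the knot values are illustrative only):
+ *
+ * \code{.unparsed}
+ * Pwl pwl;
+ * pwl.append(0.0, 1.0);
+ * pwl.append(1.0, 3.0);
+ * double y = pwl.eval(0.5); // yields 2.0, halfway along the single segment
+ * \endcode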
+ */
+
+/**
+ * \typedef Pwl::Point
+ * \brief Describe a point in two-dimensional real space
+ */
+
+/**
+ * \class Pwl::Interval
+ * \brief Describe an interval in one-dimensional real space
+ */
+
+/**
+ * \fn Pwl::Interval::Interval(double _start, double _end)
+ * \brief Construct an interval
+ * \param[in] _start Start of the interval
+ * \param[in] _end End of the interval
+ */
+
+/**
+ * \fn Pwl::Interval::contains
+ * \brief Check if a given value falls within the interval
+ * \param[in] value Value to check
+ * \return True if the value falls within the interval, including its bounds,
+ * or false otherwise
+ */
+
+/**
+ * \fn Pwl::Interval::clamp
+ * \brief Clamp a value such that it is within the interval
+ * \param[in] value Value to clamp
+ * \return The clamped value
+ */
+
+/**
+ * \fn Pwl::Interval::length
+ * \brief Compute the length of the interval
+ * \return The length of the interval
+ */
+
+/**
+ * \var Pwl::Interval::start
+ * \brief Start of the interval
+ */
+
+/**
+ * \var Pwl::Interval::end
+ * \brief End of the interval
+ */
+
+/**
+ * \brief Construct an empty piecewise linear function
+ */
+Pwl::Pwl()
+{
+}
+
+/**
+ * \brief Construct a piecewise linear function from a list of 2D points
+ * \param[in] points Vector of points from which to construct the piecewise
+ * linear function
+ *
+ * \a points must be in ascending order of x-value.
+ */
+Pwl::Pwl(const std::vector<Point> &points)
+ : points_(points)
+{
+}
+
+/**
+ * \copydoc Pwl::Pwl(const std::vector<Point> &points)
+ *
+ * The contents of the \a points vector are moved to the newly constructed Pwl
+ * instance.
+ */
+Pwl::Pwl(std::vector<Point> &&points)
+ : points_(std::move(points))
+{
+}
+
+/**
+ * \brief Append a point to the end of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be greater than the x-coordinate of the last
+ * (= greatest) point already in the piecewise linear function.
+ */
+void Pwl::append(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.back().x() + eps < x)
+ points_.push_back(Point({ x, y }));
+}
+
+/**
+ * \brief Prepend a point to the beginning of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be less than the x-coordinate of the first
+ * (= smallest) point already in the piecewise linear function.
+ */
+void Pwl::prepend(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.front().x() - eps > x)
+ points_.insert(points_.begin(), Point({ x, y }));
+}
+
+/**
+ * \fn Pwl::empty() const
+ * \brief Check if the piecewise linear function is empty
+ * \return True if there are no points in the function, false otherwise
+ */
+
+/**
+ * \fn Pwl::clear()
+ * \brief Clear the piecewise linear function
+ */
+
+/**
+ * \fn Pwl::size() const
+ * \brief Retrieve the number of points in the piecewise linear function
+ * \return The number of points in the piecewise linear function
+ */
+
+/**
+ * \brief Get the domain of the piecewise linear function
+ * \return An interval representing the domain
+ */
+Pwl::Interval Pwl::domain() const
+{
+ return Interval(points_[0].x(), points_[points_.size() - 1].x());
+}
+
+/**
+ * \brief Get the range of the piecewise linear function
+ * \return An interval representing the range
+ */
+Pwl::Interval Pwl::range() const
+{
+ double lo = points_[0].y(), hi = lo;
+ for (auto &p : points_)
+ lo = std::min(lo, p.y()), hi = std::max(hi, p.y());
+ return Interval(lo, hi);
+}
+
+/**
+ * \brief Evaluate the piecewise linear function
+ * \param[in] x The x value to input into the function
+ * \param[in,out] span Initial guess for span
+ * \param[in] updateSpan Set to true to update span
+ *
+ * Evaluate Pwl, optionally supplying an initial guess for the
+ * "span". The "span" may optionally be updated. If you want to know
+ * the "span" value but don't have an initial guess you can set it to
+ * -1.
+ *
+ * \return The result of evaluating the piecewise linear function at position \a x
+ */
+double Pwl::eval(double x, int *span, bool updateSpan) const
+{
+ int index = findSpan(x, span && *span != -1
+ ? *span
+ : points_.size() / 2 - 1);
+ if (span && updateSpan)
+ *span = index;
+ return points_[index].y() +
+ (x - points_[index].x()) * (points_[index + 1].y() - points_[index].y()) /
+ (points_[index + 1].x() - points_[index].x());
+}
+
+int Pwl::findSpan(double x, int span) const
+{
+ /*
+ * Pwls are generally small, so linear search may well be faster than
+	 * binary, though we could review this if large Pwls start turning up.
+ */
+ int lastSpan = points_.size() - 2;
+ /*
+	 * Some algorithms may call us with span pointing directly at the last
+	 * control point.
+ */
+ span = std::max(0, std::min(lastSpan, span));
+ while (span < lastSpan && x >= points_[span + 1].x())
+ span++;
+ while (span && x < points_[span].x())
+ span--;
+ return span;
+}
+
+/**
+ * \brief Compute the inverse function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The output includes whether the resulting inverse function is a proper
+ * (true) inverse, or only a best effort (e.g. input was non-monotonic).
+ *
+ * \return A pair of the inverse piecewise linear function, and whether or not
+ * the result is a proper/true inverse
+ */
+std::pair<Pwl, bool> Pwl::inverse(const double eps) const
+{
+ bool appended = false, prepended = false, neither = false;
+ Pwl inverse;
+
+ for (Point const &p : points_) {
+ if (inverse.empty()) {
+ inverse.append(p.y(), p.x(), eps);
+ } else if (std::abs(inverse.points_.back().x() - p.y()) <= eps ||
+ std::abs(inverse.points_.front().x() - p.y()) <= eps) {
+ /* do nothing */;
+ } else if (p.y() > inverse.points_.back().x()) {
+ inverse.append(p.y(), p.x(), eps);
+ appended = true;
+ } else if (p.y() < inverse.points_.front().x()) {
+ inverse.prepend(p.y(), p.x(), eps);
+ prepended = true;
+ } else {
+ neither = true;
+ }
+ }
+
+ /*
+ * This is not a proper inverse if we found ourselves putting points
+ * onto both ends of the inverse, or if there were points that couldn't
+ * go on either.
+ */
+ bool trueInverse = !(neither || (appended && prepended));
+
+ return { inverse, trueInverse };
+}
+
+/**
+ * \brief Compose two piecewise linear functions together
+ * \param[in] other The "other" piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The "this" function is applied first, and "other" after; the result maps
+ * x to other(this(x)).
+ *
+ * \return The composed piecewise linear function
+ */
+Pwl Pwl::compose(Pwl const &other, const double eps) const
+{
+ double thisX = points_[0].x(), thisY = points_[0].y();
+ int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
+ Pwl result({ Point({ thisX, other.eval(thisY, &otherSpan, false) }) });
+
+ while (thisSpan != (int)points_.size() - 1) {
+ double dx = points_[thisSpan + 1].x() - points_[thisSpan].x(),
+ dy = points_[thisSpan + 1].y() - points_[thisSpan].y();
+ if (std::abs(dy) > eps &&
+ otherSpan + 1 < (int)other.points_.size() &&
+ points_[thisSpan + 1].y() >= other.points_[otherSpan + 1].x() + eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the next span in other
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan + 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[++otherSpan].x();
+ } else if (std::abs(dy) > eps && otherSpan > 0 &&
+ points_[thisSpan + 1].y() <=
+ other.points_[otherSpan - 1].x() - eps) {
+ /*
+ * next control point in result will be where this
+ * function's y reaches the previous span in other
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan + 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[--otherSpan].x();
+ } else {
+ /* we stay in the same span in other */
+ thisSpan++;
+ thisX = points_[thisSpan].x(),
+ thisY = points_[thisSpan].y();
+ }
+ result.append(thisX, other.eval(thisY, &otherSpan, false),
+ eps);
+ }
+ return result;
+}
+
+/**
+ * \brief Apply function to (x, y) values at every control point
+ * \param[in] f Function to be applied
+ */
+void Pwl::map(std::function<void(double x, double y)> f) const
+{
+ for (auto &pt : points_)
+ f(pt.x(), pt.y());
+}
+
+/**
+ * \brief Apply function to (x, y0, y1) values wherever either Pwl has a
+ * control point.
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ *
+ * This applies the function \a f to every parameter (x, y0, y1), where x is
+ * the combined list of x-values from \a pwl0 and \a pwl1, y0 is the y-value
+ * for the given x in \a pwl0, and y1 is the y-value for the same x in \a pwl1.
+ */
+void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f)
+{
+ int span0 = 0, span1 = 0;
+ double x = std::min(pwl0.points_[0].x(), pwl1.points_[0].x());
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+
+ while (span0 < (int)pwl0.points_.size() - 1 ||
+ span1 < (int)pwl1.points_.size() - 1) {
+ if (span0 == (int)pwl0.points_.size() - 1)
+ x = pwl1.points_[++span1].x();
+ else if (span1 == (int)pwl1.points_.size() - 1)
+ x = pwl0.points_[++span0].x();
+ else if (pwl0.points_[span0 + 1].x() > pwl1.points_[span1 + 1].x())
+ x = pwl1.points_[++span1].x();
+ else
+ x = pwl0.points_[++span0].x();
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+ }
+}
+
+/**
+ * \brief Combine two Pwls
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * Create a new Pwl where the y values are given by running \a f wherever
+ * either pwl has a knot.
+ *
+ * \return The combined pwl
+ */
+Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps)
+{
+ Pwl result;
+ map2(pwl0, pwl1, [&](double x, double y0, double y1) {
+ result.append(x, f(x, y0, y1), eps);
+ });
+ return result;
+}
+
+/**
+ * \brief Multiply the piecewise linear function
+ * \param[in] d Scalar multiplier to multiply the function by
+ * \return This function, after it has been multiplied by \a d
+ */
+Pwl &Pwl::operator*=(double d)
+{
+ for (auto &pt : points_)
+ pt[1] *= d;
+ return *this;
+}
+
+/**
+ * \brief Assemble and return a string describing the piecewise linear function
+ * \return A string describing the piecewise linear function
+ */
+std::string Pwl::toString() const
+{
+ std::stringstream ss;
+ ss << "Pwl { ";
+ for (auto &p : points_)
+ ss << "(" << p.x() << ", " << p.y() << ") ";
+ ss << "}";
+ return ss.str();
+}
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values with an even number of
+ * elements. They are parsed in pairs into x and y points in the piecewise
+ * linear function, and added in order. x must be monotonically increasing.
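+ *
+ * For example, the list [ 0, 0, 1000, 1.5, 4000, 2.0 ] yields a Pwl with the
+ * knots (0, 0), (1000, 1.5) and (4000, 2.0).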
+ */
+template<>
+std::optional<ipa::Pwl>
+YamlObject::Getter<ipa::Pwl>::get(const YamlObject &obj) const
+{
+ if (!obj.size() || obj.size() % 2)
+ return std::nullopt;
+
+ ipa::Pwl pwl;
+
+ const auto &list = obj.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto x = it->get<double>();
+ if (!x)
+ return std::nullopt;
+ auto y = (++it)->get<double>();
+ if (!y)
+ return std::nullopt;
+
+ pwl.append(*x, *y);
+ }
+
+ if (pwl.size() != obj.size() / 2)
+ return std::nullopt;
+
+ return pwl;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h
new file mode 100644
index 00000000..c1496c30
--- /dev/null
+++ b/src/ipa/libipa/pwl.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Piecewise linear functions interface
+ */
+#pragma once
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "libcamera/internal/vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class Pwl
+{
+public:
+ using Point = Vector<double, 2>;
+
+ struct Interval {
+ Interval(double _start, double _end)
+ : start(_start), end(_end) {}
+
+ bool contains(double value)
+ {
+ return value >= start && value <= end;
+ }
+
+ double clamp(double value)
+ {
+ return std::clamp(value, start, end);
+ }
+
+ double length() const { return end - start; }
+
+ double start, end;
+ };
+
+ Pwl();
+ Pwl(const std::vector<Point> &points);
+ Pwl(std::vector<Point> &&points);
+
+ void append(double x, double y, double eps = 1e-6);
+
+ bool empty() const { return points_.empty(); }
+ void clear() { points_.clear(); }
+ size_t size() const { return points_.size(); }
+
+ Interval domain() const;
+ Interval range() const;
+
+ double eval(double x, int *span = nullptr,
+ bool updateSpan = true) const;
+
+ std::pair<Pwl, bool> inverse(double eps = 1e-6) const;
+ Pwl compose(const Pwl &other, double eps = 1e-6) const;
+
+ void map(std::function<void(double x, double y)> f) const;
+
+ static Pwl
+ combine(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ double eps = 1e-6);
+
+ Pwl &operator*=(double d);
+
+ std::string toString() const;
+
+private:
+ static void map2(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<void(double x, double y0, double y1)> f);
+ void prepend(double x, double y, double eps = 1e-6);
+ int findSpan(double x, int span) const;
+
+ std::vector<Point> points_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/agc.cpp b/src/ipa/mali-c55/algorithms/agc.cpp
new file mode 100644
index 00000000..70667db3
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/agc.cpp
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * AGC/AEC mean-based control algorithm
+ */
+
+#include "agc.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libipa/colours.h"
+#include "libipa/fixedpoint.h"
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Agc)
+
+/*
+ * Number of histogram bins. This is only valid for the specific configuration
+ * we program into the ISP: 4 separate histograms of 256 bins each. If that
+ * configuration ever changes then this constant will need updating.
+ */
+static constexpr unsigned int kNumHistogramBins = 256;
+
+/*
+ * The Mali-C55 ISP has a digital gain block which allows setting gain in Q5.8
+ * format, a range of 0.0 to (very nearly) 32.0. We clamp from 1.0 to the actual
+ * max value which is 8191 * 2^-8.
+ */
+static constexpr double kMinDigitalGain = 1.0;
+static constexpr double kMaxDigitalGain = 31.99609375;
+
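+/*
+ * Histogram bins are stored as 16-bit compressed values with a 4-bit exponent
+ * in the top bits and a 12-bit mantissa. A zero exponent encodes mantissa * 2;
+ * any other exponent encodes (mantissa + 4096) * 2^exponent.
+ */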
+uint32_t AgcStatistics::decodeBinValue(uint16_t binVal)
+{
+ int exponent = (binVal & 0xf000) >> 12;
+ int mantissa = binVal & 0xfff;
+
+ if (!exponent)
+ return mantissa * 2;
+ else
+ return (mantissa + 4096) * std::pow(2, exponent);
+}
+
+/*
+ * We configure the ISP to give us 4 histograms of 256 bins each, with
+ * a single histogram per colour channel (R/Gr/Gb/B). The memory space
+ * containing the data is a single block containing all 4 histograms
+ * with the position of each colour's histogram within it dependent on
+ * the bayer pattern of the data input to the ISP.
+ *
+ * NOTE: The validity of this function depends on the parameters we have
+ * configured. With different skip/offset x, y values not all of the
+ * colour channels would be populated, and they may not be in the same
+ * planes as calculated here.
+ */
+int AgcStatistics::setBayerOrderIndices(BayerFormat::Order bayerOrder)
+{
+ switch (bayerOrder) {
+ case BayerFormat::Order::RGGB:
+ rIndex_ = 0;
+ grIndex_ = 1;
+ gbIndex_ = 2;
+ bIndex_ = 3;
+ break;
+ case BayerFormat::Order::GRBG:
+ grIndex_ = 0;
+ rIndex_ = 1;
+ bIndex_ = 2;
+ gbIndex_ = 3;
+ break;
+ case BayerFormat::Order::GBRG:
+ gbIndex_ = 0;
+ bIndex_ = 1;
+ rIndex_ = 2;
+ grIndex_ = 3;
+ break;
+ case BayerFormat::Order::BGGR:
+ bIndex_ = 0;
+ gbIndex_ = 1;
+ grIndex_ = 2;
+ rIndex_ = 3;
+ break;
+ default:
+ LOG(MaliC55Agc, Error)
+ << "Invalid bayer format " << bayerOrder;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void AgcStatistics::parseStatistics(const mali_c55_stats_buffer *stats)
+{
+ uint32_t r[256], g[256], b[256], y[256];
+
+ /*
+ * We need to decode the bin values for each histogram from their 16-bit
+ * compressed values to a 32-bit value. We also take the average of the
+ * Gr/Gb values into a single green histogram.
+ */
+ for (unsigned int i = 0; i < 256; i++) {
+ r[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * rIndex_)]);
+ g[i] = (decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * grIndex_)]) +
+ decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * gbIndex_)])) / 2;
+ b[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * bIndex_)]);
+
+ y[i] = rec601LuminanceFromRGB({ { static_cast<double>(r[i]),
+ static_cast<double>(g[i]),
+ static_cast<double>(b[i]) } });
+ }
+
+ rHist = Histogram(Span<uint32_t>(r, kNumHistogramBins));
+ gHist = Histogram(Span<uint32_t>(g, kNumHistogramBins));
+ bHist = Histogram(Span<uint32_t>(b, kNumHistogramBins));
+ yHist = Histogram(Span<uint32_t>(y, kNumHistogramBins));
+}
+
+Agc::Agc()
+ : AgcMeanLuminance()
+{
+}
+
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
+{
+ int ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true);
+ context.ctrlMap[&controls::DigitalGain] = ControlInfo(
+ static_cast<float>(kMinDigitalGain),
+ static_cast<float>(kMaxDigitalGain),
+ static_cast<float>(kMinDigitalGain)
+ );
+ context.ctrlMap.merge(controls());
+
+ return 0;
+}
+
+int Agc::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ int ret = statistics_.setBayerOrderIndices(context.configuration.sensor.bayerOrder);
+ if (ret)
+ return ret;
+
+ /*
+ * Defaults; we use whatever the sensor's default exposure is and the
+ * minimum analogue gain. AEGC is _active_ by default.
+ */
+ context.activeState.agc.autoEnabled = true;
+ context.activeState.agc.automatic.sensorGain = context.configuration.agc.minAnalogueGain;
+ context.activeState.agc.automatic.exposure = context.configuration.agc.defaultExposure;
+ context.activeState.agc.automatic.ispGain = kMinDigitalGain;
+ context.activeState.agc.manual.sensorGain = context.configuration.agc.minAnalogueGain;
+ context.activeState.agc.manual.exposure = context.configuration.agc.defaultExposure;
+ context.activeState.agc.manual.ispGain = kMinDigitalGain;
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
+
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(context.configuration.agc.minShutterSpeed,
+ context.configuration.agc.maxShutterSpeed,
+ context.configuration.agc.minAnalogueGain,
+ context.configuration.agc.maxAnalogueGain);
+
+ resetFrameCount();
+
+ return 0;
+}
+
+void Agc::queueRequest(IPAContext &context, const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &agc = context.activeState.agc;
+
+ const auto &constraintMode = controls.get(controls::AeConstraintMode);
+ agc.constraintMode = constraintMode.value_or(agc.constraintMode);
+
+ const auto &exposureMode = controls.get(controls::AeExposureMode);
+ agc.exposureMode = exposureMode.value_or(agc.exposureMode);
+
+ const auto &agcEnable = controls.get(controls::AeEnable);
+ if (agcEnable && *agcEnable != agc.autoEnabled) {
+ agc.autoEnabled = *agcEnable;
+
+ LOG(MaliC55Agc, Info)
+ << (agc.autoEnabled ? "Enabling" : "Disabling")
+ << " AGC";
+ }
+
+	/*
+	 * If automatic exposure and gain are enabled we have no further work
+	 * to do here...
+	 */
+ if (agc.autoEnabled)
+ return;
+
+ /*
+ * ...otherwise we need to look for exposure and gain controls and use
+ * those to set the activeState.
+ */
+ const auto &exposure = controls.get(controls::ExposureTime);
+ if (exposure) {
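+		/*
+		 * ExposureTime is expressed in microseconds; convert it to a
+		 * number of sensor lines by dividing by the line duration.
+		 */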
+ agc.manual.exposure = *exposure * 1.0us / context.configuration.sensor.lineDuration;
+
+ LOG(MaliC55Agc, Debug)
+ << "Exposure set to " << agc.manual.exposure
+ << " on request sequence " << frame;
+ }
+
+ const auto &analogueGain = controls.get(controls::AnalogueGain);
+ if (analogueGain) {
+ agc.manual.sensorGain = *analogueGain;
+
+ LOG(MaliC55Agc, Debug)
+ << "Analogue gain set to " << agc.manual.sensorGain
+ << " on request sequence " << frame;
+ }
+
+ const auto &digitalGain = controls.get(controls::DigitalGain);
+ if (digitalGain) {
+ agc.manual.ispGain = *digitalGain;
+
+ LOG(MaliC55Agc, Debug)
+ << "Digital gain set to " << agc.manual.ispGain
+ << " on request sequence " << frame;
+ }
+}
+
+size_t Agc::fillGainParamBlock(IPAContext &context, IPAFrameContext &frameContext,
+ mali_c55_params_block block)
+{
+ IPAActiveState &activeState = context.activeState;
+ double gain;
+
+ if (activeState.agc.autoEnabled)
+ gain = activeState.agc.automatic.ispGain;
+ else
+ gain = activeState.agc.manual.ispGain;
+
+ block.header->type = MALI_C55_PARAM_BLOCK_DIGITAL_GAIN;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_digital_gain);
+
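+	/* The gain field takes the value in Q5.8 fixed point format. */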
+ block.digital_gain->gain = floatingToFixedPoint<5, 8, uint16_t, double>(gain);
+ frameContext.agc.ispGain = gain;
+
+ return block.header->size;
+}
+
+size_t Agc::fillParamsBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type)
+{
+ block.header->type = type;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_aexp_hist);
+
+ /* Collect every 3rd pixel horizontally */
+ block.aexp_hist->skip_x = 1;
+	/* Start from the first column */
+ block.aexp_hist->offset_x = 0;
+ /* Collect every pixel vertically */
+ block.aexp_hist->skip_y = 0;
+ /* Start from the first row */
+ block.aexp_hist->offset_y = 0;
+ /* 1x scaling (i.e. none) */
+ block.aexp_hist->scale_bottom = 0;
+ block.aexp_hist->scale_top = 0;
+ /* Collect all Bayer planes into 4 separate histograms */
+ block.aexp_hist->plane_mode = 1;
+ /* Tap the data immediately after the digital gain block */
+ block.aexp_hist->tap_point = MALI_C55_AEXP_HIST_TAP_FS;
+
+ return block.header->size;
+}
+
+size_t Agc::fillWeightsArrayBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type)
+{
+ block.header->type = type;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_aexp_weights);
+
+ /* We use every zone - a 15x15 grid */
+ block.aexp_weights->nodes_used_horiz = 15;
+ block.aexp_weights->nodes_used_vert = 15;
+
+ /*
+ * We uniformly weight the zones to 1 - this results in the collected
+ * histograms containing a true pixel count, which we can then use to
+ * approximate colour channel averages for the image.
+ */
+ Span<uint8_t> weights{
+ block.aexp_weights->zone_weights,
+ MALI_C55_MAX_ZONES
+ };
+ std::fill(weights.begin(), weights.end(), 1);
+
+ return block.header->size;
+}
+
+void Agc::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, mali_c55_params_buffer *params)
+{
+ mali_c55_params_block block;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillGainParamBlock(context, frameContext, block);
+
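+	/*
+	 * The histogram and weighting configuration only needs to be
+	 * programmed once, so we only include it in the first frame's
+	 * parameters buffer.
+	 */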
+ if (frame > 0)
+ return;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillParamsBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillWeightsArrayBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillParamsBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillWeightsArrayBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS);
+}
+
+double Agc::estimateLuminance(const double gain) const
+{
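+	/*
+	 * The inter-quantile means are expressed in terms of bin indices;
+	 * dividing the gained Rec. 601 luminance by the number of bins
+	 * normalises the estimate to roughly the [0, 1] range used by the
+	 * AgcMeanLuminance calculations.
+	 */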
+ double rAvg = statistics_.rHist.interQuantileMean(0, 1) * gain;
+ double gAvg = statistics_.gHist.interQuantileMean(0, 1) * gain;
+ double bAvg = statistics_.bHist.interQuantileMean(0, 1) * gain;
+ double yAvg = rec601LuminanceFromRGB({ { rAvg, gAvg, bAvg } });
+
+ return yAvg / kNumHistogramBins;
+}
+
+void Agc::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+		  ControlList &metadata)
+{
+ IPASessionConfiguration &configuration = context.configuration;
+ IPAActiveState &activeState = context.activeState;
+
+ if (!stats) {
+ LOG(MaliC55Agc, Error) << "No statistics buffer passed to Agc";
+ return;
+ }
+
+ statistics_.parseStatistics(stats);
+ context.activeState.agc.temperatureK = estimateCCT({ { statistics_.rHist.interQuantileMean(0, 1),
+ statistics_.gHist.interQuantileMean(0, 1),
+ statistics_.bHist.interQuantileMean(0, 1) } });
+
+ /*
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
+ */
+ uint32_t exposure = frameContext.agc.exposure;
+ double analogueGain = frameContext.agc.sensorGain;
+ double digitalGain = frameContext.agc.ispGain;
+ double totalGain = analogueGain * digitalGain;
+ utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
+ utils::Duration effectiveExposureValue = currentShutter * totalGain;
+
+ utils::Duration shutterTime;
+ double aGain, dGain;
+ std::tie(shutterTime, aGain, dGain) =
+ calculateNewEv(activeState.agc.constraintMode,
+ activeState.agc.exposureMode, statistics_.yHist,
+ effectiveExposureValue);
+
+ dGain = std::clamp(dGain, kMinDigitalGain, kMaxDigitalGain);
+
+ LOG(MaliC55Agc, Debug)
+		<< "Divided up shutter time, analogue gain and digital gain are "
+ << shutterTime << ", " << aGain << " and " << dGain;
+
+ activeState.agc.automatic.exposure = shutterTime / configuration.sensor.lineDuration;
+ activeState.agc.automatic.sensorGain = aGain;
+ activeState.agc.automatic.ispGain = dGain;
+
+ metadata.set(controls::ExposureTime, currentShutter.get<std::micro>());
+ metadata.set(controls::AnalogueGain, frameContext.agc.sensorGain);
+ metadata.set(controls::DigitalGain, frameContext.agc.ispGain);
+ metadata.set(controls::ColourTemperature, context.activeState.agc.temperatureK);
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/agc.h b/src/ipa/mali-c55/algorithms/agc.h
new file mode 100644
index 00000000..c5c574e5
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/agc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * agc.h - Mali C55 AGC/AEC mean-based control algorithm
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
+#include "algorithm.h"
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class AgcStatistics
+{
+public:
+ AgcStatistics()
+ {
+ }
+
+ int setBayerOrderIndices(BayerFormat::Order bayerOrder);
+ uint32_t decodeBinValue(uint16_t binVal);
+ void parseStatistics(const mali_c55_stats_buffer *stats);
+
+ Histogram rHist;
+ Histogram gHist;
+ Histogram bHist;
+ Histogram yHist;
+private:
+ unsigned int rIndex_;
+ unsigned int grIndex_;
+ unsigned int gbIndex_;
+ unsigned int bIndex_;
+};
+
+class Agc : public Algorithm, public AgcMeanLuminance
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ double estimateLuminance(const double gain) const override;
+ size_t fillGainParamBlock(IPAContext &context,
+ IPAFrameContext &frameContext,
+ mali_c55_params_block block);
+ size_t fillParamsBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type);
+ size_t fillWeightsArrayBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type);
+
+ AgcStatistics statistics_;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/algorithm.h b/src/ipa/mali-c55/algorithms/algorithm.h
new file mode 100644
index 00000000..36a3bff0
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/algorithm.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * algorithm.h - Mali-C55 control algorithm interface
+ */
+
+#pragma once
+
+#include <linux/mali-c55-config.h>
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+class Algorithm : public libcamera::ipa::Algorithm<Module>
+{
+};
+
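+/*
+ * This union allows the code to overlay the various typed parameter block
+ * structures onto the raw bytes of a parameters buffer: the generic header
+ * member is filled in first, and the type-specific member is then used to
+ * populate the rest of the block in place.
+ */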
+union mali_c55_params_block {
+ struct mali_c55_params_block_header *header;
+ struct mali_c55_params_sensor_off_preshading *sensor_offs;
+ struct mali_c55_params_aexp_hist *aexp_hist;
+ struct mali_c55_params_aexp_weights *aexp_weights;
+ struct mali_c55_params_digital_gain *digital_gain;
+ struct mali_c55_params_awb_gains *awb_gains;
+ struct mali_c55_params_awb_config *awb_config;
+ struct mali_c55_params_mesh_shading_config *shading_config;
+ struct mali_c55_params_mesh_shading_selection *shading_selection;
+ __u8 *data;
+};
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/awb.cpp b/src/ipa/mali-c55/algorithms/awb.cpp
new file mode 100644
index 00000000..050b191b
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/awb.cpp
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * awb.cpp - Mali C55 grey world auto white balance algorithm
+ */
+
+#include "awb.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/fixedpoint.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Awb)
+
+/* Number of frames at which we should run AWB at full speed */
+static constexpr uint32_t kNumStartupFrames = 4;
+
+Awb::Awb()
+{
+}
+
+int Awb::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ /*
+ * Initially we have no idea what the colour balance will be like, so
+ * for the first frame we will make no assumptions and leave the R/B
+ * channels unmodified.
+ */
+ context.activeState.awb.rGain = 1.0;
+ context.activeState.awb.bGain = 1.0;
+
+ return 0;
+}
+
+size_t Awb::fillGainsParamBlock(mali_c55_params_block block, IPAContext &context,
+ IPAFrameContext &frameContext)
+{
+ block.header->type = MALI_C55_PARAM_BLOCK_AWB_GAINS;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_awb_gains);
+
+ double rGain = context.activeState.awb.rGain;
+ double bGain = context.activeState.awb.bGain;
+
+ /*
+ * The gains here map as follows:
+ * gain00 = R
+ * gain01 = Gr
+ * gain10 = Gb
+ * gain11 = B
+ *
+ * This holds true regardless of the bayer order of the input data, as
+ * the mapping is done internally in the ISP.
+ */
+ block.awb_gains->gain00 = floatingToFixedPoint<4, 8, uint16_t, double>(rGain);
+ block.awb_gains->gain01 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0);
+ block.awb_gains->gain10 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0);
+ block.awb_gains->gain11 = floatingToFixedPoint<4, 8, uint16_t, double>(bGain);
+
+ frameContext.awb.rGain = rGain;
+ frameContext.awb.bGain = bGain;
+
+ return sizeof(struct mali_c55_params_awb_gains);
+}
+
+size_t Awb::fillConfigParamBlock(mali_c55_params_block block)
+{
+ block.header->type = MALI_C55_PARAM_BLOCK_AWB_CONFIG;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_awb_config);
+
+ /* Tap the stats after the purple fringe block */
+ block.awb_config->tap_point = MALI_C55_AWB_STATS_TAP_PF;
+
+ /* Get R/G and B/G ratios as statistics */
+ block.awb_config->stats_mode = MALI_C55_AWB_MODE_RGBG;
+
+ /* Default white level */
+ block.awb_config->white_level = 1023;
+
+ /* Default black level */
+ block.awb_config->black_level = 0;
+
+ /*
+	 * By default, pixels are included whose colour ratios fall within a
+	 * region (on a cr ratio x cb ratio graph) defined by four points:
+ * (0.25, 0.25)
+ * (0.25, 1.99609375)
+ * (1.99609375, 1.99609375)
+ * (1.99609375, 0.25)
+ *
+ * The ratios themselves are stored in Q4.8 format.
+ *
+ * \todo should these perhaps be tunable?
+ */
+ block.awb_config->cr_max = 511;
+ block.awb_config->cr_min = 64;
+ block.awb_config->cb_max = 511;
+ block.awb_config->cb_min = 64;
+
+ /* We use the full 15x15 zoning scheme */
+ block.awb_config->nodes_used_horiz = 15;
+ block.awb_config->nodes_used_vert = 15;
+
+ /*
+ * We set the trimming boundaries equivalent to the main boundaries. In
+ * other words; no trimming.
+ */
+ block.awb_config->cr_high = 511;
+ block.awb_config->cr_low = 64;
+ block.awb_config->cb_high = 511;
+ block.awb_config->cb_low = 64;
+
+ return sizeof(struct mali_c55_params_awb_config);
+}
+
+void Awb::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, mali_c55_params_buffer *params)
+{
+ mali_c55_params_block block;
+ block.data = &params->data[params->total_size];
+
+ params->total_size += fillGainsParamBlock(block, context, frameContext);
+
+ if (frame > 0)
+ return;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillConfigParamBlock(block);
+}
+
+void Awb::process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, const mali_c55_stats_buffer *stats,
+		  ControlList &metadata)
+{
+ const struct mali_c55_awb_average_ratios *awb_ratios = stats->awb_ratios;
+
+ /*
+ * The ISP produces average R:G and B:G ratios for zones. We take the
+ * average of all the zones with data and simply invert them to provide
+ * gain figures that we can apply to approximate a grey world.
+ */
+ unsigned int counted_zones = 0;
+ double rgSum = 0, bgSum = 0;
+
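+	/* We configured a 15x15 grid, so there are 225 zones to inspect. */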
+ for (unsigned int i = 0; i < 225; i++) {
+ if (!awb_ratios[i].num_pixels)
+ continue;
+
+ /*
+ * The statistics are in Q4.8 format, so we convert to double
+ * here.
+ */
+ rgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_rg_gr);
+ bgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_bg_br);
+ counted_zones++;
+ }
+
+ /*
+ * Sometimes the first frame's statistics have no valid pixels, in which
+ * case we'll just assume a grey world until they say otherwise.
+ */
+ double rgAvg, bgAvg;
+ if (!counted_zones) {
+ rgAvg = 1.0;
+ bgAvg = 1.0;
+ } else {
+ rgAvg = rgSum / counted_zones;
+ bgAvg = bgSum / counted_zones;
+ }
+
+ /*
+ * The statistics are generated _after_ white balancing is performed in
+ * the ISP. To get the true ratio we therefore have to adjust the stats
+ * figure by the gains that were applied when the statistics for this
+ * frame were generated.
+ */
+ double rRatio = rgAvg / frameContext.awb.rGain;
+ double bRatio = bgAvg / frameContext.awb.bGain;
+
+ /*
+ * And then we can simply invert the ratio to find the gain we should
+ * apply.
+ */
+ double rGain = 1 / rRatio;
+ double bGain = 1 / bRatio;
+
+	/*
+	 * Running at full speed, this algorithm results in oscillations in the
+	 * colour balance. To remove those we dampen the speed at which it makes
+	 * changes in gain - an exponential moving average - unless we're in the
+	 * startup phase, in which case we want to fix the miscolouring as
+	 * quickly as possible.
+	 */
+ double speed = frame < kNumStartupFrames ? 1.0 : 0.2;
+ rGain = speed * rGain + context.activeState.awb.rGain * (1.0 - speed);
+ bGain = speed * bGain + context.activeState.awb.bGain * (1.0 - speed);
+
+ context.activeState.awb.rGain = rGain;
+ context.activeState.awb.bGain = bGain;
+
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(frameContext.awb.rGain),
+ static_cast<float>(frameContext.awb.bGain),
+ });
+
+ LOG(MaliC55Awb, Debug) << "For frame number " << frame << ": "
+ << "Average R/G Ratio: " << rgAvg
+ << ", Average B/G Ratio: " << bgAvg
+ << "\nrGain applied to this frame: " << frameContext.awb.rGain
+ << ", bGain applied to this frame: " << frameContext.awb.bGain
+ << "\nrGain to apply: " << context.activeState.awb.rGain
+ << ", bGain to apply: " << context.activeState.awb.bGain;
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/awb.h b/src/ipa/mali-c55/algorithms/awb.h
new file mode 100644
index 00000000..800c2e83
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/awb.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * awb.h - Mali C55 grey world auto white balance algorithm
+ */
+
+#pragma once
+
+#include "algorithm.h"
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class Awb : public Algorithm
+{
+public:
+ Awb();
+ ~Awb() = default;
+
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ size_t fillGainsParamBlock(mali_c55_params_block block,
+ IPAContext &context,
+ IPAFrameContext &frameContext);
+ size_t fillConfigParamBlock(mali_c55_params_block block);
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/blc.cpp b/src/ipa/mali-c55/algorithms/blc.cpp
new file mode 100644
index 00000000..2a54c86a
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/blc.cpp
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Mali-C55 sensor offset (black level) correction
+ */
+
+#include "blc.h"
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+/**
+ * \file blc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+/**
+ * \class BlackLevelCorrection
+ * \brief MaliC55 Black Level Correction control
+ */
+
+LOG_DEFINE_CATEGORY(MaliC55Blc)
+
+BlackLevelCorrection::BlackLevelCorrection()
+ : tuningParameters_(false)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ offset00 = tuningData["offset00"].get<uint32_t>(0);
+ offset01 = tuningData["offset01"].get<uint32_t>(0);
+ offset10 = tuningData["offset10"].get<uint32_t>(0);
+ offset11 = tuningData["offset11"].get<uint32_t>(0);
+
+ if (offset00 > kMaxOffset || offset01 > kMaxOffset ||
+ offset10 > kMaxOffset || offset11 > kMaxOffset) {
+ LOG(MaliC55Blc, Error) << "Invalid black level offsets";
+ return -EINVAL;
+ }
+
+ tuningParameters_ = true;
+
+ LOG(MaliC55Blc, Debug)
+ << "Black levels: 00 " << offset00 << ", 01 " << offset01
+ << ", 10 " << offset10 << ", 11 " << offset11;
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int BlackLevelCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+	/*
+	 * If no black levels were passed in through tuning data then we use
+	 * the value from the CameraSensorHelper, if one is available.
+	 */
+ if (context.configuration.sensor.blackLevel &&
+ !(offset00 + offset01 + offset10 + offset11)) {
+ offset00 = context.configuration.sensor.blackLevel;
+ offset01 = context.configuration.sensor.blackLevel;
+ offset10 = context.configuration.sensor.blackLevel;
+ offset11 = context.configuration.sensor.blackLevel;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params)
+{
+	mali_c55_params_block block;
+
+	if (frame > 0)
+		return;
+
+	if (!tuningParameters_)
+		return;
+
+	block.data = &params->data[params->total_size];
+
+ block.header->type = MALI_C55_PARAM_BLOCK_SENSOR_OFFS;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(mali_c55_params_sensor_off_preshading);
+
+ block.sensor_offs->chan00 = offset00;
+ block.sensor_offs->chan01 = offset01;
+ block.sensor_offs->chan10 = offset10;
+ block.sensor_offs->chan11 = offset11;
+
+ params->total_size += block.header->size;
+}
+
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const mali_c55_stats_buffer *stats,
+ ControlList &metadata)
+{
+ /*
+ * Black Level Offsets in tuning data need to be 20-bit, whereas the
+ * metadata expects values from a 16-bit range. Right-shift to remove
+ * the 4 least significant bits.
+ *
+ * The black levels should be reported in the order R, Gr, Gb, B. We
+ * ignore that here given we're using matching values so far, but it
+ * would be safer to check the sensor's bayer order.
+ *
+ * \todo Account for bayer order.
+ */
+ metadata.set(controls::SensorBlackLevels, {
+ static_cast<int32_t>(offset00 >> 4),
+ static_cast<int32_t>(offset01 >> 4),
+ static_cast<int32_t>(offset10 >> 4),
+ static_cast<int32_t>(offset11 >> 4),
+ });
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/blc.h b/src/ipa/mali-c55/algorithms/blc.h
new file mode 100644
index 00000000..9696e8e9
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/blc.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Mali-C55 sensor offset (black level) correction
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class BlackLevelCorrection : public Algorithm
+{
+public:
+ BlackLevelCorrection();
+ ~BlackLevelCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ static constexpr uint32_t kMaxOffset = 0xfffff;
+
+ bool tuningParameters_;
+ uint32_t offset00;
+ uint32_t offset01;
+ uint32_t offset10;
+ uint32_t offset11;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/lsc.cpp b/src/ipa/mali-c55/algorithms/lsc.cpp
new file mode 100644
index 00000000..c5afc04d
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/lsc.cpp
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * lsc.cpp - Mali-C55 Lens shading correction algorithm
+ */
+
+#include "lsc.h"
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Lsc)
+
+int Lsc::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ if (!tuningData.contains("meshScale")) {
+ LOG(MaliC55Lsc, Error) << "meshScale missing from tuningData";
+ return -EINVAL;
+ }
+
+ meshScale_ = tuningData["meshScale"].get<uint32_t>(0);
+
+ const YamlObject &yamlSets = tuningData["sets"];
+ if (!yamlSets.isList()) {
+ LOG(MaliC55Lsc, Error) << "LSC tables missing or invalid";
+ return -EINVAL;
+ }
+
+ size_t tableSize = 0;
+ const auto &sets = yamlSets.asList();
+ for (const auto &yamlSet : sets) {
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (!ct) {
+ LOG(MaliC55Lsc, Error) << "Invalid colour temperature";
+ return -EINVAL;
+ }
+
+ if (std::count(colourTemperatures_.begin(),
+ colourTemperatures_.end(), ct)) {
+ LOG(MaliC55Lsc, Error)
+ << "Multiple sets found for colour temperature";
+ return -EINVAL;
+ }
+
+ std::vector<uint8_t> rTable =
+ yamlSet["r"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ std::vector<uint8_t> gTable =
+ yamlSet["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ std::vector<uint8_t> bTable =
+ yamlSet["b"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+
+ /*
+ * Some validation to do; only 16x16 and 32x32 tables of
+ * coefficients are acceptable, and all tables across all of the
+ * sets must be the same size. The first time we encounter a
+ * table we check that it is an acceptable size and if so make
+ * sure all other tables are of equal size.
+ */
+ if (!tableSize) {
+ if (rTable.size() != 256 && rTable.size() != 1024) {
+ LOG(MaliC55Lsc, Error)
+ << "Invalid table size for colour temperature " << ct;
+ return -EINVAL;
+ }
+ tableSize = rTable.size();
+ }
+
+ if (rTable.size() != tableSize ||
+ gTable.size() != tableSize ||
+ bTable.size() != tableSize) {
+ LOG(MaliC55Lsc, Error)
+ << "Invalid or mismatched table size for colour temperature " << ct;
+ return -EINVAL;
+ }
+
+ if (colourTemperatures_.size() >= 3) {
+ LOG(MaliC55Lsc, Error)
+ << "A maximum of 3 colour temperatures are supported";
+ return -EINVAL;
+ }
+
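+		/*
+		 * Each 32-bit mesh node packs one 8-bit coefficient per colour
+		 * temperature set; this set's table is shifted into the byte
+		 * corresponding to its index.
+		 */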
+ for (unsigned int i = 0; i < tableSize; i++) {
+ mesh_[kRedOffset + i] |=
+ (rTable[i] << (colourTemperatures_.size() * 8));
+ mesh_[kGreenOffset + i] |=
+ (gTable[i] << (colourTemperatures_.size() * 8));
+ mesh_[kBlueOffset + i] |=
+ (bTable[i] << (colourTemperatures_.size() * 8));
+ }
+
+ colourTemperatures_.push_back(ct);
+ }
+
+	/*
+	 * The mesh has either 16x16 or 32x32 nodes; we tell the driver which it
+	 * is based on the number of values in the tuning data's tables. The
+	 * mesh_width and mesh_height fields take the node count minus one.
+	 */
+	if (tableSize == 256)
+		meshSize_ = 15;
+	else
+		meshSize_ = 31;
+
+ return 0;
+}
+
+size_t Lsc::fillConfigParamsBlock(mali_c55_params_block block) const
+{
+ block.header->type = MALI_C55_PARAM_MESH_SHADING_CONFIG;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_mesh_shading_config);
+
+ block.shading_config->mesh_show = false;
+ block.shading_config->mesh_scale = meshScale_;
+ block.shading_config->mesh_page_r = 0;
+ block.shading_config->mesh_page_g = 1;
+ block.shading_config->mesh_page_b = 2;
+ block.shading_config->mesh_width = meshSize_;
+ block.shading_config->mesh_height = meshSize_;
+
+ std::copy(mesh_.begin(), mesh_.end(), block.shading_config->mesh);
+
+ return block.header->size;
+}
+
+size_t Lsc::fillSelectionParamsBlock(mali_c55_params_block block, uint8_t bank,
+ uint8_t alpha) const
+{
+ block.header->type = MALI_C55_PARAM_MESH_SHADING_SELECTION;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_mesh_shading_selection);
+
+ block.shading_selection->mesh_alpha_bank_r = bank;
+ block.shading_selection->mesh_alpha_bank_g = bank;
+ block.shading_selection->mesh_alpha_bank_b = bank;
+ block.shading_selection->mesh_alpha_r = alpha;
+ block.shading_selection->mesh_alpha_g = alpha;
+ block.shading_selection->mesh_alpha_b = alpha;
+ block.shading_selection->mesh_strength = 0x1000; /* Otherwise known as 1.0 */
+
+ return block.header->size;
+}
+
+std::tuple<uint8_t, uint8_t> Lsc::findBankAndAlpha(uint32_t ct) const
+{
+ unsigned int i;
+
+ ct = std::clamp<uint32_t>(ct, colourTemperatures_.front(),
+ colourTemperatures_.back());
+
+ for (i = 0; i < colourTemperatures_.size() - 1; i++) {
+ if (ct >= colourTemperatures_[i] &&
+ ct <= colourTemperatures_[i + 1])
+ break;
+ }
+
+	/*
+	 * With the clamping, the loop is guaranteed to break with i no larger
+	 * than colourTemperatures_.size() - 2, so both i and i + 1 are valid
+	 * indices; alpha maps ct's position between the two onto 0-255.
+	 */
+ uint8_t alpha = (255 * (ct - colourTemperatures_[i])) /
+ (colourTemperatures_[i + 1] - colourTemperatures_[i]);
+
+ return { i, alpha };
+}
+
+void Lsc::prepare(IPAContext &context, const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params)
+{
+	/*
+	 * For each frame we assess the colour temperature of the **last** frame
+	 * and then select an appropriately blended table of coefficients based
+	 * on that ct. If we only have a single table the handling is simpler:
+	 * if it's the first frame we just select that table, and if we're past
+	 * the first frame we do nothing at all - the configuration will never
+	 * change.
+	 */
+ uint32_t temperatureK = context.activeState.agc.temperatureK;
+ uint8_t bank, alpha;
+
+ if (colourTemperatures_.size() == 1) {
+ if (frame > 0)
+ return;
+
+ bank = 0;
+ alpha = 0;
+ } else {
+ std::tie(bank, alpha) = findBankAndAlpha(temperatureK);
+ }
+
+ mali_c55_params_block block;
+ block.data = &params->data[params->total_size];
+
+ params->total_size += fillSelectionParamsBlock(block, bank, alpha);
+
+ if (frame > 0)
+ return;
+
+ /*
+ * If this is the first frame, we need to load the parsed coefficient
+ * tables from tuning data to the ISP.
+ */
+ block.data = &params->data[params->total_size];
+ params->total_size += fillConfigParamsBlock(block);
+}
+
+REGISTER_IPA_ALGORITHM(Lsc, "Lsc")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/lsc.h b/src/ipa/mali-c55/algorithms/lsc.h
new file mode 100644
index 00000000..e613277a
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/lsc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * lsc.h - Mali-C55 Lens shading correction algorithm
+ */
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class Lsc : public Algorithm
+{
+public:
+ Lsc() = default;
+ ~Lsc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+private:
+ static constexpr unsigned int kRedOffset = 0;
+ static constexpr unsigned int kGreenOffset = 1024;
+ static constexpr unsigned int kBlueOffset = 2048;
+
+ size_t fillConfigParamsBlock(mali_c55_params_block block) const;
+ size_t fillSelectionParamsBlock(mali_c55_params_block block,
+ uint8_t bank, uint8_t alpha) const;
+ std::tuple<uint8_t, uint8_t> findBankAndAlpha(uint32_t ct) const;
+
+ std::vector<uint32_t> mesh_ = std::vector<uint32_t>(3072);
+ std::vector<uint32_t> colourTemperatures_;
+ uint32_t meshScale_;
+ uint32_t meshSize_;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/meson.build b/src/ipa/mali-c55/algorithms/meson.build
new file mode 100644
index 00000000..1665da07
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+mali_c55_ipa_algorithms = files([
+ 'agc.cpp',
+ 'awb.cpp',
+ 'blc.cpp',
+ 'lsc.cpp',
+])
diff --git a/src/ipa/mali-c55/data/imx415.yaml b/src/ipa/mali-c55/data/imx415.yaml
new file mode 100644
index 00000000..126b427a
--- /dev/null
+++ b/src/ipa/mali-c55/data/imx415.yaml
@@ -0,0 +1,325 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ offset00: 51200
+ offset01: 51200
+ offset10: 51200
+ offset11: 51200
+ - Lsc:
+ meshScale: 4 # 1.0 - 2.0 Gain
+ sets:
+ - ct: 2500
+ r: [
+ 21, 20, 19, 17, 15, 14, 12, 11, 9, 9, 9, 9, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 13, 13, 10, 10, 13, 16, 17, 18, 21, 22,
+ 21, 20, 18, 16, 14, 13, 12, 11, 10, 9, 9, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 12, 15, 17, 18, 21, 21,
+ 20, 19, 17, 16, 14, 13, 12, 11, 10, 9, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 8, 8, 8, 11, 15, 17, 18, 21, 21,
+ 19, 19, 17, 15, 14, 13, 12, 11, 10, 8, 8, 7, 7, 7, 6, 6, 7, 7, 8, 8, 8, 8, 8, 7, 7, 8, 10, 14, 17, 18, 20, 22,
+ 19, 18, 17, 15, 14, 13, 11, 11, 9, 8, 8, 7, 7, 6, 5, 5, 5, 5, 6, 7, 8, 7, 7, 6, 7, 7, 10, 12, 16, 18, 20, 22,
+ 18, 18, 16, 15, 14, 12, 11, 10, 9, 8, 6, 6, 5, 5, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 8, 12, 16, 18, 19, 20,
+ 18, 18, 16, 14, 13, 12, 11, 9, 9, 7, 6, 5, 5, 5, 4, 4, 4, 5, 5, 4, 4, 5, 5, 5, 5, 6, 8, 11, 15, 18, 18, 19,
+ 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 7, 9, 14, 17, 18, 18,
+ 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 8, 12, 17, 18, 18,
+ 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 3, 3, 4, 4, 4, 5, 6, 8, 12, 16, 19, 19,
+ 17, 16, 15, 13, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 5, 6, 9, 12, 16, 19, 20,
+ 17, 15, 15, 13, 12, 11, 10, 8, 6, 6, 5, 4, 3, 3, 3, 2, 3, 3, 4, 4, 3, 3, 2, 3, 4, 5, 6, 9, 11, 16, 19, 20,
+ 17, 15, 15, 14, 11, 11, 10, 8, 6, 5, 5, 4, 3, 3, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 4, 5, 6, 8, 11, 16, 18, 19,
+ 16, 16, 15, 13, 11, 11, 10, 7, 6, 5, 4, 4, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 11, 14, 17, 18,
+ 16, 16, 14, 13, 11, 10, 9, 7, 6, 5, 4, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 10, 14, 17, 18,
+ 16, 15, 14, 13, 13, 10, 9, 7, 6, 4, 4, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 8, 11, 17, 18, 19,
+ 16, 15, 14, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 6, 8, 12, 17, 19, 20,
+ 17, 15, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 6, 8, 13, 16, 19, 21,
+ 17, 16, 15, 13, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 7, 8, 13, 16, 19, 20,
+ 17, 16, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20,
+ 18, 16, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 8, 9, 13, 17, 20, 20,
+ 18, 16, 16, 15, 14, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20,
+ 18, 17, 16, 15, 14, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 9, 12, 15, 19, 20, 20,
+ 18, 18, 17, 16, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 7, 9, 12, 15, 17, 19, 20, 20,
+ 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 8, 7, 6, 5, 4, 2, 1, 2, 3, 4, 5, 5, 6, 6, 8, 10, 13, 16, 18, 20, 20, 21,
+ 19, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 6, 7, 10, 11, 14, 17, 19, 20, 21, 22,
+ 20, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 6, 6, 7, 10, 12, 14, 18, 20, 21, 22, 23,
+ 21, 20, 19, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 5, 4, 4, 5, 6, 6, 7, 8, 11, 13, 16, 19, 21, 22, 22, 22,
+ 22, 21, 20, 19, 18, 17, 16, 14, 13, 12, 12, 10, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 9, 12, 14, 18, 20, 21, 22, 22, 22,
+ 23, 22, 21, 20, 19, 17, 16, 15, 14, 14, 13, 12, 10, 9, 8, 7, 6, 5, 5, 6, 7, 8, 8, 10, 12, 15, 18, 20, 21, 22, 22, 22,
+ 24, 23, 22, 21, 20, 18, 17, 16, 15, 15, 14, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 11, 14, 17, 19, 20, 21, 21, 21, 21,
+ 24, 24, 23, 21, 20, 19, 17, 16, 15, 15, 15, 14, 14, 14, 11, 9, 6, 5, 5, 6, 8, 8, 10, 12, 15, 17, 20, 20, 21, 21, 21, 21,
+ ]
+ g: [
+ 19, 18, 17, 15, 13, 12, 10, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 9, 9, 11, 15, 15, 16, 19, 20,
+ 19, 18, 16, 15, 12, 12, 10, 10, 8, 7, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 7, 7, 11, 14, 15, 16, 19, 19,
+ 18, 17, 16, 14, 12, 12, 10, 10, 8, 7, 7, 6, 6, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 10, 14, 15, 17, 19, 19,
+ 17, 17, 16, 14, 12, 12, 10, 10, 8, 7, 6, 6, 6, 5, 5, 4, 5, 5, 6, 6, 6, 7, 7, 5, 6, 6, 9, 13, 15, 17, 18, 20,
+ 17, 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 6, 8, 11, 15, 17, 18, 20,
+ 17, 17, 15, 13, 12, 11, 9, 9, 8, 7, 5, 5, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 11, 15, 17, 18, 18,
+ 17, 16, 15, 13, 12, 11, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 9, 14, 17, 17, 18,
+ 17, 16, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 6, 8, 13, 16, 17, 17,
+ 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 12, 16, 17, 17,
+ 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 3, 3, 3, 4, 5, 7, 11, 15, 18, 18,
+ 16, 14, 13, 12, 11, 10, 9, 7, 7, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 4, 5, 8, 11, 15, 18, 19,
+ 16, 14, 13, 12, 11, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 4, 5, 8, 10, 15, 18, 19,
+ 16, 14, 14, 13, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 5, 7, 10, 15, 17, 18,
+ 16, 15, 14, 12, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 6, 10, 14, 17, 17,
+ 15, 15, 13, 12, 11, 10, 9, 7, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 7, 10, 14, 17, 18,
+ 15, 14, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 10, 17, 18, 18,
+ 15, 14, 14, 13, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 17, 19, 19,
+ 16, 14, 14, 13, 12, 12, 9, 7, 6, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4, 4, 5, 8, 12, 17, 19, 20,
+ 16, 15, 14, 13, 12, 12, 9, 7, 7, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 8, 12, 17, 19, 20,
+ 17, 15, 14, 13, 12, 12, 9, 7, 7, 5, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 8, 12, 17, 19, 20,
+ 18, 15, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20,
+ 18, 16, 15, 14, 13, 12, 9, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20,
+ 18, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 9, 12, 15, 19, 20, 20,
+ 18, 18, 16, 16, 14, 13, 10, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 6, 8, 11, 15, 17, 19, 20, 20,
+ 18, 18, 16, 16, 14, 13, 12, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 12, 15, 18, 20, 20, 20,
+ 18, 18, 17, 16, 15, 14, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 5, 6, 9, 11, 14, 16, 19, 20, 20, 21,
+ 19, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 5, 4, 4, 3, 3, 3, 5, 5, 6, 7, 10, 12, 14, 18, 20, 20, 21, 22,
+ 21, 20, 18, 18, 17, 16, 14, 12, 11, 11, 10, 10, 8, 7, 7, 5, 4, 4, 4, 5, 6, 6, 6, 7, 11, 13, 16, 19, 20, 21, 21, 22,
+ 22, 21, 20, 19, 18, 16, 15, 14, 12, 12, 12, 10, 9, 8, 7, 6, 5, 4, 5, 6, 6, 7, 7, 8, 12, 13, 17, 19, 21, 21, 21, 21,
+ 23, 22, 21, 20, 18, 17, 16, 16, 14, 14, 13, 12, 10, 10, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 21, 21, 21, 20,
+ 23, 22, 22, 21, 20, 18, 17, 16, 15, 15, 15, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 10, 13, 16, 19, 20, 20, 21, 21, 20,
+ 24, 23, 22, 21, 20, 19, 17, 17, 16, 16, 15, 15, 14, 14, 11, 9, 6, 6, 6, 6, 8, 8, 10, 12, 15, 17, 19, 20, 20, 21, 21, 20,
+ ]
+ b: [
+ 11, 9, 9, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 5, 5, 5, 4, 5, 4, 4, 4, 7, 7, 3, 3, 5, 8, 8, 9, 11, 11,
+ 11, 10, 8, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 4, 5, 4, 4, 4, 4, 4, 2, 2, 5, 8, 8, 9, 11, 11,
+ 10, 10, 7, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 3, 1, 1, 5, 7, 9, 9, 11, 11,
+ 10, 9, 8, 7, 6, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 3, 2, 1, 1, 4, 7, 9, 10, 12, 13,
+ 9, 9, 8, 7, 6, 5, 5, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 4, 3, 2, 1, 1, 1, 4, 6, 9, 10, 12, 13,
+ 9, 9, 9, 7, 6, 6, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 1, 1, 1, 2, 6, 9, 10, 11, 12,
+ 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 1, 2, 5, 9, 11, 11, 11,
+ 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 8, 11, 11, 11,
+ 9, 9, 8, 7, 7, 7, 6, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 7, 11, 11, 11,
+ 9, 9, 8, 7, 7, 7, 6, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 0, 0, 0, 0, 0, 1, 3, 7, 10, 12, 13,
+ 9, 9, 8, 8, 7, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 2, 2, 3, 2, 1, 0, 0, 0, 0, 0, 0, 1, 4, 7, 10, 13, 13,
+ 9, 8, 8, 8, 7, 7, 6, 5, 3, 3, 2, 1, 1, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 7, 11, 13, 13,
+ 9, 8, 8, 7, 7, 6, 6, 5, 3, 3, 2, 2, 2, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 6, 11, 13, 13,
+ 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 2, 1, 1, 1, 1, 2, 3, 3, 3, 1, 1, 0, 0, 1, 1, 2, 3, 6, 10, 12, 13,
+ 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 1, 1, 1, 2, 2, 4, 6, 10, 13, 13,
+ 9, 9, 8, 8, 8, 6, 6, 4, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 3, 3, 1, 1, 1, 1, 2, 2, 3, 4, 6, 13, 14, 14,
+ 9, 9, 8, 8, 8, 8, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 3, 5, 9, 13, 15, 15,
+ 10, 9, 9, 9, 10, 10, 6, 6, 5, 3, 2, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16,
+ 10, 10, 9, 9, 10, 10, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16,
+ 11, 10, 9, 9, 10, 10, 7, 6, 6, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 6, 9, 13, 15, 16,
+ 12, 10, 10, 10, 10, 10, 7, 6, 6, 4, 3, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5, 5, 6, 7, 10, 14, 15, 16,
+ 12, 11, 10, 10, 10, 10, 8, 7, 6, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 8, 11, 15, 16, 16,
+ 12, 12, 11, 11, 10, 10, 9, 8, 7, 6, 6, 4, 4, 3, 2, 2, 2, 2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 10, 13, 15, 16, 15,
+ 12, 12, 12, 12, 11, 10, 10, 8, 8, 7, 7, 6, 5, 5, 3, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 7, 10, 13, 14, 16, 16, 16,
+ 12, 12, 13, 12, 12, 11, 11, 9, 8, 8, 8, 7, 6, 6, 5, 4, 3, 3, 3, 5, 6, 6, 6, 6, 7, 9, 11, 14, 15, 17, 17, 16,
+ 13, 13, 13, 13, 12, 12, 11, 11, 10, 10, 9, 8, 7, 7, 6, 5, 5, 4, 4, 4, 5, 6, 6, 6, 9, 10, 12, 14, 16, 17, 17, 18,
+ 13, 13, 14, 13, 13, 13, 12, 12, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 6, 6, 6, 7, 9, 11, 12, 16, 17, 17, 17, 18,
+ 15, 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 5, 6, 7, 7, 8, 10, 12, 14, 17, 17, 18, 17, 17,
+ 16, 16, 16, 16, 15, 14, 14, 14, 13, 13, 12, 11, 11, 9, 8, 7, 7, 6, 6, 6, 7, 7, 8, 8, 10, 12, 16, 17, 18, 18, 17, 17,
+ 18, 17, 17, 16, 16, 15, 14, 14, 14, 14, 14, 13, 11, 11, 9, 8, 7, 7, 6, 6, 7, 7, 8, 9, 10, 13, 16, 17, 17, 18, 17, 16,
+ 18, 17, 17, 17, 16, 15, 15, 15, 15, 15, 15, 14, 14, 12, 10, 9, 7, 7, 6, 6, 7, 7, 8, 9, 11, 14, 16, 16, 17, 17, 16, 15,
+ 18, 18, 17, 17, 17, 16, 15, 15, 15, 16, 15, 15, 14, 14, 12, 9, 7, 6, 6, 6, 7, 7, 9, 10, 12, 14, 15, 16, 16, 16, 15, 15,
+ ]
+ - ct: 5500
+ r: [
+ 19, 18, 17, 16, 15, 13, 11, 10, 9, 9, 9, 8, 8, 8, 8, 8, 8, 9, 10, 10, 8, 8, 9, 9, 9, 11, 14, 15, 16, 16, 18, 18,
+ 18, 18, 17, 15, 14, 13, 11, 11, 9, 9, 8, 8, 7, 7, 7, 7, 7, 8, 9, 8, 8, 8, 9, 9, 8, 8, 11, 14, 16, 17, 17, 18,
+ 18, 17, 17, 15, 14, 13, 12, 11, 9, 9, 8, 7, 7, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 16, 17, 18, 18,
+ 17, 17, 16, 15, 14, 12, 11, 11, 9, 8, 7, 7, 6, 5, 5, 5, 5, 6, 8, 7, 8, 8, 8, 6, 6, 6, 9, 12, 16, 16, 18, 19,
+ 17, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 7, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 6, 6, 8, 11, 15, 16, 17, 19,
+ 18, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 6, 5, 5, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 10, 14, 16, 17, 18,
+ 18, 17, 16, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 4, 5, 5, 5, 5, 5, 6, 10, 13, 16, 16, 17,
+ 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 16, 16, 16,
+ 17, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 11, 16, 16, 16,
+ 17, 16, 15, 12, 12, 12, 10, 8, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 5, 7, 10, 16, 16, 16,
+ 16, 16, 14, 12, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 4, 3, 3, 2, 2, 2, 3, 3, 4, 4, 7, 10, 14, 16, 16,
+ 16, 15, 14, 13, 12, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 4, 3, 3, 3, 2, 2, 3, 3, 3, 5, 7, 10, 14, 15, 16,
+ 16, 15, 15, 13, 11, 11, 11, 9, 7, 5, 5, 4, 3, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 9, 14, 15, 16,
+ 16, 15, 15, 13, 11, 11, 11, 10, 7, 5, 4, 4, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 10, 13, 15, 17,
+ 15, 15, 14, 12, 11, 11, 11, 10, 7, 4, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 3, 3, 3, 5, 6, 9, 13, 16, 17,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 5, 7, 10, 15, 17, 17,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 16, 17, 18,
+ 16, 15, 15, 12, 12, 11, 10, 9, 6, 4, 4, 3, 1, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 7, 12, 15, 17, 18,
+ 16, 16, 15, 12, 12, 11, 10, 8, 6, 5, 4, 3, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 8, 12, 15, 17, 18,
+ 15, 15, 15, 13, 12, 12, 10, 8, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 7, 9, 12, 16, 17, 18,
+ 15, 15, 15, 13, 13, 12, 10, 8, 7, 5, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 7, 9, 13, 16, 18, 18,
+ 15, 15, 15, 14, 13, 12, 10, 8, 7, 6, 5, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 3, 5, 5, 6, 8, 10, 14, 17, 18, 19,
+ 16, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 1, 2, 3, 3, 5, 5, 6, 7, 9, 12, 15, 18, 18, 19,
+ 17, 16, 16, 16, 13, 12, 11, 10, 9, 8, 7, 7, 6, 5, 4, 2, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 16, 18, 19, 19,
+ 18, 18, 17, 16, 14, 13, 12, 10, 9, 8, 8, 7, 7, 5, 5, 3, 2, 2, 4, 4, 5, 5, 5, 5, 7, 10, 12, 16, 17, 19, 19, 20,
+ 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 9, 9, 7, 6, 5, 4, 3, 3, 4, 4, 5, 5, 5, 6, 7, 11, 15, 17, 18, 19, 19, 20,
+ 19, 18, 18, 17, 16, 14, 13, 11, 10, 10, 9, 9, 8, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 7, 9, 11, 15, 17, 19, 19, 20, 21,
+ 20, 19, 19, 19, 17, 16, 13, 12, 11, 11, 10, 9, 9, 7, 6, 5, 5, 4, 4, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 19, 20, 20,
+ 21, 20, 20, 19, 19, 16, 16, 13, 12, 12, 11, 11, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 8, 10, 13, 17, 19, 19, 20, 20, 20,
+ 22, 21, 20, 20, 19, 17, 16, 14, 13, 13, 14, 12, 11, 9, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 11, 15, 17, 19, 19, 20, 20, 20,
+ 22, 22, 21, 20, 19, 18, 16, 15, 14, 14, 15, 15, 13, 11, 9, 8, 7, 5, 5, 6, 7, 8, 9, 10, 13, 16, 18, 18, 19, 20, 19, 19,
+ 22, 22, 21, 20, 19, 19, 16, 16, 15, 15, 15, 15, 15, 13, 10, 9, 7, 5, 5, 6, 8, 8, 10, 11, 14, 16, 18, 18, 19, 19, 19, 19,
+ ]
+ g: [
+ 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 7, 8, 8, 6, 6, 7, 7, 7, 9, 12, 13, 13, 14, 14, 14,
+ 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 7, 7, 6, 6, 7, 7, 6, 6, 10, 12, 13, 14, 14, 14,
+ 16, 15, 15, 13, 13, 11, 10, 9, 8, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 5, 8, 12, 14, 15, 15, 16,
+ 15, 15, 14, 13, 12, 11, 10, 10, 8, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 5, 5, 5, 7, 11, 14, 15, 15, 17,
+ 16, 15, 14, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 4, 3, 3, 4, 4, 6, 6, 5, 5, 4, 4, 5, 6, 10, 14, 14, 16, 16,
+ 16, 15, 15, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 9, 13, 14, 15, 16,
+ 16, 15, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 15, 15,
+ 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 6, 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15,
+ 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 10, 14, 15, 14,
+ 16, 15, 14, 12, 11, 11, 9, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 6, 10, 15, 15, 15,
+ 15, 15, 13, 12, 11, 11, 10, 8, 7, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 6, 9, 13, 15, 15,
+ 15, 14, 13, 12, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 4, 6, 9, 13, 14, 15,
+ 15, 14, 14, 13, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15,
+ 15, 14, 14, 13, 11, 11, 11, 10, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15,
+ 15, 14, 13, 12, 11, 11, 10, 9, 7, 4, 4, 3, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 5, 8, 13, 15, 16,
+ 15, 15, 13, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 6, 8, 15, 16, 16,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 6, 11, 15, 16, 17,
+ 15, 15, 15, 12, 12, 11, 10, 10, 7, 4, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 6, 11, 15, 17, 17,
+ 15, 15, 15, 12, 12, 12, 10, 9, 7, 5, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 15, 17, 17,
+ 15, 15, 15, 13, 12, 12, 10, 9, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 6, 8, 12, 15, 17, 17,
+ 15, 15, 15, 13, 13, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 6, 9, 13, 16, 17, 18,
+ 15, 15, 15, 14, 13, 13, 10, 9, 8, 6, 6, 5, 3, 2, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 10, 14, 17, 18, 18,
+ 15, 16, 16, 15, 13, 13, 11, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 12, 15, 17, 18, 18,
+ 16, 16, 16, 16, 13, 13, 11, 10, 9, 8, 7, 7, 6, 6, 4, 2, 2, 2, 2, 3, 5, 5, 5, 5, 6, 8, 11, 14, 16, 18, 18, 18,
+ 17, 17, 17, 16, 14, 13, 13, 11, 10, 9, 8, 8, 7, 6, 5, 4, 2, 2, 4, 5, 5, 5, 5, 5, 7, 9, 12, 15, 17, 18, 18, 18,
+ 18, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 3, 4, 4, 5, 5, 5, 6, 7, 10, 15, 16, 18, 18, 18, 19,
+ 19, 18, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 4, 4, 6, 6, 6, 6, 8, 10, 15, 16, 18, 18, 18, 19,
+ 20, 19, 19, 19, 17, 16, 14, 13, 12, 11, 11, 10, 9, 7, 7, 6, 5, 4, 4, 5, 6, 6, 6, 7, 9, 11, 15, 17, 18, 18, 18, 18,
+ 22, 20, 20, 20, 19, 17, 16, 14, 13, 13, 12, 11, 10, 9, 7, 6, 6, 5, 5, 6, 7, 7, 7, 8, 9, 12, 16, 18, 18, 19, 18, 18,
+ 22, 22, 21, 20, 19, 18, 17, 16, 14, 14, 15, 13, 11, 10, 9, 8, 7, 6, 5, 6, 7, 8, 8, 9, 10, 14, 17, 18, 18, 19, 18, 17,
+ 22, 22, 22, 21, 20, 19, 17, 17, 16, 16, 16, 16, 14, 12, 10, 8, 7, 6, 6, 7, 8, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 17,
+ 22, 22, 22, 21, 20, 20, 18, 17, 16, 16, 17, 17, 16, 14, 11, 10, 8, 6, 6, 7, 8, 8, 10, 11, 14, 17, 18, 18, 18, 18, 18, 17,
+ ]
+ b: [
+ 13, 12, 12, 12, 11, 9, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 6, 7, 7, 6, 6, 5, 5, 5, 6, 9, 9, 9, 10, 9, 8,
+ 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 5, 5, 4, 4, 7, 9, 9, 10, 10, 9,
+ 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 3, 3, 6, 9, 10, 11, 10, 11,
+ 13, 13, 12, 11, 11, 10, 9, 9, 7, 7, 6, 5, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 4, 3, 3, 5, 8, 10, 11, 11, 12,
+ 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 5, 5, 5, 4, 3, 2, 3, 4, 8, 11, 11, 11, 12,
+ 13, 13, 13, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 12,
+ 14, 14, 13, 11, 11, 10, 9, 8, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11,
+ 14, 14, 13, 11, 11, 10, 9, 8, 7, 7, 6, 5, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 11, 11, 11,
+ 14, 14, 13, 12, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 11,
+ 14, 13, 13, 12, 12, 11, 10, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13,
+ 13, 13, 13, 12, 12, 12, 11, 9, 7, 6, 4, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13,
+ 14, 13, 13, 12, 12, 11, 11, 9, 7, 6, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 13, 13,
+ 14, 13, 13, 13, 12, 11, 11, 9, 7, 6, 5, 4, 2, 1, 1, 1, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 7, 12, 13, 13,
+ 14, 13, 13, 13, 12, 12, 12, 11, 7, 5, 5, 4, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 13,
+ 14, 14, 13, 13, 12, 12, 11, 11, 7, 5, 4, 3, 1, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 3, 3, 3, 4, 4, 7, 12, 13, 14,
+ 14, 14, 13, 13, 12, 12, 11, 11, 8, 5, 4, 3, 1, 1, 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 8, 13, 15, 15,
+ 14, 15, 14, 13, 13, 12, 11, 11, 8, 5, 4, 3, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 10, 15, 15, 16,
+ 15, 15, 15, 14, 13, 13, 12, 11, 8, 5, 4, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 6, 11, 15, 16, 16,
+ 15, 15, 15, 14, 14, 14, 12, 11, 8, 6, 5, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 7, 11, 15, 16, 16,
+ 15, 15, 15, 15, 14, 14, 12, 11, 9, 7, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 15, 16, 16,
+ 15, 16, 15, 15, 15, 14, 13, 11, 9, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 7, 9, 12, 16, 16, 16,
+ 15, 16, 16, 15, 15, 15, 13, 11, 10, 8, 7, 6, 4, 3, 2, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 8, 10, 14, 16, 16, 16,
+ 16, 16, 17, 16, 15, 15, 14, 12, 11, 9, 8, 8, 6, 5, 3, 2, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 9, 12, 15, 16, 16, 16,
+ 16, 17, 18, 17, 16, 15, 14, 13, 11, 10, 9, 9, 8, 6, 5, 3, 3, 3, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 16, 16, 16,
+ 17, 18, 18, 18, 17, 16, 16, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 5, 6, 6, 6, 6, 6, 8, 10, 12, 16, 17, 17, 17, 16,
+ 18, 18, 18, 18, 18, 17, 16, 14, 13, 12, 11, 11, 8, 8, 6, 6, 5, 4, 5, 5, 6, 6, 6, 7, 8, 11, 15, 16, 17, 17, 16, 16,
+ 18, 19, 19, 19, 19, 17, 17, 15, 14, 13, 12, 11, 11, 8, 7, 6, 6, 5, 5, 5, 7, 7, 7, 8, 9, 11, 15, 17, 17, 17, 16, 16,
+ 20, 20, 20, 20, 19, 19, 17, 17, 15, 14, 14, 12, 11, 9, 8, 7, 7, 6, 6, 6, 8, 8, 8, 8, 9, 12, 15, 18, 18, 16, 16, 16,
+ 22, 22, 22, 22, 21, 20, 19, 18, 17, 16, 15, 14, 12, 11, 10, 8, 8, 7, 7, 7, 8, 8, 8, 9, 10, 13, 17, 18, 18, 16, 16, 15,
+ 23, 22, 22, 22, 22, 21, 20, 20, 18, 18, 19, 16, 14, 13, 11, 10, 9, 8, 7, 7, 8, 9, 9, 10, 11, 15, 17, 18, 17, 17, 16, 14,
+ 23, 23, 23, 23, 23, 22, 21, 21, 20, 20, 20, 19, 18, 15, 12, 11, 10, 8, 8, 8, 9, 9, 10, 11, 13, 17, 17, 17, 17, 16, 15, 13,
+ 23, 23, 24, 24, 23, 23, 22, 21, 21, 21, 20, 20, 19, 17, 14, 12, 11, 9, 8, 9, 9, 10, 10, 12, 15, 17, 17, 17, 16, 16, 15, 13,
+ ]
+ - ct: 8500
+ r: [
+ 18, 17, 16, 15, 13, 12, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 12, 12, 8, 8, 10, 13, 14, 15, 17, 18,
+ 17, 17, 16, 14, 12, 11, 10, 10, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 14, 15, 17, 17,
+ 17, 16, 15, 13, 12, 11, 10, 10, 8, 8, 7, 7, 7, 6, 7, 7, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 9, 12, 14, 15, 17, 17,
+ 16, 16, 15, 13, 12, 11, 10, 10, 9, 7, 7, 7, 6, 6, 5, 5, 6, 6, 7, 7, 7, 7, 7, 5, 5, 5, 8, 11, 14, 15, 17, 19,
+ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 5, 7, 10, 13, 15, 17, 19,
+ 15, 15, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 5, 5, 5, 4, 4, 4, 4, 4, 4, 5, 6, 9, 13, 15, 16, 17,
+ 15, 15, 13, 12, 11, 11, 9, 8, 8, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 5, 8, 13, 15, 15, 16,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 4, 3, 3, 3, 2, 3, 3, 3, 3, 4, 6, 10, 14, 15, 15,
+ 15, 13, 13, 11, 11, 10, 9, 8, 7, 6, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 10, 13, 16, 16,
+ 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 9, 13, 16, 17,
+ 14, 13, 12, 11, 10, 10, 9, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 3, 4, 6, 9, 13, 16, 17,
+ 14, 13, 12, 12, 10, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 13, 15, 17,
+ 14, 13, 12, 11, 10, 9, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 12, 15, 16,
+ 13, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 4, 6, 8, 12, 15, 16,
+ 13, 13, 12, 11, 11, 9, 8, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 3, 3, 4, 6, 9, 15, 16, 16,
+ 13, 13, 12, 12, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 10, 15, 17, 18,
+ 14, 13, 13, 12, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 11, 15, 17, 18,
+ 14, 13, 13, 12, 12, 11, 9, 7, 7, 4, 3, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 4, 4, 5, 7, 11, 15, 17, 18,
+ 15, 13, 13, 12, 12, 11, 8, 7, 6, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 7, 11, 15, 17, 18,
+ 15, 14, 13, 13, 12, 11, 8, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 3, 5, 5, 7, 8, 11, 15, 17, 18,
+ 15, 14, 14, 13, 12, 11, 9, 8, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 2, 3, 3, 3, 5, 5, 6, 8, 9, 13, 16, 18, 18,
+ 15, 14, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 17, 18, 18,
+ 16, 16, 15, 14, 13, 12, 10, 9, 8, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 10, 13, 15, 17, 18, 18,
+ 16, 16, 15, 14, 14, 12, 11, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 1, 2, 4, 4, 5, 5, 5, 7, 9, 11, 14, 16, 18, 18, 19,
+ 16, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 4, 5, 5, 6, 9, 10, 13, 15, 18, 18, 19, 19,
+ 17, 17, 16, 15, 15, 14, 12, 11, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 5, 6, 6, 9, 11, 13, 17, 18, 19, 19, 20,
+ 19, 18, 17, 17, 15, 15, 13, 12, 11, 11, 10, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 12, 15, 18, 19, 19, 19, 20,
+ 19, 19, 18, 17, 17, 16, 14, 13, 12, 12, 11, 10, 9, 8, 7, 6, 6, 5, 5, 6, 6, 6, 7, 8, 11, 12, 16, 18, 19, 19, 19, 19,
+ 20, 19, 19, 18, 17, 16, 15, 14, 13, 13, 12, 12, 10, 9, 8, 7, 6, 5, 5, 6, 6, 7, 8, 9, 11, 14, 17, 18, 19, 19, 19, 19,
+ 20, 20, 19, 18, 18, 17, 15, 15, 14, 14, 14, 13, 12, 11, 9, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 17, 18, 18, 19, 18, 18,
+ 21, 20, 20, 19, 18, 18, 15, 15, 14, 14, 14, 14, 13, 13, 11, 9, 6, 5, 5, 6, 7, 7, 9, 10, 13, 15, 18, 18, 18, 18, 18, 18,
+ ]
+ g: [
+ 16, 16, 15, 13, 12, 10, 9, 8, 7, 7, 7, 6, 6, 6, 7, 7, 7, 6, 6, 6, 6, 6, 11, 11, 6, 6, 9, 12, 12, 13, 15, 15,
+ 16, 15, 14, 13, 11, 10, 9, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 9, 12, 12, 13, 15, 15,
+ 15, 15, 14, 12, 11, 11, 9, 9, 8, 7, 6, 6, 6, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 4, 5, 8, 11, 13, 14, 15, 15,
+ 15, 15, 14, 12, 11, 10, 9, 9, 8, 6, 6, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 6, 6, 4, 4, 4, 7, 11, 13, 14, 15, 17,
+ 15, 15, 13, 12, 11, 10, 9, 9, 8, 6, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 4, 4, 3, 4, 6, 9, 13, 14, 15, 17,
+ 15, 14, 13, 12, 11, 10, 9, 8, 8, 6, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 14, 15,
+ 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 4, 7, 12, 14, 14, 14,
+ 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 14, 14, 14,
+ 14, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 5, 10, 14, 14, 14,
+ 15, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 5, 10, 13, 15, 16,
+ 14, 13, 12, 11, 11, 10, 9, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 2, 2, 2, 2, 3, 6, 9, 13, 16, 16,
+ 14, 13, 12, 11, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 4, 6, 8, 13, 16, 16,
+ 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 4, 5, 8, 13, 15, 16,
+ 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 5, 8, 12, 14, 15,
+ 14, 13, 12, 11, 10, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 3, 3, 5, 8, 12, 15, 15,
+ 14, 13, 12, 11, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 3, 4, 5, 8, 14, 16, 16,
+ 14, 13, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 10, 15, 16, 17,
+ 14, 13, 13, 13, 12, 12, 9, 7, 7, 4, 3, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 15, 17, 17,
+ 14, 14, 13, 12, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 7, 11, 15, 17, 17,
+ 15, 14, 13, 13, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 7, 11, 15, 17, 17,
+ 16, 14, 14, 13, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 6, 8, 12, 16, 17, 17,
+ 16, 15, 14, 14, 13, 12, 10, 9, 7, 6, 5, 4, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 8, 9, 13, 17, 17, 17,
+ 16, 15, 15, 14, 13, 12, 10, 10, 8, 7, 6, 5, 4, 3, 2, 0, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 8, 11, 14, 17, 17, 17,
+ 16, 16, 15, 15, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 2, 1, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 13, 16, 17, 17, 17,
+ 17, 17, 15, 15, 14, 13, 12, 11, 10, 9, 9, 7, 6, 6, 5, 3, 2, 2, 3, 4, 5, 5, 5, 6, 6, 9, 11, 14, 16, 18, 18, 18,
+ 17, 17, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 2, 3, 4, 5, 5, 5, 6, 9, 10, 13, 15, 18, 18, 18, 18,
+ 18, 17, 17, 16, 15, 15, 14, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 4, 5, 5, 6, 6, 9, 11, 13, 17, 18, 18, 18, 18,
+ 20, 19, 18, 18, 16, 16, 14, 13, 12, 11, 11, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 11, 15, 18, 18, 18, 18, 18,
+ 20, 20, 19, 18, 18, 17, 15, 14, 13, 13, 12, 11, 10, 9, 8, 6, 6, 5, 5, 6, 6, 7, 7, 8, 11, 12, 16, 18, 18, 18, 18, 18,
+ 22, 21, 20, 19, 18, 17, 17, 17, 15, 15, 14, 13, 11, 10, 8, 7, 6, 6, 6, 6, 7, 7, 8, 8, 11, 14, 17, 18, 18, 18, 18, 17,
+ 22, 22, 21, 20, 19, 18, 17, 17, 17, 16, 16, 15, 14, 12, 10, 8, 7, 6, 6, 7, 7, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 16,
+ 22, 22, 22, 21, 20, 19, 18, 17, 17, 17, 16, 16, 15, 15, 12, 10, 7, 6, 6, 7, 8, 8, 9, 11, 14, 16, 18, 18, 18, 18, 17, 16,
+ ]
+ b: [
+ 13, 13, 13, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 9, 9, 4, 4, 7, 9, 9, 9, 10, 10,
+ 13, 13, 12, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 7, 9, 9, 10, 10, 10,
+ 13, 13, 12, 11, 10, 10, 9, 9, 7, 7, 6, 6, 6, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 4, 3, 3, 6, 9, 10, 11, 11, 11,
+ 13, 13, 12, 11, 10, 10, 9, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5, 5, 3, 2, 3, 5, 9, 10, 11, 11, 13,
+ 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 3, 2, 2, 3, 5, 8, 11, 11, 12, 13,
+ 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 11,
+ 13, 13, 12, 11, 11, 10, 9, 8, 8, 7, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11,
+ 13, 13, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 5, 10, 12, 12, 11,
+ 13, 13, 13, 11, 11, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 12, 12,
+ 14, 13, 13, 12, 11, 11, 10, 8, 7, 6, 4, 4, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 13, 14,
+ 13, 13, 12, 12, 11, 11, 10, 8, 7, 6, 4, 3, 2, 2, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 2, 2, 2, 4, 8, 12, 14, 14,
+ 13, 13, 12, 12, 11, 11, 11, 8, 7, 6, 5, 3, 2, 2, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 14, 14,
+ 13, 13, 12, 12, 12, 11, 11, 8, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 8, 12, 14, 14,
+ 13, 13, 13, 12, 12, 11, 11, 8, 6, 6, 5, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 14,
+ 13, 13, 13, 12, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 14, 14,
+ 14, 14, 13, 13, 13, 11, 10, 8, 7, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 8, 14, 16, 16,
+ 14, 14, 13, 13, 13, 12, 11, 9, 7, 5, 4, 2, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 6, 10, 15, 16, 16,
+ 15, 14, 14, 14, 15, 15, 11, 9, 8, 5, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7, 11, 15, 16, 17,
+ 15, 15, 14, 14, 15, 15, 12, 10, 9, 6, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 6, 7, 11, 15, 16, 16,
+ 15, 15, 15, 15, 15, 15, 12, 10, 9, 6, 5, 2, 1, 0, 0, 0, 1, 1, 1, 3, 3, 3, 4, 4, 5, 6, 6, 7, 11, 15, 16, 17,
+ 16, 16, 15, 16, 15, 15, 12, 11, 10, 7, 5, 3, 2, 1, 0, 0, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 9, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 15, 13, 12, 10, 9, 6, 5, 3, 2, 1, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 9, 14, 16, 17, 16,
+ 16, 16, 16, 16, 16, 15, 14, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 9, 12, 15, 16, 16, 16,
+ 17, 17, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 6, 5, 4, 2, 2, 2, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 17, 16, 16,
+ 17, 17, 18, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 7, 5, 4, 2, 3, 4, 6, 6, 6, 6, 7, 8, 10, 12, 15, 17, 17, 17, 16,
+ 18, 18, 18, 18, 18, 17, 16, 15, 14, 13, 12, 11, 9, 8, 6, 5, 4, 4, 4, 5, 6, 6, 7, 7, 10, 11, 14, 16, 17, 17, 17, 17,
+ 18, 18, 19, 19, 19, 18, 17, 16, 15, 14, 14, 11, 10, 9, 7, 6, 6, 5, 5, 5, 6, 7, 7, 7, 10, 12, 14, 17, 17, 17, 17, 17,
+ 20, 20, 20, 20, 20, 19, 18, 17, 16, 15, 14, 13, 11, 10, 9, 7, 6, 6, 6, 6, 7, 7, 7, 8, 11, 12, 15, 18, 18, 17, 17, 16,
+ 22, 21, 21, 21, 21, 21, 20, 19, 17, 16, 16, 14, 13, 11, 10, 8, 7, 7, 7, 7, 8, 8, 8, 9, 11, 13, 17, 18, 18, 17, 16, 15,
+ 23, 22, 22, 22, 22, 21, 21, 20, 19, 19, 18, 16, 14, 13, 11, 9, 8, 8, 8, 8, 8, 8, 9, 10, 12, 15, 18, 18, 18, 17, 16, 14,
+ 23, 24, 24, 23, 23, 22, 22, 21, 21, 20, 20, 19, 18, 15, 13, 11, 9, 8, 8, 8, 9, 9, 10, 11, 13, 16, 18, 18, 17, 17, 15, 14,
+ 24, 24, 24, 24, 24, 23, 22, 22, 22, 22, 20, 20, 19, 18, 15, 13, 10, 9, 9, 9, 9, 10, 11, 12, 15, 17, 18, 18, 17, 16, 15, 13,
+ ]
+...
diff --git a/src/ipa/mali-c55/data/meson.build b/src/ipa/mali-c55/data/meson.build
new file mode 100644
index 00000000..8a5fdd36
--- /dev/null
+++ b/src/ipa/mali-c55/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'imx415.yaml',
+ 'uncalibrated.yaml'
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'mali-c55')
diff --git a/src/ipa/mali-c55/data/uncalibrated.yaml b/src/ipa/mali-c55/data/uncalibrated.yaml
new file mode 100644
index 00000000..6dcc0295
--- /dev/null
+++ b/src/ipa/mali-c55/data/uncalibrated.yaml
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+...
diff --git a/src/ipa/mali-c55/ipa_context.cpp b/src/ipa/mali-c55/ipa_context.cpp
new file mode 100644
index 00000000..99f76ecd
--- /dev/null
+++ b/src/ipa/mali-c55/ipa_context.cpp
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * ipa_context.cpp - Mali-C55 IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::mali_c55 {
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief Active state for algorithms
+ *
+ * The active state contains all algorithm-specific data that needs to be
+ * maintained by algorithms across frames. Unlike the session configuration,
+ * the active state is mutable and constantly updated by algorithms. The active
+ * state is accessible through the IPAContext structure.
+ *
+ * The active state stores two distinct categories of information:
+ *
+ * - The consolidated value of all algorithm controls. Requests passed to
+ * the queueRequest() function store values for controls that the
+ * application wants to modify for that particular frame, and the
+ * queueRequest() function updates the active state with those values.
+ * The active state thus contains a consolidated view of the value of all
+ * controls handled by the algorithm.
+ *
+ * - The value of parameters computed by the algorithm when running in auto
+ * mode. Algorithms running in auto mode compute new parameters every
+ * time statistics buffers are received (either synchronously, or
+ * possibly in a background thread). The latest computed value of those
+ * parameters is stored in the active state in the process() function.
+ *
+ * Each of the members in the active state belongs to a specific algorithm. A
+ * member may be read by any algorithm, but shall only be written by its owner.
+ */
+
+/**
+ * \struct IPAFrameContext
+ * \brief Per-frame context for algorithms
+ *
+ * The frame context stores two distinct categories of information:
+ *
+ * - The value of the controls to be applied to the frame. These values are
+ * typically set in the queueRequest() function, from the consolidated
+ * control values stored in the active state. The frame context thus stores
+ * values for all controls related to the algorithm, not limited to the
+ * controls specified in the corresponding request, but consolidated from all
+ * requests that have been queued so far.
+ *
+ * For controls that can be set manually or computed by an algorithm
+ * (depending on the algorithm operation mode), such as for instance the
+ * colour gains for the AWB algorithm, the control value will be stored in
+ * the frame context in the queueRequest() function only when operating in
+ * manual mode. When operating in auto mode, the values are computed by the
+ * algorithm in process(), stored in the active state, and copied to the
+ * frame context in prepare(), just before being stored in the ISP parameters
+ * buffer.
+ *
+ * The queueRequest() function can also store ancillary data in the frame
+ * context, such as flags to indicate if (and what) control values have
+ * changed compared to the previous request.
+ *
+ * - Status information computed by the algorithm for a frame. For instance,
+ * the colour temperature estimated by the AWB algorithm from ISP statistics
+ * calculated on a frame is stored in the frame context for that frame in
+ * the process() function.
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::activeState
+ * \brief The IPA active state, storing the latest state for all algorithms
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of per-frame contexts
+ */
+
+} /* namespace libcamera::ipa::mali_c55 */
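To make the flow described in the comments above concrete, the sketch below routes a single colour-gain control through the stages they describe, for a hypothetical ExampleAwb algorithm. The class name, its computeRedGain() helper and the simplified signatures are illustrative only and do not appear in this patch.

	/* Illustrative sketch, not part of the patch. */
	void ExampleAwb::queueRequest(IPAContext &context, uint32_t frame,
				      IPAFrameContext &frameContext,
				      const ControlList &controls)
	{
		/* Consolidate the value requested by the application. */
		const auto &gains = controls.get(controls::ColourGains);
		if (gains)
			context.activeState.awb.rGain = (*gains)[0];

		/* The frame context always carries a value, set or inherited. */
		frameContext.awb.rGain = context.activeState.awb.rGain;
	}

	void ExampleAwb::process(IPAContext &context, uint32_t frame,
				 IPAFrameContext &frameContext,
				 const mali_c55_stats_buffer *stats,
				 ControlList &metadata)
	{
		/* In auto mode, store freshly computed parameters for later frames. */
		context.activeState.awb.rGain = computeRedGain(stats);
	}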
diff --git a/src/ipa/mali-c55/ipa_context.h b/src/ipa/mali-c55/ipa_context.h
new file mode 100644
index 00000000..5e3e2fbd
--- /dev/null
+++ b/src/ipa/mali-c55/ipa_context.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * ipa_context.h - Mali-C55 IPA Context
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include <libipa/fc_queue.h>
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+struct IPASessionConfiguration {
+ struct {
+ utils::Duration minShutterSpeed;
+ utils::Duration maxShutterSpeed;
+ uint32_t defaultExposure;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+ } agc;
+
+ struct {
+ BayerFormat::Order bayerOrder;
+ utils::Duration lineDuration;
+ uint32_t blackLevel;
+ } sensor;
+};
+
+struct IPAActiveState {
+ struct {
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } automatic;
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } manual;
+ bool autoEnabled;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
+ uint32_t temperatureK;
+ } agc;
+
+ struct {
+ double rGain;
+ double bGain;
+ } awb;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } agc;
+
+ struct {
+ double rGain;
+ double bGain;
+ } awb;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
+};
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/mali-c55.cpp b/src/ipa/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..c6941a95
--- /dev/null
+++ b/src/ipa/mali-c55/mali-c55.cpp
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * mali-c55.cpp - Mali-C55 ISP image processing algorithms
+ */
+
+#include <map>
+#include <string.h>
+#include <vector>
+
+#include <linux/mali-c55-config.h>
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithms/algorithm.h"
+#include "libipa/camera_sensor_helper.h"
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAMaliC55)
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::mali_c55 {
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+class IPAMaliC55 : public IPAMaliC55Interface, public Module
+{
+public:
+ IPAMaliC55();
+
+ int init(const IPASettings &settings, const IPAConfigInfo &ipaConfig,
+ ControlInfoMap *ipaControls) override;
+ int start() override;
+ void stop() override;
+ int configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder,
+ ControlInfoMap *ipaControls) override;
+ void mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly) override;
+ void unmapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void queueRequest(const uint32_t request, const ControlList &controls) override;
+ void fillParams(unsigned int request, uint32_t bufferId) override;
+ void processStats(unsigned int request, unsigned int bufferId,
+ const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateSessionConfiguration(const IPACameraSensorInfo &info,
+ const ControlInfoMap &sensorControls,
+ BayerFormat::Order bayerOrder);
+ void updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls);
+ void setControls();
+
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
+
+ ControlInfoMap sensorControls_;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+namespace {
+
+} /* namespace */
+
+IPAMaliC55::IPAMaliC55()
+ : context_(kMaxFrameContexts)
+{
+}
+
+std::string IPAMaliC55::logPrefix() const
+{
+ return "mali-c55";
+}
+
+int IPAMaliC55::init(const IPASettings &settings, const IPAConfigInfo &ipaConfig,
+ ControlInfoMap *ipaControls)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPAMaliC55, Error)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ return -ENODEV;
+ }
+
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPAMaliC55, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->contains("algorithms")) {
+ LOG(IPAMaliC55, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ updateControls(ipaConfig.sensorInfo, ipaConfig.sensorControls, ipaControls);
+
+ return 0;
+}
+
+void IPAMaliC55::setControls()
+{
+ IPAActiveState &activeState = context_.activeState;
+ uint32_t exposure;
+ uint32_t gain;
+
+ if (activeState.agc.autoEnabled) {
+ exposure = activeState.agc.automatic.exposure;
+ gain = camHelper_->gainCode(activeState.agc.automatic.sensorGain);
+ } else {
+ exposure = activeState.agc.manual.exposure;
+ gain = camHelper_->gainCode(activeState.agc.manual.sensorGain);
+ }
+
+ ControlList ctrls(sensorControls_);
+ ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
+
+ setSensorControls.emit(ctrls);
+}
+
+int IPAMaliC55::start()
+{
+ return 0;
+}
+
+void IPAMaliC55::stop()
+{
+ context_.frameContexts.clear();
+}
+
+void IPAMaliC55::updateSessionConfiguration(const IPACameraSensorInfo &info,
+ const ControlInfoMap &sensorControls,
+ BayerFormat::Order bayerOrder)
+{
+ context_.configuration.sensor.bayerOrder = bayerOrder;
+
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>();
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>();
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>();
+
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ int32_t minGain = v4l2Gain.min().get<int32_t>();
+ int32_t maxGain = v4l2Gain.max().get<int32_t>();
+
+ /*
+ * When the AGC computes the new exposure values for a frame, it needs
+ * to know the limits for shutter speed and analogue gain.
+	 * As those limits depend on the sensor, update them from the sensor
+	 * controls.
+ *
+ * \todo take VBLANK into account for maximum shutter speed
+ */
+ context_.configuration.sensor.lineDuration = info.minLineLength * 1.0s / info.pixelRate;
+ context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.defaultExposure = defExposure;
+ context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain);
+ context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain);
+
+ if (camHelper_->blackLevel().has_value()) {
+ /*
+ * The black level from CameraSensorHelper is a 16-bit value.
+ * The Mali-C55 ISP expects 20-bit settings, so we shift it to
+ * the appropriate width
+		 * the appropriate width.
+ context_.configuration.sensor.blackLevel =
+ camHelper_->blackLevel().value() << 4;
+ }
+}
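As a worked example of the black level shift (numbers assumed, not taken from the patch): a 10-bit sensor with a native black level of 64 is reported by CameraSensorHelper in the 16-bit range as 64 / 1024 x 65536 = 4096, and shifting left by 4 gives 4096 x 16 = 65536, the same 6.25 % of the 20-bit full scale (1048576) that the ISP register expects.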
+
+void IPAMaliC55::updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
+{
+ ControlInfoMap::Map ctrlMap;
+
+ /*
+ * Compute the frame duration limits.
+ *
+ * The frame length is computed assuming a fixed line length combined
+ * with the vertical frame sizes.
+ */
+ const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+ uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+ uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+ const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ std::array<uint32_t, 3> frameHeights{
+ v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+ };
+
+ std::array<int64_t, 3> frameDurations;
+ for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+ uint64_t frameSize = lineLength * frameHeights[i];
+ frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+ }
+
+ ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+ frameDurations[1],
+ frameDurations[2]);
+
+ /*
+ * Compute exposure time limits from the V4L2_CID_EXPOSURE control
+ * limits and the line duration.
+ */
+	/* Line duration in microseconds, matching the ExposureTime unit. */
+	double lineDuration = sensorInfo.minLineLength / (sensorInfo.pixelRate / 1e6);
+
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+ ctrlMap[&controls::ExposureTime] = ControlInfo(minExposure, maxExposure, defExposure);
+
+ /* Compute the analogue gain limits. */
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ ctrlMap[&controls::AnalogueGain] = ControlInfo(minGain, maxGain, defGain);
+
+ /*
+ * Merge in any controls that we support either statically or from the
+ * algorithms.
+ */
+ ctrlMap.merge(context_.ctrlMap);
+
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
+}
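Running the frame duration arithmetic above with assumed sensor numbers (not from the patch): a 1920x1080 output with a default hblank of 280 gives lineLength = 2200; with pixelRate = 74.25 MHz and a default vblank of 45, frameSize = 2200 x (1080 + 45) = 2475000 pixels, so the default frame duration is 2475000 / 74.25 = 33333 us, i.e. 30 fps. The minimum and maximum entries follow identically from the vblank control's minimum and maximum.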
+
+int IPAMaliC55::configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder,
+ ControlInfoMap *ipaControls)
+{
+ sensorControls_ = ipaConfig.sensorControls;
+
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
+
+ const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
+
+ updateSessionConfiguration(info, ipaConfig.sensorControls,
+ static_cast<BayerFormat::Order>(bayerOrder));
+ updateControls(info, ipaConfig.sensorControls, ipaControls);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ int ret = algo->configure(context_, info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void IPAMaliC55::mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(
+ buffer.id,
+ MappedFrameBuffer(
+ &fb,
+ readOnly ? MappedFrameBuffer::MapFlag::Read
+ : MappedFrameBuffer::MapFlag::ReadWrite));
+ }
+}
+
+void IPAMaliC55::unmapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ auto it = buffers_.find(buffer.id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(buffer.id);
+ }
+}
+
+void IPAMaliC55::queueRequest(const uint32_t request, const ControlList &controls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(request);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ algo->queueRequest(context_, request, frameContext, controls);
+ }
+}
+
+void IPAMaliC55::fillParams(unsigned int request, uint32_t bufferId)
+{
+ struct mali_c55_params_buffer *params;
+ IPAFrameContext &frameContext = context_.frameContexts.get(request);
+
+ params = reinterpret_cast<mali_c55_params_buffer *>(
+ buffers_.at(bufferId).planes()[0].data());
+ memset(params, 0, sizeof(mali_c55_params_buffer));
+
+ params->version = MALI_C55_PARAM_BUFFER_V1;
+
+ for (auto const &algo : algorithms()) {
+ algo->prepare(context_, request, frameContext, params);
+
+ ASSERT(params->total_size <= MALI_C55_PARAMS_MAX_SIZE);
+ }
+
+ paramsComputed.emit(request);
+}
+
+void IPAMaliC55::processStats(unsigned int request, unsigned int bufferId,
+ const ControlList &sensorControls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(request);
+ const mali_c55_stats_buffer *stats = nullptr;
+
+ stats = reinterpret_cast<mali_c55_stats_buffer *>(
+ buffers_.at(bufferId).planes()[0].data());
+
+ frameContext.agc.exposure =
+ sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ frameContext.agc.sensorGain =
+ camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+
+ ControlList metadata(controls::controls);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ algo->process(context_, request, frameContext, stats, metadata);
+ }
+
+ setControls();
+
+ statsProcessed.emit(request, metadata);
+}
+
+} /* namespace ipa::mali_c55 */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 1,
+ "mali-c55",
+ "mali-c55",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::mali_c55::IPAMaliC55();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
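The extern "C" block above is what makes the module discoverable: the loader looks up the unmangled ipaModuleInfo and ipaCreate symbols in the shared object. libcamera's IPAModule class performs this lookup together with module signature verification; the fragment below is only a bare-bones sketch of the symbol resolution, with all of that checking omitted.

	#include <dlfcn.h>

	#include <libcamera/ipa/ipa_interface.h>

	/* Illustrative sketch: libcamera's IPAModule does this with verification. */
	libcamera::IPAInterface *loadIpa(const char *path)
	{
		void *handle = dlopen(path, RTLD_LAZY);
		if (!handle)
			return nullptr;

		using CreateFn = libcamera::IPAInterface *(*)();
		auto create = reinterpret_cast<CreateFn>(dlsym(handle, "ipaCreate"));
		if (!create)
			return nullptr;

		return create();
	}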
diff --git a/src/ipa/mali-c55/meson.build b/src/ipa/mali-c55/meson.build
new file mode 100644
index 00000000..864d90ec
--- /dev/null
+++ b/src/ipa/mali-c55/meson.build
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_mali_c55'
+
+mali_c55_ipa_sources = files([
+ 'ipa_context.cpp',
+ 'mali-c55.cpp'
+])
+
+mali_c55_ipa_sources += mali_c55_ipa_algorithms
+
+mod = shared_module(ipa_name,
+ mali_c55_ipa_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes, libipa_includes],
+ dependencies : libcamera_private,
+ link_with : libipa,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/mali-c55/module.h b/src/ipa/mali-c55/module.h
new file mode 100644
index 00000000..1d85ec1f
--- /dev/null
+++ b/src/ipa/mali-c55/module.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * module.h - Mali-C55 IPA Module
+ */
+
+#pragma once
+
+#include <linux/mali-c55-config.h>
+
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo,
+ mali_c55_params_buffer, mali_c55_stats_buffer>;
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/meson.build b/src/ipa/meson.build
index 48793e07..0ad4631d 100644
--- a/src/ipa/meson.build
+++ b/src/ipa/meson.build
@@ -41,6 +41,9 @@ ipa_names = []
subdirs = []
foreach pipeline : pipelines
+ # The current implementation expects the IPA module name to match the
+ # pipeline name.
+ # \todo Make the IPA naming scheme more flexible.
if not ipa_modules.contains(pipeline)
continue
endif
diff --git a/src/ipa/rkisp1/algorithms/agc.cpp b/src/ipa/rkisp1/algorithms/agc.cpp
index 47a6f7b2..5a3ba013 100644
--- a/src/ipa/rkisp1/algorithms/agc.cpp
+++ b/src/ipa/rkisp1/algorithms/agc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * agc.cpp - AGC/AEC mean-based control algorithm
+ * AGC/AEC mean-based control algorithm
*/
#include "agc.h"
@@ -10,6 +10,8 @@
#include <algorithm>
#include <chrono>
#include <cmath>
+#include <tuple>
+#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -17,6 +19,8 @@
#include <libcamera/control_ids.h>
#include <libcamera/ipa/core_ipa_interface.h>
+#include "libcamera/internal/yaml_parser.h"
+
#include "libipa/histogram.h"
/**
@@ -36,35 +40,130 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Agc)
-/* Minimum limit for analogue gain value */
-static constexpr double kMinAnalogueGain = 1.0;
+int Agc::parseMeteringModes(IPAContext &context, const YamlObject &tuningData)
+{
+ if (!tuningData.isDictionary())
+ LOG(RkISP1Agc, Warning)
+ << "'AeMeteringMode' parameter not found in tuning file";
+
+ for (const auto &[key, value] : tuningData.asDict()) {
+ if (controls::AeMeteringModeNameValueMap.find(key) ==
+ controls::AeMeteringModeNameValueMap.end()) {
+ LOG(RkISP1Agc, Warning)
+ << "Skipping unknown metering mode '" << key << "'";
+ continue;
+ }
-/* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */
-static constexpr utils::Duration kMaxShutterSpeed = 60ms;
+ std::vector<uint8_t> weights =
+ value.getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (weights.size() != context.hw->numHistogramWeights) {
+ LOG(RkISP1Agc, Warning)
+				<< "Failed to read metering mode '" << key << "'";
+ continue;
+ }
-/* Number of frames to wait before calculating stats on minimum exposure */
-static constexpr uint32_t kNumStartupFrames = 10;
+ meteringModes_[controls::AeMeteringModeNameValueMap.at(key)] = weights;
+ }
-/* Target value to reach for the top 2% of the histogram */
-static constexpr double kEvGainTarget = 0.5;
+ if (meteringModes_.empty()) {
+ LOG(RkISP1Agc, Warning)
+ << "No metering modes read from tuning file; defaulting to matrix";
+ int32_t meteringModeId = controls::AeMeteringModeNameValueMap.at("MeteringMatrix");
+ std::vector<uint8_t> weights(context.hw->numHistogramWeights, 1);
-/*
- * Relative luminance target.
- *
- * It's a number that's chosen so that, when the camera points at a grey
- * target, the resulting image brightness is considered right.
- *
- * \todo Why is the value different between IPU3 and RkISP1 ?
- */
-static constexpr double kRelativeLuminanceTarget = 0.4;
+ meteringModes_[meteringModeId] = weights;
+ }
+
+ std::vector<ControlValue> meteringModes;
+ std::vector<int> meteringModeKeys = utils::map_keys(meteringModes_);
+ std::transform(meteringModeKeys.begin(), meteringModeKeys.end(),
+ std::back_inserter(meteringModes),
+ [](int x) { return ControlValue(x); });
+ context.ctrlMap[&controls::AeMeteringMode] = ControlInfo(meteringModes);
+
+ return 0;
+}
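For reference, a tuning file fragment that this parser accepts maps metering mode names from AeMeteringModeNameValueMap to weight lists whose length must equal context.hw->numHistogramWeights. The snippet below assumes hardware with 25 weight cells (a 5x5 grid); the weight values are made up for illustration.

	AeMeteringMode:
	  MeteringCentreWeighted: [
	    0, 1, 1, 1, 0,
	    1, 2, 3, 2, 1,
	    1, 3, 8, 3, 1,
	    1, 2, 3, 2, 1,
	    0, 1, 1, 1, 0,
	  ]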
+
+uint8_t Agc::computeHistogramPredivider(const Size &size,
+ enum rkisp1_cif_isp_histogram_mode mode)
+{
+ /*
+ * The maximum number of pixels that could potentially be in one bin is
+ * if all the pixels of the image are in it, multiplied by 3 for the
+ * three color channels. The counter for each bin is 16 bits wide, so
+ * `factor` thus contains the number of times we'd wrap around. This is
+ * obviously the number of pixels that we need to skip to make sure
+ * that we don't wrap around, but we compute the square root of it
+ * instead, as the skip that we need to program is for both the x and y
+ * directions.
+ *
+ * Even though it looks like dividing into a counter of 65536 would
+ * overflow by 1, this is apparently fine according to the hardware
+ * documentation, and this successfully gets the expected documented
+ * predivider size for cases where:
+ * (width / predivider) * (height / predivider) * 3 == 65536.
+ *
+ * There's a bit of extra rounding math to make sure the rounding goes
+ * the correct direction so that the square of the step is big enough
+ * to encompass the `factor` number of pixels that we need to skip.
+ *
+ * \todo Take into account weights. That is, if the weights are low
+ * enough we can potentially reduce the predivider to increase
+ * precision. This needs some investigation however, as this hardware
+ * behavior is undocumented and is only an educated guess.
+ */
+ int count = mode == RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED ? 3 : 1;
+ double factor = size.width * size.height * count / 65536.0;
+ double root = std::sqrt(factor);
+ uint8_t predivider = static_cast<uint8_t>(std::ceil(root));
+
+ return std::clamp<uint8_t>(predivider, 3, 127);
+}
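Plugging assumed numbers into computeHistogramPredivider(): a 1920x1080 window in RGB-combined mode gives factor = 1920 x 1080 x 3 / 65536 ~ 94.92, root ~ 9.74, and predivider = ceil(9.74) = 10, comfortably inside the [3, 127] clamp. Counting every tenth pixel in x and y then yields 6220800 / 100 = 62208 counts at most per bin, just under the 16-bit limit.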
Agc::Agc()
- : frameCount_(0), filteredExposure_(0s)
{
supportsRaw_ = true;
}
/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
+ *
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
+ *
+ * \return 0 on success or errors from the base class
+ */
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
+{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ const YamlObject &yamlMeteringModes = tuningData["AeMeteringMode"];
+ ret = parseMeteringModes(context, yamlMeteringModes);
+ if (ret)
+ return ret;
+
+ context.ctrlMap[&controls::ExposureTimeMode] =
+ ControlInfo({ { ControlValue(controls::ExposureTimeModeAuto),
+ ControlValue(controls::ExposureTimeModeManual) } },
+ ControlValue(controls::ExposureTimeModeAuto));
+ context.ctrlMap[&controls::AnalogueGainMode] =
+ ControlInfo({ { ControlValue(controls::AnalogueGainModeAuto),
+ ControlValue(controls::AnalogueGainModeManual) } },
+ ControlValue(controls::AnalogueGainModeAuto));
+ /* \todo Move this to the Camera class */
+ context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true, true);
+ context.ctrlMap.merge(controls());
+
+ return 0;
+}
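From the application side, the two mode controls registered here split the old single AeEnable switch into independent exposure and gain modes. A request that fixes the exposure time while leaving the gain in auto mode might look like the following sketch, where the 10000 us value is an arbitrary assumption and request is presumed to come from Camera::createRequest().

	request->controls().set(controls::ExposureTimeMode,
				controls::ExposureTimeModeManual);
	request->controls().set(controls::ExposureTime, 10000);	/* us */
	request->controls().set(controls::AnalogueGainMode,
				controls::AnalogueGainModeAuto);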
+
+/**
* \brief Configure the AGC given a configInfo
* \param[in] context The shared IPA context
* \param[in] configInfo The IPA configuration data
@@ -79,7 +178,20 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
10ms / context.configuration.sensor.lineDuration;
context.activeState.agc.manual.gain = context.activeState.agc.automatic.gain;
context.activeState.agc.manual.exposure = context.activeState.agc.automatic.exposure;
- context.activeState.agc.autoEnabled = !context.configuration.raw;
+ context.activeState.agc.autoExposureEnabled = !context.configuration.raw;
+ context.activeState.agc.autoGainEnabled = !context.configuration.raw;
+
+ context.activeState.agc.constraintMode =
+ static_cast<controls::AeConstraintModeEnum>(constraintModes().begin()->first);
+ context.activeState.agc.exposureMode =
+ static_cast<controls::AeExposureModeEnum>(exposureModeHelpers().begin()->first);
+ context.activeState.agc.meteringMode =
+ static_cast<controls::AeMeteringModeEnum>(meteringModes_.begin()->first);
+
+ /* Limit the frame duration to match current initialisation */
+ ControlInfo &frameDurationLimits = context.ctrlMap[&controls::FrameDurationLimits];
+ context.activeState.agc.minFrameDuration = std::chrono::microseconds(frameDurationLimits.min().get<int64_t>());
+ context.activeState.agc.maxFrameDuration = std::chrono::microseconds(frameDurationLimits.max().get<int64_t>());
/*
* Define the measurement window for AGC as a centered rectangle
@@ -90,11 +202,13 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
context.configuration.agc.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
context.configuration.agc.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;
- /*
- * \todo Use the upcoming per-frame context API that will provide a
- * frame index
- */
- frameCount_ = 0;
+ setLimits(context.configuration.sensor.minExposureTime,
+ context.configuration.sensor.maxExposureTime,
+ context.configuration.sensor.minAnalogueGain,
+ context.configuration.sensor.maxAnalogueGain);
+
+ resetFrameCount();
+
return 0;
}
@@ -109,18 +223,47 @@ void Agc::queueRequest(IPAContext &context,
auto &agc = context.activeState.agc;
if (!context.configuration.raw) {
- const auto &agcEnable = controls.get(controls::AeEnable);
- if (agcEnable && *agcEnable != agc.autoEnabled) {
- agc.autoEnabled = *agcEnable;
+ const auto &aeEnable = controls.get(controls::ExposureTimeMode);
+ if (aeEnable &&
+ (*aeEnable == controls::ExposureTimeModeAuto) != agc.autoExposureEnabled) {
+ agc.autoExposureEnabled = (*aeEnable == controls::ExposureTimeModeAuto);
+
+ LOG(RkISP1Agc, Debug)
+ << (agc.autoExposureEnabled ? "Enabling" : "Disabling")
+ << " AGC (exposure)";
+
+ /*
+ * If we go from auto -> manual with no manual control
+ * set, use the last computed value, which we don't
+ * know until prepare() so save this information.
+ *
+ * \todo Check the previous frame at prepare() time
+ * instead of saving a flag here
+ */
+ if (!agc.autoExposureEnabled && !controls.get(controls::ExposureTime))
+ frameContext.agc.autoExposureModeChange = true;
+ }
+
+ const auto &agEnable = controls.get(controls::AnalogueGainMode);
+ if (agEnable &&
+ (*agEnable == controls::AnalogueGainModeAuto) != agc.autoGainEnabled) {
+ agc.autoGainEnabled = (*agEnable == controls::AnalogueGainModeAuto);
LOG(RkISP1Agc, Debug)
- << (agc.autoEnabled ? "Enabling" : "Disabling")
- << " AGC";
+ << (agc.autoGainEnabled ? "Enabling" : "Disabling")
+ << " AGC (gain)";
+ /*
+ * If we go from auto -> manual with no manual control
+ * set, use the last computed value, which we don't
+ * know until prepare() so save this information.
+ */
+ if (!agc.autoGainEnabled && !controls.get(controls::AnalogueGain))
+ frameContext.agc.autoGainModeChange = true;
}
}
const auto &exposure = controls.get(controls::ExposureTime);
- if (exposure && !agc.autoEnabled) {
+ if (exposure && !agc.autoExposureEnabled) {
agc.manual.exposure = *exposure * 1.0us
/ context.configuration.sensor.lineDuration;
@@ -129,186 +272,150 @@ void Agc::queueRequest(IPAContext &context,
}
const auto &gain = controls.get(controls::AnalogueGain);
- if (gain && !agc.autoEnabled) {
+ if (gain && !agc.autoGainEnabled) {
agc.manual.gain = *gain;
LOG(RkISP1Agc, Debug) << "Set gain to " << agc.manual.gain;
}
- frameContext.agc.autoEnabled = agc.autoEnabled;
+ frameContext.agc.autoExposureEnabled = agc.autoExposureEnabled;
+ frameContext.agc.autoGainEnabled = agc.autoGainEnabled;
- if (!frameContext.agc.autoEnabled) {
+ if (!frameContext.agc.autoExposureEnabled)
frameContext.agc.exposure = agc.manual.exposure;
+ if (!frameContext.agc.autoGainEnabled)
frameContext.agc.gain = agc.manual.gain;
+
+ const auto &meteringMode = controls.get(controls::AeMeteringMode);
+ if (meteringMode) {
+ frameContext.agc.updateMetering = agc.meteringMode != *meteringMode;
+ agc.meteringMode =
+ static_cast<controls::AeMeteringModeEnum>(*meteringMode);
}
+ frameContext.agc.meteringMode = agc.meteringMode;
+
+ const auto &exposureMode = controls.get(controls::AeExposureMode);
+ if (exposureMode)
+ agc.exposureMode =
+ static_cast<controls::AeExposureModeEnum>(*exposureMode);
+ frameContext.agc.exposureMode = agc.exposureMode;
+
+ const auto &constraintMode = controls.get(controls::AeConstraintMode);
+ if (constraintMode)
+ agc.constraintMode =
+ static_cast<controls::AeConstraintModeEnum>(*constraintMode);
+ frameContext.agc.constraintMode = agc.constraintMode;
+
+ const auto &frameDurationLimits = controls.get(controls::FrameDurationLimits);
+ if (frameDurationLimits) {
+ /* Limit the control value to the limits in ControlInfo */
+ ControlInfo &limits = context.ctrlMap[&controls::FrameDurationLimits];
+ int64_t minFrameDuration =
+ std::clamp((*frameDurationLimits).front(),
+ limits.min().get<int64_t>(),
+ limits.max().get<int64_t>());
+ int64_t maxFrameDuration =
+ std::clamp((*frameDurationLimits).back(),
+ limits.min().get<int64_t>(),
+ limits.max().get<int64_t>());
+
+ agc.minFrameDuration = std::chrono::microseconds(minFrameDuration);
+ agc.maxFrameDuration = std::chrono::microseconds(maxFrameDuration);
+ }
+ frameContext.agc.minFrameDuration = agc.minFrameDuration;
+ frameContext.agc.maxFrameDuration = agc.maxFrameDuration;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Agc::prepare(IPAContext &context, const uint32_t frame,
- IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+ IPAFrameContext &frameContext, RkISP1Params *params)
{
- if (frameContext.agc.autoEnabled) {
- frameContext.agc.exposure = context.activeState.agc.automatic.exposure;
- frameContext.agc.gain = context.activeState.agc.automatic.gain;
- }
+ uint32_t activeAutoExposure = context.activeState.agc.automatic.exposure;
+ double activeAutoGain = context.activeState.agc.automatic.gain;
- if (frame > 0)
- return;
-
- /* Configure the measurement window. */
- params->meas.aec_config.meas_window = context.configuration.agc.measureWindow;
- /* Use a continuous method for measure. */
- params->meas.aec_config.autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
- /* Estimate Y as (R + G + B) x (85/256). */
- params->meas.aec_config.mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;
-
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AEC;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_AEC;
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_AEC;
-
- /* Configure histogram. */
- params->meas.hst_config.meas_window = context.configuration.agc.measureWindow;
- /* Produce the luminance histogram. */
- params->meas.hst_config.mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;
- /* Set an average weighted histogram. */
- Span<uint8_t> weights{
- params->meas.hst_config.hist_weight,
- context.hw->numHistogramWeights
- };
- std::fill(weights.begin(), weights.end(), 1);
- /* Step size can't be less than 3. */
- params->meas.hst_config.histogram_predivider = 4;
-
- /* Update the configuration for histogram. */
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_HST;
- /* Enable the histogram measure unit. */
- params->module_ens |= RKISP1_CIF_ISP_MODULE_HST;
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_HST;
-}
-
-/**
- * \brief Apply a filter on the exposure value to limit the speed of changes
- * \param[in] exposureValue The target exposure from the AGC algorithm
- *
- * The speed of the filter is adaptive, and will produce the target quicker
- * during startup, or when the target exposure is within 20% of the most recent
- * filter output.
- *
- * \return The filtered exposure
- */
-utils::Duration Agc::filterExposure(utils::Duration exposureValue)
-{
- double speed = 0.2;
-
- /* Adapt instantly if we are in startup phase. */
- if (frameCount_ < kNumStartupFrames)
- speed = 1.0;
+ /* Populate exposure and gain in auto mode */
+ if (frameContext.agc.autoExposureEnabled)
+ frameContext.agc.exposure = activeAutoExposure;
+ if (frameContext.agc.autoGainEnabled)
+ frameContext.agc.gain = activeAutoGain;
/*
- * If we are close to the desired result, go faster to avoid making
- * multiple micro-adjustments.
- * \todo Make this customisable?
+ * Populate manual exposure and gain from the active auto values when
+ * transitioning from auto to manual
*/
- if (filteredExposure_ < 1.2 * exposureValue &&
- filteredExposure_ > 0.8 * exposureValue)
- speed = sqrt(speed);
-
- filteredExposure_ = speed * exposureValue +
- filteredExposure_ * (1.0 - speed);
-
- LOG(RkISP1Agc, Debug) << "After filtering, exposure " << filteredExposure_;
-
- return filteredExposure_;
-}
-
-/**
- * \brief Estimate the new exposure and gain values
- * \param[inout] context The shared IPA Context
- * \param[in] frameContext The FrameContext for this frame
- * \param[in] yGain The gain calculated on the current brightness level
- * \param[in] iqMeanGain The gain calculated based on the relative luminance target
- */
-void Agc::computeExposure(IPAContext &context, IPAFrameContext &frameContext,
- double yGain, double iqMeanGain)
-{
- IPASessionConfiguration &configuration = context.configuration;
- IPAActiveState &activeState = context.activeState;
-
- /* Get the effective exposure and gain applied on the sensor. */
- uint32_t exposure = frameContext.sensor.exposure;
- double analogueGain = frameContext.sensor.gain;
-
- /* Use the highest of the two gain estimates. */
- double evGain = std::max(yGain, iqMeanGain);
-
- utils::Duration minShutterSpeed = configuration.sensor.minShutterSpeed;
- utils::Duration maxShutterSpeed = std::min(configuration.sensor.maxShutterSpeed,
- kMaxShutterSpeed);
-
- double minAnalogueGain = std::max(configuration.sensor.minAnalogueGain,
- kMinAnalogueGain);
- double maxAnalogueGain = configuration.sensor.maxAnalogueGain;
+ if (!frameContext.agc.autoExposureEnabled && frameContext.agc.autoExposureModeChange) {
+ context.activeState.agc.manual.exposure = activeAutoExposure;
+ frameContext.agc.exposure = activeAutoExposure;
+ }
+ if (!frameContext.agc.autoGainEnabled && frameContext.agc.autoGainModeChange) {
+ context.activeState.agc.manual.gain = activeAutoGain;
+ frameContext.agc.gain = activeAutoGain;
+ }
- /* Consider within 1% of the target as correctly exposed. */
- if (utils::abs_diff(evGain, 1.0) < 0.01)
+ if (frame > 0 && !frameContext.agc.updateMetering)
return;
- /* extracted from Rpi::Agc::computeTargetExposure. */
-
- /* Calculate the shutter time in seconds. */
- utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
-
/*
- * Update the exposure value for the next computation using the values
- * of exposure and gain really used by the sensor.
+ * Configure the AEC measurements. Set the window, measure
+ * continuously, and estimate Y as (R + G + B) x (85/256).
*/
- utils::Duration effectiveExposureValue = currentShutter * analogueGain;
+ auto aecConfig = params->block<BlockType::Aec>();
+ aecConfig.setEnabled(true);
- LOG(RkISP1Agc, Debug) << "Actual total exposure " << currentShutter * analogueGain
- << " Shutter speed " << currentShutter
- << " Gain " << analogueGain
- << " Needed ev gain " << evGain;
+ aecConfig->meas_window = context.configuration.agc.measureWindow;
+ aecConfig->autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
+ aecConfig->mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;
/*
- * Calculate the current exposure value for the scene as the latest
- * exposure value applied multiplied by the new estimated gain.
+ * Configure the histogram measurement. Set the window, produce a
+ * luminance histogram, and set the weights and predivider.
*/
- utils::Duration exposureValue = effectiveExposureValue * evGain;
+ auto hstConfig = params->block<BlockType::Hst>();
+ hstConfig.setEnabled(true);
- /* Clamp the exposure value to the min and max authorized. */
- utils::Duration maxTotalExposure = maxShutterSpeed * maxAnalogueGain;
- exposureValue = std::min(exposureValue, maxTotalExposure);
- LOG(RkISP1Agc, Debug) << "Target total exposure " << exposureValue
- << ", maximum is " << maxTotalExposure;
+ hstConfig->meas_window = context.configuration.agc.measureWindow;
+ hstConfig->mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;
- /*
- * Divide the exposure value as new exposure and gain values.
- * \todo estimate if we need to desaturate
- */
- exposureValue = filterExposure(exposureValue);
-
- /*
- * Push the shutter time up to the maximum first, and only then
- * increase the gain.
- */
- utils::Duration shutterTime = std::clamp<utils::Duration>(exposureValue / minAnalogueGain,
- minShutterSpeed, maxShutterSpeed);
- double stepGain = std::clamp(exposureValue / shutterTime,
- minAnalogueGain, maxAnalogueGain);
- LOG(RkISP1Agc, Debug) << "Divided up shutter and gain are "
- << shutterTime << " and "
- << stepGain;
+ Span<uint8_t> weights{
+ hstConfig->hist_weight,
+ context.hw->numHistogramWeights
+ };
+ std::vector<uint8_t> &modeWeights = meteringModes_.at(frameContext.agc.meteringMode);
+ std::copy(modeWeights.begin(), modeWeights.end(), weights.begin());
+
+ struct rkisp1_cif_isp_window window = hstConfig->meas_window;
+ Size windowSize = { window.h_size, window.v_size };
+ hstConfig->histogram_predivider =
+ computeHistogramPredivider(windowSize,
+ static_cast<rkisp1_cif_isp_histogram_mode>(hstConfig->mode));
+}
- /* Update the estimated exposure and gain. */
- activeState.agc.automatic.exposure = shutterTime / configuration.sensor.lineDuration;
- activeState.agc.automatic.gain = stepGain;
+void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
+ metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+ metadata.set(controls::FrameDuration, frameContext.agc.frameDuration.get<std::micro>());
+ metadata.set(controls::ExposureTimeMode,
+ frameContext.agc.autoExposureEnabled
+ ? controls::ExposureTimeModeAuto
+ : controls::ExposureTimeModeManual);
+ metadata.set(controls::AnalogueGainMode,
+ frameContext.agc.autoGainEnabled
+ ? controls::AnalogueGainModeAuto
+ : controls::AnalogueGainModeManual);
+
+ metadata.set(controls::AeMeteringMode, frameContext.agc.meteringMode);
+ metadata.set(controls::AeExposureMode, frameContext.agc.exposureMode);
+ metadata.set(controls::AeConstraintMode, frameContext.agc.constraintMode);
}
/**
* \brief Estimate the relative luminance of the frame with a given gain
- * \param[in] expMeans The mean luminance values, from the RkISP1 statistics
* \param[in] gain The gain to apply to the frame
*
* This function estimates the average relative luminance of the frame that
@@ -322,8 +429,6 @@ void Agc::computeExposure(IPAContext &context, IPAFrameContext &frameContext,
* YUV doesn't take into account the fact that the R, G and B components
* contribute differently to the relative luminance.
*
- * \todo Have a dedicated YUV algorithm ?
- *
* The values are normalized to the [0.0, 1.0] range, where 1.0 corresponds to a
* theoretical perfect reflector of 100% reference white.
*
@@ -332,45 +437,38 @@ void Agc::computeExposure(IPAContext &context, IPAFrameContext &frameContext,
*
* \return The relative luminance
*/
-double Agc::estimateLuminance(Span<const uint8_t> expMeans, double gain)
+double Agc::estimateLuminance(double gain) const
{
double ySum = 0.0;
/* Sum the averages, saturated to 255. */
- for (uint8_t expMean : expMeans)
+ for (uint8_t expMean : expMeans_)
ySum += std::min(expMean * gain, 255.0);
/* \todo Weight with the AWB gains */
- return ySum / expMeans.size() / 255;
+ return ySum / expMeans_.size() / 255;
}
/**
- * \brief Estimate the mean value of the top 2% of the histogram
- * \param[in] hist The histogram statistics computed by the RkISP1
- * \return The mean value of the top 2% of the histogram
+ * \brief Process frame duration and compute vblank
+ * \param[in] context The shared IPA context
+ * \param[in] frameContext The current frame context
+ * \param[in] frameDuration The target frame duration
+ *
+ * Compute and populate vblank from the target frame duration.
*/
-double Agc::measureBrightness(Span<const uint32_t> hist) const
+void Agc::processFrameDuration(IPAContext &context,
+ IPAFrameContext &frameContext,
+ utils::Duration frameDuration)
{
- Histogram histogram{ hist };
- /* Estimate the quantile mean of the top 2% of the histogram. */
- return histogram.interQuantileMean(0.98, 1.0);
-}
+ IPACameraSensorInfo &sensorInfo = context.sensorInfo;
+ utils::Duration lineDuration = context.configuration.sensor.lineDuration;
-void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
- ControlList &metadata)
-{
- utils::Duration exposureTime = context.configuration.sensor.lineDuration
- * frameContext.sensor.exposure;
- metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
- metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+ frameContext.agc.vblank = (frameDuration / lineDuration) - sensorInfo.outputSize.height;
- /* \todo Use VBlank value calculated from each frame exposure. */
- uint32_t vTotal = context.configuration.sensor.size.height
- + context.configuration.sensor.defVBlank;
- utils::Duration frameDuration = context.configuration.sensor.lineDuration
- * vTotal;
- metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());
+ /* Update frame duration accounting for line length quantization. */
+ frameContext.agc.frameDuration = (sensorInfo.outputSize.height + frameContext.agc.vblank) * lineDuration;
}
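Worked through with the same assumed numbers as before: a 1080-line output with lineDuration = 2200 / 74.25 MHz ~ 29.63 us and a target frameDuration of 33333 us gives 33333 / 29.63 ~ 1125 lines, hence vblank = 1125 - 1080 = 45; recomputing (1080 + 45) x 29.63 us ~ 33.33 ms shows the frame duration after quantization to whole lines.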
/**
@@ -389,10 +487,20 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
ControlList &metadata)
{
if (!stats) {
+ processFrameDuration(context, frameContext,
+ frameContext.agc.minFrameDuration);
+ fillMetadata(context, frameContext, metadata);
+ return;
+ }
+
+ if (!(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP)) {
fillMetadata(context, frameContext, metadata);
+ LOG(RkISP1Agc, Error) << "AUTOEXP data is missing in statistics";
return;
}
+ const utils::Duration &lineDuration = context.configuration.sensor.lineDuration;
+
/*
* \todo Verify that the exposure and gain applied by the sensor for
* this frame match what has been requested. This isn't a hard
@@ -402,43 +510,75 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
*/
const rkisp1_cif_isp_stat *params = &stats->params;
- ASSERT(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP);
- Span<const uint8_t> ae{ params->ae.exp_mean, context.hw->numAeCells };
- Span<const uint32_t> hist{
- params->hist.hist_bins,
- context.hw->numHistogramBins
- };
-
- double iqMean = measureBrightness(hist);
- double iqMeanGain = kEvGainTarget * hist.size() / iqMean;
+ /* The lower 4 bits are fractional and meant to be discarded. */
+ Histogram hist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+ expMeans_ = { params->ae.exp_mean, context.hw->numAeCells };
/*
- * Estimate the gain needed to achieve a relative luminance target. To
- * account for non-linearity caused by saturation, the value needs to be
- * estimated in an iterative process, as multiplying by a gain will not
- * increase the relative luminance by the same factor if some image
- * regions are saturated.
+ * Set the AGC limits using the fixed exposure time and/or gain in
+ * manual mode, or the sensor limits in auto mode.
*/
- double yGain = 1.0;
- double yTarget = kRelativeLuminanceTarget;
-
- for (unsigned int i = 0; i < 8; i++) {
- double yValue = estimateLuminance(ae, yGain);
- double extra_gain = std::min(10.0, yTarget / (yValue + .001));
-
- yGain *= extra_gain;
- LOG(RkISP1Agc, Debug) << "Y value: " << yValue
- << ", Y target: " << yTarget
- << ", gives gain " << yGain;
- if (extra_gain < 1.01)
- break;
+ utils::Duration minExposureTime;
+ utils::Duration maxExposureTime;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+
+ if (frameContext.agc.autoExposureEnabled) {
+ minExposureTime = context.configuration.sensor.minExposureTime;
+ maxExposureTime = std::clamp(frameContext.agc.maxFrameDuration,
+ context.configuration.sensor.minExposureTime,
+ context.configuration.sensor.maxExposureTime);
+ } else {
+ minExposureTime = context.configuration.sensor.lineDuration
+ * frameContext.agc.exposure;
+ maxExposureTime = minExposureTime;
+ }
+
+ if (frameContext.agc.autoGainEnabled) {
+ minAnalogueGain = context.configuration.sensor.minAnalogueGain;
+ maxAnalogueGain = context.configuration.sensor.maxAnalogueGain;
+ } else {
+ minAnalogueGain = frameContext.agc.gain;
+ maxAnalogueGain = frameContext.agc.gain;
}
- computeExposure(context, frameContext, yGain, iqMeanGain);
- frameCount_++;
+ setLimits(minExposureTime, maxExposureTime, minAnalogueGain, maxAnalogueGain);
+
+ /*
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
+ */
+ utils::Duration exposureTime = lineDuration * frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
+
+ utils::Duration newExposureTime;
+ double aGain, dGain;
+ std::tie(newExposureTime, aGain, dGain) =
+ calculateNewEv(frameContext.agc.constraintMode,
+ frameContext.agc.exposureMode,
+ hist, effectiveExposureValue);
+
+ LOG(RkISP1Agc, Debug)
+ << "Divided up exposure time, analogue gain and digital gain are "
+ << newExposureTime << ", " << aGain << " and " << dGain;
+
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure and gain. */
+ activeState.agc.automatic.exposure = newExposureTime / lineDuration;
+ activeState.agc.automatic.gain = aGain;
+
+ /*
+ * Expand the target frame duration so that we do not run faster than
+ * the minimum frame duration when we have short exposures.
+ */
+ processFrameDuration(context, frameContext,
+ std::max(frameContext.agc.minFrameDuration, newExposureTime));
fillMetadata(context, frameContext, metadata);
+ expMeans_ = {};
}
REGISTER_IPA_ALGORITHM(Agc, "Agc")
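
The process() path above delegates the actual exposure split to libipa's calculateNewEv(). As an illustration of the general technique only (not the libipa implementation), a total exposure value can be divided by filling exposure time first and making up the remainder with analogue gain; all limits below are hypothetical:

#include <algorithm>
#include <iostream>
#include <utility>

std::pair<double, double> splitExposure(double evUs, double minExpUs,
					double maxExpUs, double minGain,
					double maxGain)
{
	/* Clamp the requested EV to what the sensor can express. */
	evUs = std::clamp(evUs, minExpUs * minGain, maxExpUs * maxGain);

	/* Prefer exposure time over gain to keep noise low. */
	double exposure = std::clamp(evUs / minGain, minExpUs, maxExpUs);
	double gain = std::clamp(evUs / exposure, minGain, maxGain);

	return { exposure, gain };
}

int main()
{
	/* EV of 60 ms with a 33 ms exposure limit: gain makes up the rest. */
	auto [exposure, gain] = splitExposure(60000.0, 100.0, 33000.0, 1.0, 16.0);
	std::cout << exposure << " us, gain " << gain << "\n";
}
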
diff --git a/src/ipa/rkisp1/algorithms/agc.h b/src/ipa/rkisp1/algorithms/agc.h
index fb82a33f..62bcde99 100644
--- a/src/ipa/rkisp1/algorithms/agc.h
+++ b/src/ipa/rkisp1/algorithms/agc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * agc.h - RkISP1 AGC/AEC mean-based control algorithm
+ * RkISP1 AGC/AEC mean-based control algorithm
*/
#pragma once
@@ -14,18 +14,21 @@
#include <libcamera/geometry.h>
+#include "libipa/agc_mean_luminance.h"
+
#include "algorithm.h"
namespace libcamera {
namespace ipa::rkisp1::algorithms {
-class Agc : public Algorithm
+class Agc : public Algorithm, public AgcMeanLuminance
{
public:
Agc();
~Agc() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
void queueRequest(IPAContext &context,
const uint32_t frame,
@@ -33,24 +36,27 @@ public:
const ControlList &controls) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
void process(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
const rkisp1_stat_buffer *stats,
ControlList &metadata) override;
private:
- void computeExposure(IPAContext &Context, IPAFrameContext &frameContext,
- double yGain, double iqMeanGain);
- utils::Duration filterExposure(utils::Duration exposureValue);
- double estimateLuminance(Span<const uint8_t> expMeans, double gain);
- double measureBrightness(Span<const uint32_t> hist) const;
+ int parseMeteringModes(IPAContext &context, const YamlObject &tuningData);
+ uint8_t computeHistogramPredivider(const Size &size,
+ enum rkisp1_cif_isp_histogram_mode mode);
+
void fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
ControlList &metadata);
+ double estimateLuminance(double gain) const override;
+ void processFrameDuration(IPAContext &context,
+ IPAFrameContext &frameContext,
+ utils::Duration frameDuration);
- uint64_t frameCount_;
+ Span<const uint8_t> expMeans_;
- utils::Duration filteredExposure_;
+ std::map<int32_t, std::vector<uint8_t>> meteringModes_;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/algorithm.h b/src/ipa/rkisp1/algorithms/algorithm.h
index 9454c9a1..715cfcd8 100644
--- a/src/ipa/rkisp1/algorithms/algorithm.h
+++ b/src/ipa/rkisp1/algorithms/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas On Board
*
- * algorithm.h - RkISP1 control algorithm interface
+ * RkISP1 control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rkisp1/algorithms/awb.cpp b/src/ipa/rkisp1/algorithms/awb.cpp
index 744f4a38..eafe9308 100644
--- a/src/ipa/rkisp1/algorithms/awb.cpp
+++ b/src/ipa/rkisp1/algorithms/awb.cpp
@@ -2,20 +2,24 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * awb.cpp - AWB control algorithm
+ * AWB control algorithm
*/
#include "awb.h"
#include <algorithm>
-#include <cmath>
-#include <iomanip>
+#include <ios>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
+
#include <libcamera/ipa/core_ipa_interface.h>
+#include "libipa/awb_bayes.h"
+#include "libipa/awb_grey.h"
+#include "libipa/colours.h"
+
/**
* \file awb.h
*/
@@ -31,27 +35,100 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Awb)
+constexpr int32_t kMinColourTemperature = 2500;
+constexpr int32_t kMaxColourTemperature = 10000;
+constexpr int32_t kDefaultColourTemperature = 5000;
+
/* Minimum mean value below which AWB can't operate. */
constexpr double kMeanMinThreshold = 2.0;
+class RkISP1AwbStats final : public AwbStats
+{
+public:
+ RkISP1AwbStats(const RGB<double> &rgbMeans)
+ : rgbMeans_(rgbMeans)
+ {
+ rg_ = rgbMeans_.r() / rgbMeans_.g();
+ bg_ = rgbMeans_.b() / rgbMeans_.g();
+ }
+
+ double computeColourError(const RGB<double> &gains) const override
+ {
+ /*
+ * Compute the sum of the squared colour error (non-greyness) as
+ * it appears in the log likelihood equation.
+ */
+ double deltaR = gains.r() * rg_ - 1.0;
+ double deltaB = gains.b() * bg_ - 1.0;
+ double delta2 = deltaR * deltaR + deltaB * deltaB;
+
+ return delta2;
+ }
+
+ RGB<double> rgbMeans() const override
+ {
+ return rgbMeans_;
+ }
+
+private:
+ RGB<double> rgbMeans_;
+ double rg_;
+ double bg_;
+};
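
The colour error above is the squared distance of the gain-corrected r/g and b/g ratios from grey (both ratios equal to 1). A quick numeric check with made-up channel means:

#include <iostream>

int main()
{
	/* Hypothetical raw channel means. */
	double r = 120.0, g = 160.0, b = 100.0;
	double rg = r / g;	/* 0.75 */
	double bg = b / g;	/* 0.625 */

	/* Candidate gains; ideal grey-world gains would be g/r and g/b. */
	double gainR = 1.2, gainB = 1.5;

	double deltaR = gainR * rg - 1.0;	/* -0.1 */
	double deltaB = gainB * bg - 1.0;	/* -0.0625 */
	std::cout << "error " << (deltaR * deltaR + deltaB * deltaB) << "\n";
}
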
+
Awb::Awb()
: rgbMode_(false)
{
}
/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Awb::init(IPAContext &context, const YamlObject &tuningData)
+{
+ auto &cmap = context.ctrlMap;
+ cmap[&controls::ColourTemperature] = ControlInfo(kMinColourTemperature,
+ kMaxColourTemperature,
+ kDefaultColourTemperature);
+
+ if (!tuningData.contains("algorithm"))
+ LOG(RkISP1Awb, Info) << "No AWB algorithm specified."
+ << " Defaulting to grey world";
+
+ auto mode = tuningData["algorithm"].get<std::string>("grey");
+ if (mode == "grey") {
+ awbAlgo_ = std::make_unique<AwbGrey>();
+ } else if (mode == "bayes") {
+ awbAlgo_ = std::make_unique<AwbBayes>();
+ } else {
+ LOG(RkISP1Awb, Error) << "Unknown AWB algorithm: " << mode;
+ return -EINVAL;
+ }
+ LOG(RkISP1Awb, Debug) << "Using AWB algorithm: " << mode;
+
+ int ret = awbAlgo_->init(tuningData);
+ if (ret) {
+ LOG(RkISP1Awb, Error) << "Failed to init AWB algorithm";
+ return ret;
+ }
+
+ const auto &src = awbAlgo_->controls();
+ cmap.insert(src.begin(), src.end());
+
+ return 0;
+}
+
+/**
* \copydoc libcamera::ipa::Algorithm::configure
*/
int Awb::configure(IPAContext &context,
const IPACameraSensorInfo &configInfo)
{
- context.activeState.awb.gains.manual.red = 1.0;
- context.activeState.awb.gains.manual.blue = 1.0;
- context.activeState.awb.gains.manual.green = 1.0;
- context.activeState.awb.gains.automatic.red = 1.0;
- context.activeState.awb.gains.automatic.blue = 1.0;
- context.activeState.awb.gains.automatic.green = 1.0;
+ context.activeState.awb.gains.manual = RGB<double>{ 1.0 };
+ context.activeState.awb.gains.automatic =
+ awbAlgo_->gainsFromColourTemperature(kDefaultColourTemperature);
context.activeState.awb.autoEnabled = true;
+ context.activeState.awb.temperatureK = kDefaultColourTemperature;
/*
* Define the measurement window for AWB as a centered rectangle
@@ -85,64 +162,80 @@ void Awb::queueRequest(IPAContext &context,
<< (*awbEnable ? "Enabling" : "Disabling") << " AWB";
}
- const auto &colourGains = controls.get(controls::ColourGains);
- if (colourGains && !awb.autoEnabled) {
- awb.gains.manual.red = (*colourGains)[0];
- awb.gains.manual.blue = (*colourGains)[1];
-
- LOG(RkISP1Awb, Debug)
- << "Set colour gains to red: " << awb.gains.manual.red
- << ", blue: " << awb.gains.manual.blue;
- }
+ awbAlgo_->handleControls(controls);
frameContext.awb.autoEnabled = awb.autoEnabled;
- if (!awb.autoEnabled) {
- frameContext.awb.gains.red = awb.gains.manual.red;
- frameContext.awb.gains.green = 1.0;
- frameContext.awb.gains.blue = awb.gains.manual.blue;
+ if (awb.autoEnabled)
+ return;
+
+ const auto &colourGains = controls.get(controls::ColourGains);
+ const auto &colourTemperature = controls.get(controls::ColourTemperature);
+ bool update = false;
+ if (colourGains) {
+ awb.gains.manual.r() = (*colourGains)[0];
+ awb.gains.manual.b() = (*colourGains)[1];
+ /*
+ * \todo Colour temperature reported in metadata is now
+ * incorrect, as we can't deduce the temperature from the gains.
+ * This will be fixed with the bayes AWB algorithm.
+ */
+ update = true;
+ } else if (colourTemperature) {
+ const auto &gains = awbAlgo_->gainsFromColourTemperature(*colourTemperature);
+ awb.gains.manual.r() = gains.r();
+ awb.gains.manual.b() = gains.b();
+ awb.temperatureK = *colourTemperature;
+ update = true;
}
+
+ if (update)
+ LOG(RkISP1Awb, Debug)
+ << "Set colour gains to " << awb.gains.manual;
+
+ frameContext.awb.gains = awb.gains.manual;
+ frameContext.awb.temperatureK = awb.temperatureK;
}
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Awb::prepare(IPAContext &context, const uint32_t frame,
- IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+ IPAFrameContext &frameContext, RkISP1Params *params)
{
/*
* This is the latest time we can read the active state. This is the
* most up-to-date automatic values we can read.
*/
if (frameContext.awb.autoEnabled) {
- frameContext.awb.gains.red = context.activeState.awb.gains.automatic.red;
- frameContext.awb.gains.green = context.activeState.awb.gains.automatic.green;
- frameContext.awb.gains.blue = context.activeState.awb.gains.automatic.blue;
+ frameContext.awb.gains = context.activeState.awb.gains.automatic;
+ frameContext.awb.temperatureK = context.activeState.awb.temperatureK;
}
- params->others.awb_gain_config.gain_green_b = 256 * frameContext.awb.gains.green;
- params->others.awb_gain_config.gain_blue = 256 * frameContext.awb.gains.blue;
- params->others.awb_gain_config.gain_red = 256 * frameContext.awb.gains.red;
- params->others.awb_gain_config.gain_green_r = 256 * frameContext.awb.gains.green;
+ auto gainConfig = params->block<BlockType::AwbGain>();
+ gainConfig.setEnabled(true);
- /* Update the gains. */
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
+ gainConfig->gain_green_b = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff);
+ gainConfig->gain_blue = std::clamp<int>(256 * frameContext.awb.gains.b(), 0, 0x3ff);
+ gainConfig->gain_red = std::clamp<int>(256 * frameContext.awb.gains.r(), 0, 0x3ff);
+ gainConfig->gain_green_r = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff);
/* If we have already set the AWB measurement parameters, return. */
if (frame > 0)
return;
- rkisp1_cif_isp_awb_meas_config &awb_config = params->meas.awb_meas_config;
+ auto awbConfig = params->block<BlockType::Awb>();
+ awbConfig.setEnabled(true);
/* Configure the measure window for AWB. */
- awb_config.awb_wnd = context.configuration.awb.measureWindow;
+ awbConfig->awb_wnd = context.configuration.awb.measureWindow;
/* Number of frames to use to estimate the means (0 means 1 frame). */
- awb_config.frames = 0;
+ awbConfig->frames = 0;
/* Select RGB or YCbCr means measurement. */
if (rgbMode_) {
- awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB;
+ awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB;
/*
* For RGB-based measurements, pixels are selected with maximum
@@ -150,19 +243,19 @@ void Awb::prepare(IPAContext &context, const uint32_t frame,
* awb_ref_cr, awb_min_y and awb_ref_cb respectively. The other
* values are not used, set them to 0.
*/
- awb_config.awb_ref_cr = 250;
- awb_config.min_y = 250;
- awb_config.awb_ref_cb = 250;
+ awbConfig->awb_ref_cr = 250;
+ awbConfig->min_y = 250;
+ awbConfig->awb_ref_cb = 250;
- awb_config.max_y = 0;
- awb_config.min_c = 0;
- awb_config.max_csum = 0;
+ awbConfig->max_y = 0;
+ awbConfig->min_c = 0;
+ awbConfig->max_csum = 0;
} else {
- awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;
+ awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;
/* Set the reference Cr and Cb (AWB target) to white. */
- awb_config.awb_ref_cb = 128;
- awb_config.awb_ref_cr = 128;
+ awbConfig->awb_ref_cb = 128;
+ awbConfig->awb_ref_cr = 128;
/*
* Filter out pixels based on luminance and chrominance values.
@@ -170,36 +263,11 @@ void Awb::prepare(IPAContext &context, const uint32_t frame,
* range, while the acceptable chroma values are specified with
* a minimum of 16 and a maximum Cb+Cr sum of 250.
*/
- awb_config.min_y = 16;
- awb_config.max_y = 250;
- awb_config.min_c = 16;
- awb_config.max_csum = 250;
+ awbConfig->min_y = 16;
+ awbConfig->max_y = 250;
+ awbConfig->min_c = 16;
+ awbConfig->max_csum = 250;
}
-
- /* Enable the AWB gains. */
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB_GAIN;
-
- /* Update the AWB measurement parameters and enable the AWB module. */
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB;
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB;
-}
-
-uint32_t Awb::estimateCCT(double red, double green, double blue)
-{
- /* Convert the RGB values to CIE tristimulus values (XYZ) */
- double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue);
- double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue);
- double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue);
-
- /* Calculate the normalized chromaticity values */
- double x = X / (X + Y + Z);
- double y = Y / (X + Y + Z);
-
- /* Calculate CCT */
- double n = (x - 0.3320) / (0.1858 - y);
- return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
}
/**
@@ -211,41 +279,108 @@ void Awb::process(IPAContext &context,
const rkisp1_stat_buffer *stats,
ControlList &metadata)
{
+ IPAActiveState &activeState = context.activeState;
+
+ metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled);
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(frameContext.awb.gains.r()),
+ static_cast<float>(frameContext.awb.gains.b())
+ });
+ metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK);
+
+ if (!stats || !(stats->meas_type & RKISP1_CIF_ISP_STAT_AWB)) {
+ LOG(RkISP1Awb, Error) << "AWB data is missing in statistics";
+ return;
+ }
+
const rkisp1_cif_isp_stat *params = &stats->params;
const rkisp1_cif_isp_awb_stat *awb = &params->awb;
- IPAActiveState &activeState = context.activeState;
- double greenMean;
- double redMean;
- double blueMean;
+
+ RGB<double> rgbMeans = calculateRgbMeans(frameContext, awb);
+
+ /*
+ * If the means are too small we don't have enough information to
+ * meaningfully calculate gains. Freeze the algorithm in that case.
+ */
+ if (rgbMeans.r() < kMeanMinThreshold && rgbMeans.g() < kMeanMinThreshold &&
+ rgbMeans.b() < kMeanMinThreshold)
+ return;
+
+ RkISP1AwbStats awbStats{ rgbMeans };
+ AwbResult awbResult = awbAlgo_->calculateAwb(awbStats, frameContext.lux.lux);
+
+ activeState.awb.temperatureK = awbResult.colourTemperature;
+
+ /* Metadata shall contain the up-to-date measurement */
+ metadata.set(controls::ColourTemperature, activeState.awb.temperatureK);
+
+ /*
+ * Clamp the gain values to the hardware, which expresses gains as Q2.8
+ * unsigned integer values. Set the minimum just above zero to avoid
+ * divisions by zero when computing the raw means in subsequent
+ * iterations.
+ */
+ awbResult.gains = awbResult.gains.max(1.0 / 256).min(1023.0 / 256);
+
+ /* Filter the values to avoid oscillations. */
+ double speed = 0.2;
+ awbResult.gains = awbResult.gains * speed +
+ activeState.awb.gains.automatic * (1 - speed);
+
+ activeState.awb.gains.automatic = awbResult.gains;
+
+ LOG(RkISP1Awb, Debug)
+ << std::showpoint
+ << "Means " << rgbMeans << ", gains "
+ << activeState.awb.gains.automatic << ", temp "
+ << activeState.awb.temperatureK << "K";
+}
+
+RGB<double> Awb::calculateRgbMeans(const IPAFrameContext &frameContext, const rkisp1_cif_isp_awb_stat *awb) const
+{
+ Vector<double, 3> rgbMeans;
if (rgbMode_) {
- greenMean = awb->awb_mean[0].mean_y_or_g;
- redMean = awb->awb_mean[0].mean_cr_or_r;
- blueMean = awb->awb_mean[0].mean_cb_or_b;
+ rgbMeans = {{
+ static_cast<double>(awb->awb_mean[0].mean_cr_or_r),
+ static_cast<double>(awb->awb_mean[0].mean_y_or_g),
+ static_cast<double>(awb->awb_mean[0].mean_cb_or_b)
+ }};
} else {
/* Get the YCbCr mean values */
- double yMean = awb->awb_mean[0].mean_y_or_g;
- double cbMean = awb->awb_mean[0].mean_cb_or_b;
- double crMean = awb->awb_mean[0].mean_cr_or_r;
+ Vector<double, 3> yuvMeans({
+ static_cast<double>(awb->awb_mean[0].mean_y_or_g),
+ static_cast<double>(awb->awb_mean[0].mean_cb_or_b),
+ static_cast<double>(awb->awb_mean[0].mean_cr_or_r)
+ });
/*
- * Convert from YCbCr to RGB.
- * The hardware uses the following formulas:
- * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
+ * Convert from YCbCr to RGB. The hardware uses the following
+ * formulas:
+ *
+ * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
* Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B
* Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B
*
- * The inverse matrix is thus:
+ * This seems to be based on limited-range BT.601 with Q1.6
+ * precision.
+ *
+ * The inverse matrix is:
+ *
* [[1,1636, -0,0623, 1,6008]
* [1,1636, -0,4045, -0,7949]
* [1,1636, 1,9912, -0,0250]]
*/
- yMean -= 16;
- cbMean -= 128;
- crMean -= 128;
- redMean = 1.1636 * yMean - 0.0623 * cbMean + 1.6008 * crMean;
- greenMean = 1.1636 * yMean - 0.4045 * cbMean - 0.7949 * crMean;
- blueMean = 1.1636 * yMean + 1.9912 * cbMean - 0.0250 * crMean;
+ static const Matrix<double, 3, 3> yuv2rgbMatrix({
+ 1.1636, -0.0623, 1.6008,
+ 1.1636, -0.4045, -0.7949,
+ 1.1636, 1.9912, -0.0250
+ });
+ static const Vector<double, 3> yuv2rgbOffset({
+ 16, 128, 128
+ });
+
+ rgbMeans = yuv2rgbMatrix * (yuvMeans - yuv2rgbOffset);
/*
* Due to hardware rounding errors in the YCbCr means, the
@@ -253,9 +388,7 @@ void Awb::process(IPAContext &context,
* negative gains, messing up calculation. Prevent this by
* clamping the means to positive values.
*/
- redMean = std::max(redMean, 0.0);
- greenMean = std::max(greenMean, 0.0);
- blueMean = std::max(blueMean, 0.0);
+ rgbMeans = rgbMeans.max(0.0);
}
/*
@@ -263,63 +396,9 @@ void Awb::process(IPAContext &context,
* divide by the gains that were used to get the raw means from the
* sensor.
*/
- redMean /= frameContext.awb.gains.red;
- greenMean /= frameContext.awb.gains.green;
- blueMean /= frameContext.awb.gains.blue;
-
- /*
- * If the means are too small we don't have enough information to
- * meaningfully calculate gains. Freeze the algorithm in that case.
- */
- if (redMean < kMeanMinThreshold && greenMean < kMeanMinThreshold &&
- blueMean < kMeanMinThreshold) {
- frameContext.awb.temperatureK = activeState.awb.temperatureK;
- return;
- }
-
- activeState.awb.temperatureK = estimateCCT(redMean, greenMean, blueMean);
-
- /*
- * Estimate the red and blue gains to apply in a grey world. The green
- * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
- * divisor to a minimum value of 1.0.
- */
- double redGain = greenMean / std::max(redMean, 1.0);
- double blueGain = greenMean / std::max(blueMean, 1.0);
-
- /*
- * Clamp the gain values to the hardware, which expresses gains as Q2.8
- * unsigned integer values. Set the minimum just above zero to avoid
- * divisions by zero when computing the raw means in subsequent
- * iterations.
- */
- redGain = std::clamp(redGain, 1.0 / 256, 1023.0 / 256);
- blueGain = std::clamp(blueGain, 1.0 / 256, 1023.0 / 256);
-
- /* Filter the values to avoid oscillations. */
- double speed = 0.2;
- redGain = speed * redGain + (1 - speed) * activeState.awb.gains.automatic.red;
- blueGain = speed * blueGain + (1 - speed) * activeState.awb.gains.automatic.blue;
-
- activeState.awb.gains.automatic.red = redGain;
- activeState.awb.gains.automatic.blue = blueGain;
- activeState.awb.gains.automatic.green = 1.0;
-
- frameContext.awb.temperatureK = activeState.awb.temperatureK;
-
- metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled);
- metadata.set(controls::ColourGains, {
- static_cast<float>(frameContext.awb.gains.red),
- static_cast<float>(frameContext.awb.gains.blue)
- });
- metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK);
+ rgbMeans /= frameContext.awb.gains;
- LOG(RkISP1Awb, Debug) << std::showpoint
- << "Means [" << redMean << ", " << greenMean << ", " << blueMean
- << "], gains [" << activeState.awb.gains.automatic.red << ", "
- << activeState.awb.gains.automatic.green << ", "
- << activeState.awb.gains.automatic.blue << "], temp "
- << frameContext.awb.temperatureK << "K";
+ return rgbMeans;
}
REGISTER_IPA_ALGORITHM(Awb, "Awb")
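
The YCbCr-to-RGB conversion in calculateRgbMeans() can be sanity-checked in isolation: for a limited-range grey input (Y = 128, Cb = Cr = 128), all three rows of the inverse matrix must produce the same value. A standalone sketch using the coefficients quoted above:

#include <iostream>

int main()
{
	const double m[3][3] = {
		{ 1.1636, -0.0623,  1.6008 },
		{ 1.1636, -0.4045, -0.7949 },
		{ 1.1636,  1.9912, -0.0250 },
	};

	/* Limited-range grey patch, offsets already subtracted. */
	const double yuv[3] = { 128.0 - 16.0, 0.0, 0.0 };

	for (unsigned int i = 0; i < 3; i++) {
		double v = 0.0;
		for (unsigned int j = 0; j < 3; j++)
			v += m[i][j] * yuv[j];
		/* Each channel comes out as ~130.3. */
		std::cout << v << (i < 2 ? " " : "\n");
	}
}
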
diff --git a/src/ipa/rkisp1/algorithms/awb.h b/src/ipa/rkisp1/algorithms/awb.h
index 9d45a442..7e6c3862 100644
--- a/src/ipa/rkisp1/algorithms/awb.h
+++ b/src/ipa/rkisp1/algorithms/awb.h
@@ -2,11 +2,18 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * awb.h - AWB control algorithm
+ * AWB control algorithm
*/
#pragma once
+#include <optional>
+
+#include "libcamera/internal/vector.h"
+
+#include "libipa/awb.h"
+#include "libipa/interpolator.h"
+
#include "algorithm.h"
namespace libcamera {
@@ -19,20 +26,24 @@ public:
Awb();
~Awb() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
void queueRequest(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
const ControlList &controls) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
void process(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
const rkisp1_stat_buffer *stats,
ControlList &metadata) override;
private:
- uint32_t estimateCCT(double red, double green, double blue);
+ RGB<double> calculateRgbMeans(const IPAFrameContext &frameContext,
+ const rkisp1_cif_isp_awb_stat *awb) const;
+
+ std::unique_ptr<AwbAlgorithm> awbAlgo_;
bool rgbMode_;
};
diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp
index 15324fb1..98cb7145 100644
--- a/src/ipa/rkisp1/algorithms/blc.cpp
+++ b/src/ipa/rkisp1/algorithms/blc.cpp
@@ -2,13 +2,17 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * blc.cpp - RkISP1 Black Level Correction control
+ * RkISP1 Black Level Correction control
*/
#include "blc.h"
+#include <linux/videodev2.h>
+
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
#include "libcamera/internal/yaml_parser.h"
/**
@@ -36,22 +40,62 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Blc)
BlackLevelCorrection::BlackLevelCorrection()
- : tuningParameters_(false)
{
+ /*
+ * This is a bit of a hack. In raw mode no black level correction
+ * happens. This flag is used to ensure the metadata gets populated with
+ * the black level, which is needed to capture proper raw images for
+ * tuning.
+ */
+ supportsRaw_ = true;
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
-int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
- const YamlObject &tuningData)
+int BlackLevelCorrection::init(IPAContext &context, const YamlObject &tuningData)
{
- blackLevelRed_ = tuningData["R"].get<int16_t>(256);
- blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(256);
- blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(256);
- blackLevelBlue_ = tuningData["B"].get<int16_t>(256);
-
- tuningParameters_ = true;
+ std::optional<int16_t> levelRed = tuningData["R"].get<int16_t>();
+ std::optional<int16_t> levelGreenR = tuningData["Gr"].get<int16_t>();
+ std::optional<int16_t> levelGreenB = tuningData["Gb"].get<int16_t>();
+ std::optional<int16_t> levelBlue = tuningData["B"].get<int16_t>();
+ bool tuningHasLevels = levelRed && levelGreenR && levelGreenB && levelBlue;
+
+ auto blackLevel = context.camHelper->blackLevel();
+ if (!blackLevel) {
+ /*
+ * Not all camera sensor helpers have been updated with black
+ * levels. Print a warning and fall back to the levels from the
+ * tuning data to preserve backward compatibility. This should
+ * be removed once all helpers provide the data.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "No black levels provided by camera sensor helper"
+ << ", please fix";
+
+ blackLevelRed_ = levelRed.value_or(4096);
+ blackLevelGreenR_ = levelGreenR.value_or(4096);
+ blackLevelGreenB_ = levelGreenB.value_or(4096);
+ blackLevelBlue_ = levelBlue.value_or(4096);
+ } else if (tuningHasLevels) {
+ /*
+ * If black levels are provided in the tuning file, use them to
+ * avoid breaking existing camera tuning. This is deprecated and
+ * will be removed.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "Deprecated: black levels overwritten by tuning file";
+
+ blackLevelRed_ = *levelRed;
+ blackLevelGreenR_ = *levelGreenR;
+ blackLevelGreenB_ = *levelGreenB;
+ blackLevelBlue_ = *levelBlue;
+ } else {
+ blackLevelRed_ = *blackLevel;
+ blackLevelGreenR_ = *blackLevel;
+ blackLevelGreenB_ = *blackLevel;
+ blackLevelBlue_ = *blackLevel;
+ }
LOG(RkISP1Blc, Debug)
<< "Black levels: red " << blackLevelRed_
@@ -62,29 +106,80 @@ int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
return 0;
}
+int BlackLevelCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ /*
+ * BLC on ISP versions that include the companding block requires usage
+ * of the extensible parameters format.
+ */
+ supported_ = context.configuration.paramFormat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS ||
+ !context.hw->compand;
+
+ if (!supported_)
+ LOG(RkISP1Blc, Warning)
+ << "BLC in companding block requires extensible parameters";
+
+ return 0;
+}
+
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
-void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
+void BlackLevelCorrection::prepare(IPAContext &context,
const uint32_t frame,
[[maybe_unused]] IPAFrameContext &frameContext,
- rkisp1_params_cfg *params)
+ RkISP1Params *params)
{
+ if (context.configuration.raw)
+ return;
+
if (frame > 0)
return;
- if (!tuningParameters_)
+ if (!supported_)
return;
- params->others.bls_config.enable_auto = 0;
- params->others.bls_config.fixed_val.r = blackLevelRed_;
- params->others.bls_config.fixed_val.gr = blackLevelGreenR_;
- params->others.bls_config.fixed_val.gb = blackLevelGreenB_;
- params->others.bls_config.fixed_val.b = blackLevelBlue_;
+ if (context.hw->compand) {
+ auto config = params->block<BlockType::CompandBls>();
+ config.setEnabled(true);
+
+ /*
+ * Scale up to the 20-bit black levels used by the companding
+ * block.
+ */
+ config->r = blackLevelRed_ << 4;
+ config->gr = blackLevelGreenR_ << 4;
+ config->gb = blackLevelGreenB_ << 4;
+ config->b = blackLevelBlue_ << 4;
+ } else {
+ auto config = params->block<BlockType::Bls>();
+ config.setEnabled(true);
+
+ config->enable_auto = 0;
+
+ /* Scale down to the 12-bit black levels used by the BLS block. */
+ config->fixed_val.r = blackLevelRed_ >> 4;
+ config->fixed_val.gr = blackLevelGreenR_ >> 4;
+ config->fixed_val.gb = blackLevelGreenB_ >> 4;
+ config->fixed_val.b = blackLevelBlue_ >> 4;
+ }
+}
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS;
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelRed_),
+ static_cast<int32_t>(blackLevelGreenR_),
+ static_cast<int32_t>(blackLevelGreenB_),
+ static_cast<int32_t>(blackLevelBlue_) });
}
REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
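
The shifts in prepare() follow from the black levels being stored at 16-bit precision: the companding block takes 20-bit values (<< 4) and the classic BLS block 12-bit values (>> 4). A small sketch of the scaling, with a hypothetical 16-bit level:

#include <cstdint>
#include <iostream>

int main()
{
	const uint16_t level16 = 4096;	/* hypothetical 16-bit black level */

	/* 20-bit value for the companding BLS block. */
	const uint32_t level20 = static_cast<uint32_t>(level16) << 4;
	/* 12-bit value for the classic BLS block. */
	const uint16_t level12 = level16 >> 4;

	std::cout << level16 << " -> " << level20 << " / " << level12 << "\n";
}
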
diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h
index 0b1a2d43..f797ae44 100644
--- a/src/ipa/rkisp1/algorithms/blc.h
+++ b/src/ipa/rkisp1/algorithms/blc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * blc.h - RkISP1 Black Level Correction control
+ * RkISP1 Black Level Correction control
*/
#pragma once
@@ -20,12 +20,19 @@ public:
~BlackLevelCorrection() = default;
int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
private:
- bool tuningParameters_;
+ bool supported_;
+
int16_t blackLevelRed_;
int16_t blackLevelGreenR_;
int16_t blackLevelGreenB_;
diff --git a/src/ipa/rkisp1/algorithms/ccm.cpp b/src/ipa/rkisp1/algorithms/ccm.cpp
new file mode 100644
index 00000000..eb8ca39e
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/ccm.cpp
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Color Correction Matrix control algorithm
+ */
+
+#include "ccm.h"
+
+#include <map>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/fixedpoint.h"
+#include "libipa/interpolator.h"
+
+/**
+ * \file ccm.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Ccm
+ * \brief A color correction matrix algorithm
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Ccm)
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Ccm::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ int ret = ccm_.readYaml(tuningData["ccms"], "ct", "ccm");
+ if (ret < 0) {
+ LOG(RkISP1Ccm, Warning)
+ << "Failed to parse 'ccm' "
+ << "parameter from tuning file; falling back to unit matrix";
+ ccm_.setData({ { 0, Matrix<float, 3, 3>::identity() } });
+ }
+
+ ret = offsets_.readYaml(tuningData["ccms"], "ct", "offsets");
+ if (ret < 0) {
+ LOG(RkISP1Ccm, Warning)
+ << "Failed to parse 'offsets' "
+ << "parameter from tuning file; falling back to zero offsets";
+
+ offsets_.setData({ { 0, Matrix<int16_t, 3, 1>({ 0, 0, 0 }) } });
+ }
+
+ return 0;
+}
+
+void Ccm::setParameters(struct rkisp1_cif_isp_ctk_config &config,
+ const Matrix<float, 3, 3> &matrix,
+ const Matrix<int16_t, 3, 1> &offsets)
+{
+ /*
+ * 4 bit integer and 7 bit fractional, ranging from -8 (0x400) to
+ * +7.992 (0x3ff)
+ */
+ for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned int j = 0; j < 3; j++)
+ config.coeff[i][j] =
+ floatingToFixedPoint<4, 7, uint16_t, double>(matrix[i][j]);
+ }
+
+ for (unsigned int i = 0; i < 3; i++)
+ config.ct_offset[i] = offsets[i][0] & 0xfff;
+
+ LOG(RkISP1Ccm, Debug) << "Setting matrix " << matrix;
+ LOG(RkISP1Ccm, Debug) << "Setting offsets " << offsets;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Ccm::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ uint32_t ct = context.activeState.awb.temperatureK;
+
+ /*
+ * \todo The colour temperature will likely be noisy, add filtering to
+ * avoid updating the CCM matrix all the time.
+ */
+ if (frame > 0 && ct == ct_) {
+ frameContext.ccm.ccm = context.activeState.ccm.ccm;
+ return;
+ }
+
+ ct_ = ct;
+ Matrix<float, 3, 3> ccm = ccm_.getInterpolated(ct);
+ Matrix<int16_t, 3, 1> offsets = offsets_.getInterpolated(ct);
+
+ context.activeState.ccm.ccm = ccm;
+ frameContext.ccm.ccm = ccm;
+
+ auto config = params->block<BlockType::Ctk>();
+ config.setEnabled(true);
+ setParameters(*config, ccm, offsets);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Ccm::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::ColourCorrectionMatrix, frameContext.ccm.ccm.data());
+}
+
+REGISTER_IPA_ALGORITHM(Ccm, "Ccm")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
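
setParameters() relies on libipa's floatingToFixedPoint<4, 7>() to encode the matrix coefficients as Q4.7 two's-complement values, covering -8 (0x400) to +7.992 (0x3ff) in steps of 1/128. A sketch of an equivalent conversion (the libipa helper remains the authoritative version):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

uint16_t toQ4_7(double value)
{
	/* Q4.7 covers [-8.0, +7.9921875] in steps of 1/128. */
	value = std::clamp(value, -8.0, 1023.0 / 128.0);
	int32_t fixed = std::lround(value * 128.0);

	/* Keep the low 11 bits as a two's-complement pattern. */
	return static_cast<uint16_t>(fixed) & 0x7ff;
}

int main()
{
	std::cout << std::hex
		  << toQ4_7(1.0) << " "		/* 0x080 */
		  << toQ4_7(-8.0) << " "	/* 0x400 */
		  << toQ4_7(7.992) << "\n";	/* 0x3ff */
}
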
diff --git a/src/ipa/rkisp1/algorithms/ccm.h b/src/ipa/rkisp1/algorithms/ccm.h
new file mode 100644
index 00000000..a5d9a9a4
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/ccm.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Color Correction Matrix control algorithm
+ */
+
+#pragma once
+
+#include <linux/rkisp1-config.h>
+
+#include "libcamera/internal/matrix.h"
+
+#include "libipa/interpolator.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Ccm : public Algorithm
+{
+public:
+ Ccm() {}
+ ~Ccm() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ void parseYaml(const YamlObject &tuningData);
+ void setParameters(struct rkisp1_cif_isp_ctk_config &config,
+ const Matrix<float, 3, 3> &matrix,
+ const Matrix<int16_t, 3, 1> &offsets);
+
+ unsigned int ct_;
+ Interpolator<Matrix<float, 3, 3>> ccm_;
+ Interpolator<Matrix<int16_t, 3, 1>> offsets_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/cproc.cpp b/src/ipa/rkisp1/algorithms/cproc.cpp
index eaa56c37..d1fff699 100644
--- a/src/ipa/rkisp1/algorithms/cproc.cpp
+++ b/src/ipa/rkisp1/algorithms/cproc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * cproc.cpp - RkISP1 Color Processing control
+ * RkISP1 Color Processing control
*/
#include "cproc.h"
@@ -33,20 +33,71 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1CProc)
+namespace {
+
+constexpr float kDefaultBrightness = 0.0f;
+constexpr float kDefaultContrast = 1.0f;
+constexpr float kDefaultSaturation = 1.0f;
+
+int convertBrightness(const float v)
+{
+ return std::clamp<int>(std::lround(v * 128), -128, 127);
+}
+
+int convertContrastOrSaturation(const float v)
+{
+ return std::clamp<int>(std::lround(v * 128), 0, 255);
+}
+
+} /* namespace */
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int ColorProcessing::init(IPAContext &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+{
+ auto &cmap = context.ctrlMap;
+
+ cmap[&controls::Brightness] = ControlInfo(-1.0f, 0.993f, kDefaultBrightness);
+ cmap[&controls::Contrast] = ControlInfo(0.0f, 1.993f, kDefaultContrast);
+ cmap[&controls::Saturation] = ControlInfo(0.0f, 1.993f, kDefaultSaturation);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int ColorProcessing::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ auto &cproc = context.activeState.cproc;
+
+ cproc.brightness = convertBrightness(kDefaultBrightness);
+ cproc.contrast = convertContrastOrSaturation(kDefaultContrast);
+ cproc.saturation = convertContrastOrSaturation(kDefaultSaturation);
+
+ return 0;
+}
+
/**
* \copydoc libcamera::ipa::Algorithm::queueRequest
*/
void ColorProcessing::queueRequest(IPAContext &context,
- [[maybe_unused]] const uint32_t frame,
+ const uint32_t frame,
IPAFrameContext &frameContext,
const ControlList &controls)
{
auto &cproc = context.activeState.cproc;
bool update = false;
+ if (frame == 0)
+ update = true;
+
const auto &brightness = controls.get(controls::Brightness);
if (brightness) {
- int value = std::clamp<int>(std::lround(*brightness * 128), -128, 127);
+ int value = convertBrightness(*brightness);
if (cproc.brightness != value) {
cproc.brightness = value;
update = true;
@@ -57,7 +108,7 @@ void ColorProcessing::queueRequest(IPAContext &context,
const auto &contrast = controls.get(controls::Contrast);
if (contrast) {
- int value = std::clamp<int>(std::lround(*contrast * 128), 0, 255);
+ int value = convertContrastOrSaturation(*contrast);
if (cproc.contrast != value) {
cproc.contrast = value;
update = true;
@@ -68,7 +119,7 @@ void ColorProcessing::queueRequest(IPAContext &context,
const auto saturation = controls.get(controls::Saturation);
if (saturation) {
- int value = std::clamp<int>(std::lround(*saturation * 128), 0, 255);
+ int value = convertContrastOrSaturation(*saturation);
if (cproc.saturation != value) {
cproc.saturation = value;
update = true;
@@ -89,19 +140,17 @@ void ColorProcessing::queueRequest(IPAContext &context,
void ColorProcessing::prepare([[maybe_unused]] IPAContext &context,
[[maybe_unused]] const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params)
+ RkISP1Params *params)
{
/* Check if the algorithm configuration has been updated. */
if (!frameContext.cproc.update)
return;
- params->others.cproc_config.brightness = frameContext.cproc.brightness;
- params->others.cproc_config.contrast = frameContext.cproc.contrast;
- params->others.cproc_config.sat = frameContext.cproc.saturation;
-
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_CPROC;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_CPROC;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CPROC;
+ auto config = params->block<BlockType::Cproc>();
+ config.setEnabled(true);
+ config->brightness = frameContext.cproc.brightness;
+ config->contrast = frameContext.cproc.contrast;
+ config->sat = frameContext.cproc.saturation;
}
REGISTER_IPA_ALGORITHM(ColorProcessing, "ColorProcessing")
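
The convert helpers map the float control ranges onto the hardware's signed and unsigned 8-bit registers; the ControlInfo limits (-1.0/0.993 and 0.0/1.993) are exactly the values that reach the register limits. A quick check:

#include <algorithm>
#include <cmath>
#include <iostream>

int convertBrightness(float v)
{
	return std::clamp<int>(std::lround(v * 128), -128, 127);
}

int convertContrastOrSaturation(float v)
{
	return std::clamp<int>(std::lround(v * 128), 0, 255);
}

int main()
{
	std::cout << convertBrightness(-1.0f) << " "		/* -128 */
		  << convertBrightness(0.993f) << " "		/* 127 */
		  << convertContrastOrSaturation(1.0f) << " "	/* 128 */
		  << convertContrastOrSaturation(1.993f) << "\n"; /* 255 */
}
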
diff --git a/src/ipa/rkisp1/algorithms/cproc.h b/src/ipa/rkisp1/algorithms/cproc.h
index ba6e901a..fd38fd17 100644
--- a/src/ipa/rkisp1/algorithms/cproc.h
+++ b/src/ipa/rkisp1/algorithms/cproc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * cproc.h - RkISP1 Color Processing control
+ * RkISP1 Color Processing control
*/
#pragma once
@@ -21,12 +21,15 @@ public:
ColorProcessing() = default;
~ColorProcessing() = default;
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
void queueRequest(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
const ControlList &controls) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/dpcc.cpp b/src/ipa/rkisp1/algorithms/dpcc.cpp
index 80a1b734..78946281 100644
--- a/src/ipa/rkisp1/algorithms/dpcc.cpp
+++ b/src/ipa/rkisp1/algorithms/dpcc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * dpcc.cpp - RkISP1 Defect Pixel Cluster Correction control
+ * RkISP1 Defect Pixel Cluster Correction control
*/
#include "dpcc.h"
@@ -232,16 +232,14 @@ int DefectPixelClusterCorrection::init([[maybe_unused]] IPAContext &context,
void DefectPixelClusterCorrection::prepare([[maybe_unused]] IPAContext &context,
const uint32_t frame,
[[maybe_unused]] IPAFrameContext &frameContext,
- rkisp1_params_cfg *params)
+ RkISP1Params *params)
{
if (frame > 0)
return;
- params->others.dpcc_config = config_;
-
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPCC;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_DPCC;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPCC;
+ auto config = params->block<BlockType::Dpcc>();
+ config.setEnabled(true);
+ *config = config_;
}
REGISTER_IPA_ALGORITHM(DefectPixelClusterCorrection, "DefectPixelClusterCorrection")
diff --git a/src/ipa/rkisp1/algorithms/dpcc.h b/src/ipa/rkisp1/algorithms/dpcc.h
index b1fac7d1..b77766c3 100644
--- a/src/ipa/rkisp1/algorithms/dpcc.h
+++ b/src/ipa/rkisp1/algorithms/dpcc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * dpcc.h - RkISP1 Defect Pixel Cluster Correction control
+ * RkISP1 Defect Pixel Cluster Correction control
*/
#pragma once
@@ -22,7 +22,7 @@ public:
int init(IPAContext &context, const YamlObject &tuningData) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
private:
rkisp1_cif_isp_dpcc_config config_;
diff --git a/src/ipa/rkisp1/algorithms/dpf.cpp b/src/ipa/rkisp1/algorithms/dpf.cpp
index 5bd7e59f..cb6095da 100644
--- a/src/ipa/rkisp1/algorithms/dpf.cpp
+++ b/src/ipa/rkisp1/algorithms/dpf.cpp
@@ -2,12 +2,14 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * dpf.cpp - RkISP1 Denoise Pre-Filter control
+ * RkISP1 Denoise Pre-Filter control
*/
#include "dpf.h"
-#include <cmath>
+#include <algorithm>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
@@ -215,15 +217,21 @@ void Dpf::queueRequest(IPAContext &context,
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void Dpf::prepare(IPAContext &context, const uint32_t frame,
- IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+ IPAFrameContext &frameContext, RkISP1Params *params)
{
- if (frame == 0) {
- params->others.dpf_config = config_;
- params->others.dpf_strength_config = strengthConfig_;
+ if (!frameContext.dpf.update && frame > 0)
+ return;
+
+ auto config = params->block<BlockType::Dpf>();
+ config.setEnabled(frameContext.dpf.denoise);
+
+ if (frameContext.dpf.denoise) {
+ *config = config_;
const auto &awb = context.configuration.awb;
const auto &lsc = context.configuration.lsc;
- auto &mode = params->others.dpf_config.gain.mode;
+
+ auto &mode = config->gain.mode;
/*
* The DPF needs to take into account the total amount of
@@ -241,15 +249,12 @@ void Dpf::prepare(IPAContext &context, const uint32_t frame,
mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS;
else
mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED;
-
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPF |
- RKISP1_CIF_ISP_MODULE_DPF_STRENGTH;
}
- if (frameContext.dpf.update) {
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPF;
- if (frameContext.dpf.denoise)
- params->module_ens |= RKISP1_CIF_ISP_MODULE_DPF;
+ if (frame == 0) {
+ auto strengthConfig = params->block<BlockType::DpfStrength>();
+ strengthConfig.setEnabled(true);
+ *strengthConfig = strengthConfig_;
}
}
diff --git a/src/ipa/rkisp1/algorithms/dpf.h b/src/ipa/rkisp1/algorithms/dpf.h
index 58f29f74..2dd8cd36 100644
--- a/src/ipa/rkisp1/algorithms/dpf.h
+++ b/src/ipa/rkisp1/algorithms/dpf.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * dpf.h - RkISP1 Denoise Pre-Filter control
+ * RkISP1 Denoise Pre-Filter control
*/
#pragma once
@@ -27,7 +27,7 @@ public:
const ControlList &controls) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
private:
struct rkisp1_cif_isp_dpf_config config_;
diff --git a/src/ipa/rkisp1/algorithms/filter.cpp b/src/ipa/rkisp1/algorithms/filter.cpp
index 4b89c05a..7598ef8a 100644
--- a/src/ipa/rkisp1/algorithms/filter.cpp
+++ b/src/ipa/rkisp1/algorithms/filter.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * filter.cpp - RkISP1 Filter control
+ * RkISP1 Filter control
*/
#include "filter.h"
@@ -104,7 +104,7 @@ void Filter::queueRequest(IPAContext &context,
*/
void Filter::prepare([[maybe_unused]] IPAContext &context,
[[maybe_unused]] const uint32_t frame,
- IPAFrameContext &frameContext, rkisp1_params_cfg *params)
+ IPAFrameContext &frameContext, RkISP1Params *params)
{
/* Check if the algorithm configuration has been updated. */
if (!frameContext.filter.update)
@@ -160,23 +160,25 @@ void Filter::prepare([[maybe_unused]] IPAContext &context,
uint8_t denoise = frameContext.filter.denoise;
uint8_t sharpness = frameContext.filter.sharpness;
- auto &flt_config = params->others.flt_config;
-
- flt_config.fac_sh0 = filt_fac_sh0[sharpness];
- flt_config.fac_sh1 = filt_fac_sh1[sharpness];
- flt_config.fac_mid = filt_fac_mid[sharpness];
- flt_config.fac_bl0 = filt_fac_bl0[sharpness];
- flt_config.fac_bl1 = filt_fac_bl1[sharpness];
-
- flt_config.lum_weight = kFiltLumWeightDefault;
- flt_config.mode = kFiltModeDefault;
- flt_config.thresh_sh0 = filt_thresh_sh0[denoise];
- flt_config.thresh_sh1 = filt_thresh_sh1[denoise];
- flt_config.thresh_bl0 = filt_thresh_bl0[denoise];
- flt_config.thresh_bl1 = filt_thresh_bl1[denoise];
- flt_config.grn_stage1 = stage1_select[denoise];
- flt_config.chr_v_mode = filt_chr_v_mode[denoise];
- flt_config.chr_h_mode = filt_chr_h_mode[denoise];
+
+ auto config = params->block<BlockType::Flt>();
+ config.setEnabled(true);
+
+ config->fac_sh0 = filt_fac_sh0[sharpness];
+ config->fac_sh1 = filt_fac_sh1[sharpness];
+ config->fac_mid = filt_fac_mid[sharpness];
+ config->fac_bl0 = filt_fac_bl0[sharpness];
+ config->fac_bl1 = filt_fac_bl1[sharpness];
+
+ config->lum_weight = kFiltLumWeightDefault;
+ config->mode = kFiltModeDefault;
+ config->thresh_sh0 = filt_thresh_sh0[denoise];
+ config->thresh_sh1 = filt_thresh_sh1[denoise];
+ config->thresh_bl0 = filt_thresh_bl0[denoise];
+ config->thresh_bl1 = filt_thresh_bl1[denoise];
+ config->grn_stage1 = stage1_select[denoise];
+ config->chr_v_mode = filt_chr_v_mode[denoise];
+ config->chr_h_mode = filt_chr_h_mode[denoise];
/*
* Combined high denoising and high sharpening requires some
@@ -186,27 +188,23 @@ void Filter::prepare([[maybe_unused]] IPAContext &context,
*/
if (denoise == 9) {
if (sharpness > 3)
- flt_config.grn_stage1 = 2;
+ config->grn_stage1 = 2;
} else if (denoise == 10) {
if (sharpness > 5)
- flt_config.grn_stage1 = 2;
+ config->grn_stage1 = 2;
else if (sharpness > 3)
- flt_config.grn_stage1 = 1;
+ config->grn_stage1 = 1;
}
if (denoise > 7) {
if (sharpness > 7) {
- flt_config.fac_bl0 /= 2;
- flt_config.fac_bl1 /= 4;
+ config->fac_bl0 /= 2;
+ config->fac_bl1 /= 4;
} else if (sharpness > 4) {
- flt_config.fac_bl0 = flt_config.fac_bl0 * 3 / 4;
- flt_config.fac_bl1 /= 2;
+ config->fac_bl0 = config->fac_bl0 * 3 / 4;
+ config->fac_bl1 /= 2;
}
}
-
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_FLT;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_FLT;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_FLT;
}
REGISTER_IPA_ALGORITHM(Filter, "Filter")
diff --git a/src/ipa/rkisp1/algorithms/filter.h b/src/ipa/rkisp1/algorithms/filter.h
index 3fd882ea..8f858e57 100644
--- a/src/ipa/rkisp1/algorithms/filter.h
+++ b/src/ipa/rkisp1/algorithms/filter.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * filter.h - RkISP1 Filter control
+ * RkISP1 Filter control
*/
#pragma once
@@ -26,7 +26,7 @@ public:
const ControlList &controls) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/goc.cpp b/src/ipa/rkisp1/algorithms/goc.cpp
new file mode 100644
index 00000000..a9493678
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/goc.cpp
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Gamma out control
+ */
+#include "goc.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file goc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class GammaOutCorrection
+ * \brief RkISP1 Gamma out correction
+ *
+ * This algorithm implements the gamma out curve for the RkISP1. It defaults to
+ * a gamma value of 2.2.
+ *
+ * As gamma is internally represented as a piecewise linear function with only
+ * 17 knots, the difference between gamma=2.2 and sRGB gamma is minimal.
+ * Therefore, sRGB gamma was not implemented as a special case.
+ *
+ * Useful links:
+ * - https://www.cambridgeincolour.com/tutorials/gamma-correction.htm
+ * - https://en.wikipedia.org/wiki/SRGB
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Gamma)
+
+const float kDefaultGamma = 2.2f;
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int GammaOutCorrection::init(IPAContext &context, const YamlObject &tuningData)
+{
+ if (context.hw->numGammaOutSamples !=
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10) {
+ LOG(RkISP1Gamma, Error)
+ << "Gamma is not implemented for RkISP1 V12";
+ return -EINVAL;
+ }
+
+ defaultGamma_ = tuningData["gamma"].get<double>(kDefaultGamma);
+ context.ctrlMap[&controls::Gamma] = ControlInfo(0.1f, 10.0f, defaultGamma_);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int GammaOutCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ context.activeState.goc.gamma = defaultGamma_;
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void GammaOutCorrection::queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ if (frame == 0)
+ frameContext.goc.update = true;
+
+ const auto &gamma = controls.get(controls::Gamma);
+ if (gamma) {
+ context.activeState.goc.gamma = *gamma;
+ frameContext.goc.update = true;
+ LOG(RkISP1Gamma, Debug) << "Set gamma to " << *gamma;
+ }
+
+ frameContext.goc.gamma = context.activeState.goc.gamma;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void GammaOutCorrection::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ ASSERT(context.hw->numGammaOutSamples ==
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10);
+
+ if (!frameContext.goc.update)
+ return;
+
+ /*
+ * The logarithmic segments as specified in the reference.
+ * Plus an additional 0 to make the loop easier
+ */
+ static constexpr std::array<unsigned int, RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10> segments = {
+ 64, 64, 64, 64, 128, 128, 128, 128, 256,
+ 256, 256, 512, 512, 512, 512, 512, 0
+ };
+
+ auto config = params->block<BlockType::Goc>();
+ config.setEnabled(true);
+
+ __u16 *gamma_y = config->gamma_y;
+
+ unsigned x = 0;
+ for (const auto [i, size] : utils::enumerate(segments)) {
+ gamma_y[i] = std::pow(x / 4096.0, 1.0 / frameContext.goc.gamma) * 1023.0;
+ x += size;
+ }
+
+ config->mode = RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void GammaOutCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::Gamma, frameContext.goc.gamma);
+}
+
+REGISTER_IPA_ALGORITHM(GammaOutCorrection, "GammaOutCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
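
The knot positions in prepare() are the running sums of the segment sizes, ending at the full 12-bit range (64*4 + 128*4 + 256*3 + 512*5 = 4096), and each knot maps through the inverse-gamma power function to a 10-bit output. A standalone version that prints the 17 knots for gamma = 2.2:

#include <array>
#include <cmath>
#include <cstdint>
#include <iostream>

int main()
{
	constexpr std::array<unsigned int, 17> segments = {
		64, 64, 64, 64, 128, 128, 128, 128, 256,
		256, 256, 512, 512, 512, 512, 512, 0
	};
	const double gamma = 2.2;

	unsigned int x = 0;
	for (unsigned int size : segments) {
		/* 12-bit knot position mapped to a 10-bit output value. */
		uint16_t y = std::pow(x / 4096.0, 1.0 / gamma) * 1023.0;
		std::cout << y << " ";
		x += size;
	}
	std::cout << "\n";
}
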
diff --git a/src/ipa/rkisp1/algorithms/goc.h b/src/ipa/rkisp1/algorithms/goc.h
new file mode 100644
index 00000000..bb2ddfc9
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/goc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Gamma out control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class GammaOutCorrection : public Algorithm
+{
+public:
+ GammaOutCorrection() = default;
+ ~GammaOutCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ float defaultGamma_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/gsl.cpp b/src/ipa/rkisp1/algorithms/gsl.cpp
index b9f87912..9604c0ac 100644
--- a/src/ipa/rkisp1/algorithms/gsl.cpp
+++ b/src/ipa/rkisp1/algorithms/gsl.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * gsl.cpp - RkISP1 Gamma Sensor Linearization control
+ * RkISP1 Gamma Sensor Linearization control
*/
#include "gsl.h"
@@ -119,24 +119,20 @@ int GammaSensorLinearization::init([[maybe_unused]] IPAContext &context,
void GammaSensorLinearization::prepare([[maybe_unused]] IPAContext &context,
const uint32_t frame,
[[maybe_unused]] IPAFrameContext &frameContext,
- rkisp1_params_cfg *params)
+ RkISP1Params *params)
{
if (frame > 0)
return;
- params->others.sdg_config.xa_pnts.gamma_dx0 = gammaDx_[0];
- params->others.sdg_config.xa_pnts.gamma_dx1 = gammaDx_[1];
+ auto config = params->block<BlockType::Sdg>();
+ config.setEnabled(true);
- std::copy(curveYr_.begin(), curveYr_.end(),
- params->others.sdg_config.curve_r.gamma_y);
- std::copy(curveYg_.begin(), curveYg_.end(),
- params->others.sdg_config.curve_g.gamma_y);
- std::copy(curveYb_.begin(), curveYb_.end(),
- params->others.sdg_config.curve_b.gamma_y);
+ config->xa_pnts.gamma_dx0 = gammaDx_[0];
+ config->xa_pnts.gamma_dx1 = gammaDx_[1];
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_SDG;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_SDG;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_SDG;
+ std::copy(curveYr_.begin(), curveYr_.end(), config->curve_r.gamma_y);
+ std::copy(curveYg_.begin(), curveYg_.end(), config->curve_g.gamma_y);
+ std::copy(curveYb_.begin(), curveYb_.end(), config->curve_b.gamma_y);
}
REGISTER_IPA_ALGORITHM(GammaSensorLinearization, "GammaSensorLinearization")
diff --git a/src/ipa/rkisp1/algorithms/gsl.h b/src/ipa/rkisp1/algorithms/gsl.h
index 0f1116a7..91cf6efa 100644
--- a/src/ipa/rkisp1/algorithms/gsl.h
+++ b/src/ipa/rkisp1/algorithms/gsl.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * gsl.h - RkISP1 Gamma Sensor Linearization control
+ * RkISP1 Gamma Sensor Linearization control
*/
#pragma once
@@ -22,7 +22,7 @@ public:
int init(IPAContext &context, const YamlObject &tuningData) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
private:
uint32_t gammaDx_[2];
diff --git a/src/ipa/rkisp1/algorithms/lsc.cpp b/src/ipa/rkisp1/algorithms/lsc.cpp
index a7ccedb1..e47aa2f0 100644
--- a/src/ipa/rkisp1/algorithms/lsc.cpp
+++ b/src/ipa/rkisp1/algorithms/lsc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * lsc.cpp - RkISP1 Lens Shading Correction control
+ * RkISP1 Lens Shading Correction control
*/
#include "lsc.h"
@@ -16,6 +16,7 @@
#include "libcamera/internal/yaml_parser.h"
+#include "libipa/lsc_polynomial.h"
#include "linux/rkisp1-config.h"
/**
@@ -24,6 +25,36 @@
namespace libcamera {
+namespace ipa {
+
+constexpr int kColourTemperatureChangeThreshold = 10;
+
+template<typename T>
+void interpolateVector(const std::vector<T> &a, const std::vector<T> &b,
+ std::vector<T> &dest, double lambda)
+{
+ assert(a.size() == b.size());
+ dest.resize(a.size());
+ for (size_t i = 0; i < a.size(); i++) {
+ dest[i] = a[i] * (1.0 - lambda) + b[i] * lambda;
+ }
+}
+
+template<>
+void Interpolator<rkisp1::algorithms::LensShadingCorrection::Components>::
+ interpolate(const rkisp1::algorithms::LensShadingCorrection::Components &a,
+ const rkisp1::algorithms::LensShadingCorrection::Components &b,
+ rkisp1::algorithms::LensShadingCorrection::Components &dest,
+ double lambda)
+{
+ interpolateVector(a.r, b.r, dest.r, lambda);
+ interpolateVector(a.gr, b.gr, dest.gr, lambda);
+ interpolateVector(a.gb, b.gb, dest.gb, lambda);
+ interpolateVector(a.b, b.b, dest.b, lambda);
+}
+
+} /* namespace ipa */
+
namespace ipa::rkisp1::algorithms {
/**
@@ -40,6 +71,200 @@ namespace ipa::rkisp1::algorithms {
LOG_DEFINE_CATEGORY(RkISP1Lsc)
+class LscPolynomialLoader
+{
+public:
+ LscPolynomialLoader(const Size &sensorSize,
+ const Rectangle &cropRectangle,
+ const std::vector<double> &xSizes,
+ const std::vector<double> &ySizes)
+ : sensorSize_(sensorSize),
+ cropRectangle_(cropRectangle),
+ xSizes_(xSizes),
+ ySizes_(ySizes)
+ {
+ }
+
+ int parseLscData(const YamlObject &yamlSets,
+ std::map<unsigned int, LensShadingCorrection::Components> &lscData)
+ {
+ const auto &sets = yamlSets.asList();
+ for (const auto &yamlSet : sets) {
+ std::optional<LscPolynomial> pr, pgr, pgb, pb;
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (lscData.count(ct)) {
+ LOG(RkISP1Lsc, Error)
+ << "Multiple sets found for "
+ << "color temperature " << ct;
+ return -EINVAL;
+ }
+
+ LensShadingCorrection::Components &set = lscData[ct];
+ pr = yamlSet["r"].get<LscPolynomial>();
+ pgr = yamlSet["gr"].get<LscPolynomial>();
+ pgb = yamlSet["gb"].get<LscPolynomial>();
+ pb = yamlSet["b"].get<LscPolynomial>();
+
+ if (!(pr && pgr && pgb && pb)) {
+ LOG(RkISP1Lsc, Error)
+ << "Failed to parse polynomial for "
+ << "colour temperature " << ct;
+ return -EINVAL;
+ }
+
+ set.ct = ct;
+ pr->setReferenceImageSize(sensorSize_);
+ pgr->setReferenceImageSize(sensorSize_);
+ pgb->setReferenceImageSize(sensorSize_);
+ pb->setReferenceImageSize(sensorSize_);
+ set.r = samplePolynomial(*pr);
+ set.gr = samplePolynomial(*pgr);
+ set.gb = samplePolynomial(*pgb);
+ set.b = samplePolynomial(*pb);
+ }
+
+ if (lscData.empty()) {
+ LOG(RkISP1Lsc, Error) << "Failed to load any sets";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+private:
+ /*
+ * The LSC grid has custom spacing defined on half the range (see
+ * parseSizes() for details). For easier handling, this function converts
+ * the sizes vector to positions and mirrors them. E.g.:
+ *
+ * input: | 0.2 | 0.3 |
+ * output: 0.0 0.2 0.5 0.8 1.0
+ */
+ std::vector<double> sizesListToPositions(const std::vector<double> &sizes)
+ {
+ const int half = sizes.size();
+ std::vector<double> res(half * 2 + 1);
+ double x = 0.0;
+
+ res[half] = 0.5;
+ for (int i = 1; i <= half; i++) {
+ x += sizes[half - i];
+ res[half - i] = 0.5 - x;
+ res[half + i] = 0.5 + x;
+ }
+
+ return res;
+ }
+
+ std::vector<uint16_t> samplePolynomial(const LscPolynomial &poly)
+ {
+ constexpr int k = RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
+
+ double m = poly.getM();
+ double x0 = cropRectangle_.x / m;
+ double y0 = cropRectangle_.y / m;
+ double w = cropRectangle_.width / m;
+ double h = cropRectangle_.height / m;
+ std::vector<uint16_t> res;
+
+ assert(xSizes_.size() * 2 + 1 == k);
+ assert(ySizes_.size() * 2 + 1 == k);
+
+ res.reserve(k * k);
+
+ std::vector<double> xPos(sizesListToPositions(xSizes_));
+ std::vector<double> yPos(sizesListToPositions(ySizes_));
+
+ for (int y = 0; y < k; y++) {
+ for (int x = 0; x < k; x++) {
+ double xp = x0 + xPos[x] * w;
+ double yp = y0 + yPos[y] * h;
+ /*
+ * The hardware uses 2.10 fixed point format and
+ * limits the legal values to [1..3.999]. Scale
+ * and clamp the sampled value accordingly.
+ */
+ int v = static_cast<int>(
+ poly.sampleAtNormalizedPixelPos(xp, yp) *
+ 1024);
+ v = std::min(std::max(v, 1024), 4095);
+ res.push_back(v);
+ }
+ }
+ return res;
+ }
+
+ Size sensorSize_;
+ Rectangle cropRectangle_;
+ const std::vector<double> &xSizes_;
+ const std::vector<double> &ySizes_;
+};
+
+class LscTableLoader
+{
+public:
+ int parseLscData(const YamlObject &yamlSets,
+ std::map<unsigned int, LensShadingCorrection::Components> &lscData)
+ {
+ const auto &sets = yamlSets.asList();
+
+ for (const auto &yamlSet : sets) {
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (lscData.count(ct)) {
+ LOG(RkISP1Lsc, Error)
+ << "Multiple sets found for color temperature "
+ << ct;
+ return -EINVAL;
+ }
+
+ LensShadingCorrection::Components &set = lscData[ct];
+
+ set.ct = ct;
+ set.r = parseTable(yamlSet, "r");
+ set.gr = parseTable(yamlSet, "gr");
+ set.gb = parseTable(yamlSet, "gb");
+ set.b = parseTable(yamlSet, "b");
+
+ if (set.r.empty() || set.gr.empty() ||
+ set.gb.empty() || set.b.empty()) {
+ LOG(RkISP1Lsc, Error)
+ << "Set for color temperature " << ct
+ << " is missing tables";
+ return -EINVAL;
+ }
+ }
+
+ if (lscData.empty()) {
+ LOG(RkISP1Lsc, Error) << "Failed to load any sets";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+private:
+ std::vector<uint16_t> parseTable(const YamlObject &tuningData,
+ const char *prop)
+ {
+ static constexpr unsigned int kLscNumSamples =
+ RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
+
+ std::vector<uint16_t> table =
+ tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (table.size() != kLscNumSamples) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: expected "
+ << kLscNumSamples
+ << " elements, got " << table.size();
+ return {};
+ }
+
+ return table;
+ }
+};
+
static std::vector<double> parseSizes(const YamlObject &tuningData,
const char *prop)
{
@@ -70,28 +295,10 @@ static std::vector<double> parseSizes(const YamlObject &tuningData,
return sizes;
}
-static std::vector<uint16_t> parseTable(const YamlObject &tuningData,
- const char *prop)
-{
- static constexpr unsigned int kLscNumSamples =
- RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
-
- std::vector<uint16_t> table =
- tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{});
- if (table.size() != kLscNumSamples) {
- LOG(RkISP1Lsc, Error)
- << "Invalid '" << prop << "' values: expected "
- << kLscNumSamples
- << " elements, got " << table.size();
- return {};
- }
-
- return table;
-}
-
LensShadingCorrection::LensShadingCorrection()
- : lastCt_({ 0, 0 })
+ : lastAppliedCt_(0), lastAppliedQuantizedCt_(0)
{
+ sets_.setQuantization(kColourTemperatureChangeThreshold);
}
/**
@@ -114,38 +321,30 @@ int LensShadingCorrection::init([[maybe_unused]] IPAContext &context,
return -EINVAL;
}
- const auto &sets = yamlSets.asList();
- for (const auto &yamlSet : sets) {
- uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
-
- if (sets_.count(ct)) {
- LOG(RkISP1Lsc, Error)
- << "Multiple sets found for color temperature "
- << ct;
- return -EINVAL;
- }
-
- Components &set = sets_[ct];
-
- set.ct = ct;
- set.r = parseTable(yamlSet, "r");
- set.gr = parseTable(yamlSet, "gr");
- set.gb = parseTable(yamlSet, "gb");
- set.b = parseTable(yamlSet, "b");
-
- if (set.r.empty() || set.gr.empty() ||
- set.gb.empty() || set.b.empty()) {
- LOG(RkISP1Lsc, Error)
- << "Set for color temperature " << ct
- << " is missing tables";
- return -EINVAL;
- }
+ std::map<unsigned int, Components> lscData;
+ int res = 0;
+ std::string type = tuningData["type"].get<std::string>("table");
+ if (type == "table") {
+ LOG(RkISP1Lsc, Debug) << "Loading tabular LSC data.";
+ auto loader = LscTableLoader();
+ res = loader.parseLscData(yamlSets, lscData);
+ } else if (type == "polynomial") {
+ LOG(RkISP1Lsc, Debug) << "Loading polynomial LSC data.";
+ auto loader = LscPolynomialLoader(context.sensorInfo.activeAreaSize,
+ context.sensorInfo.analogCrop,
+ xSize_,
+ ySize_);
+ res = loader.parseLscData(yamlSets, lscData);
+ } else {
+ LOG(RkISP1Lsc, Error) << "Unsupported LSC data type '"
+ << type << "'";
+ res = -EINVAL;
}
- if (sets_.empty()) {
- LOG(RkISP1Lsc, Error) << "Failed to load any sets";
- return -EINVAL;
- }
+ if (res)
+ return res;
+
+ sets_.setData(std::move(lscData));
return 0;
}
@@ -185,18 +384,12 @@ int LensShadingCorrection::configure(IPAContext &context,
return 0;
}
-void LensShadingCorrection::setParameters(rkisp1_params_cfg *params)
+void LensShadingCorrection::setParameters(rkisp1_cif_isp_lsc_config &config)
{
- struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
-
memcpy(config.x_grad_tbl, xGrad_, sizeof(config.x_grad_tbl));
memcpy(config.y_grad_tbl, yGrad_, sizeof(config.y_grad_tbl));
memcpy(config.x_size_tbl, xSizes_, sizeof(config.x_size_tbl));
memcpy(config.y_size_tbl, ySizes_, sizeof(config.y_size_tbl));
-
- params->module_en_update |= RKISP1_CIF_ISP_MODULE_LSC;
- params->module_ens |= RKISP1_CIF_ISP_MODULE_LSC;
- params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_LSC;
}
void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config,
@@ -208,131 +401,34 @@ void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config,
std::copy(set.b.begin(), set.b.end(), &config.b_data_tbl[0][0]);
}
-/*
- * Interpolate LSC parameters based on color temperature value.
- */
-void LensShadingCorrection::interpolateTable(rkisp1_cif_isp_lsc_config &config,
- const Components &set0,
- const Components &set1,
- const uint32_t ct)
-{
- double coeff0 = (set1.ct - ct) / static_cast<double>(set1.ct - set0.ct);
- double coeff1 = (ct - set0.ct) / static_cast<double>(set1.ct - set0.ct);
-
- for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++i) {
- for (unsigned int j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++j) {
- unsigned int sample = i * RKISP1_CIF_ISP_LSC_SAMPLES_MAX + j;
-
- config.r_data_tbl[i][j] =
- set0.r[sample] * coeff0 +
- set1.r[sample] * coeff1;
-
- config.gr_data_tbl[i][j] =
- set0.gr[sample] * coeff0 +
- set1.gr[sample] * coeff1;
-
- config.gb_data_tbl[i][j] =
- set0.gb[sample] * coeff0 +
- set1.gb[sample] * coeff1;
-
- config.b_data_tbl[i][j] =
- set0.b[sample] * coeff0 +
- set1.b[sample] * coeff1;
- }
- }
-}
-
/**
* \copydoc libcamera::ipa::Algorithm::prepare
*/
void LensShadingCorrection::prepare(IPAContext &context,
- const uint32_t frame,
+ [[maybe_unused]] const uint32_t frame,
[[maybe_unused]] IPAFrameContext &frameContext,
- rkisp1_params_cfg *params)
+ RkISP1Params *params)
{
- struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config;
-
- /*
- * If there is only one set, the configuration has already been done
- * for first frame.
- */
- if (sets_.size() == 1 && frame > 0)
- return;
-
- /*
- * If there is only one set, pick it. We can ignore lastCt_, as it will
- * never be relevant.
- */
- if (sets_.size() == 1) {
- setParameters(params);
- copyTable(config, sets_.cbegin()->second);
- return;
- }
-
uint32_t ct = context.activeState.awb.temperatureK;
- ct = std::clamp(ct, sets_.cbegin()->first, sets_.crbegin()->first);
-
- /*
- * If the original is the same, then it means the same adjustment would
- * be made. If the adjusted is the same, then it means that it's the
- * same as what was actually applied. Thus in these cases we can skip
- * reprogramming the LSC.
- *
- * original == adjusted can only happen if an interpolation
- * happened, or if original has an exact entry in sets_. This means
- * that if original != adjusted, then original was adjusted to
- * the nearest available entry in sets_, resulting in adjusted.
- * Clearly, any ct value that is in between original and adjusted
- * will be adjusted to the same adjusted value, so we can skip
- * reprogramming the LSC table.
- *
- * We also skip updating the original value, as the last one had a
- * larger bound and thus a larger range of ct values that will be
- * adjusted to the same adjusted.
- */
- if ((lastCt_.original <= ct && ct <= lastCt_.adjusted) ||
- (lastCt_.adjusted <= ct && ct <= lastCt_.original))
+ if (std::abs(static_cast<int>(ct) - static_cast<int>(lastAppliedCt_)) <
+ kColourTemperatureChangeThreshold)
return;
-
- setParameters(params);
-
- /*
- * The color temperature matches exactly one of the available LSC tables.
- */
- if (sets_.count(ct)) {
- copyTable(config, sets_[ct]);
- lastCt_ = { ct, ct };
+ unsigned int quantizedCt;
+ const Components &set = sets_.getInterpolated(ct, &quantizedCt);
+ if (lastAppliedQuantizedCt_ == quantizedCt)
return;
- }
- /* No shortcuts left; we need to round or interpolate */
- auto iter = sets_.upper_bound(ct);
- const Components &set1 = iter->second;
- const Components &set0 = (--iter)->second;
- uint32_t ct0 = set0.ct;
- uint32_t ct1 = set1.ct;
- uint32_t diff0 = ct - ct0;
- uint32_t diff1 = ct1 - ct;
- static constexpr double kThreshold = 0.1;
- float threshold = kThreshold * (ct1 - ct0);
-
- if (diff0 < threshold || diff1 < threshold) {
- const Components &set = diff0 < diff1 ? set0 : set1;
- LOG(RkISP1Lsc, Debug) << "using LSC table for " << set.ct;
- copyTable(config, set);
- lastCt_ = { ct, set.ct };
- return;
- }
+ auto config = params->block<BlockType::Lsc>();
+ config.setEnabled(true);
+ setParameters(*config);
+ copyTable(*config, set);
+
+ lastAppliedCt_ = ct;
+ lastAppliedQuantizedCt_ = quantizedCt;
- /*
- * ct is not within 10% of the difference between the neighbouring
- * color temperatures, so we need to interpolate.
- */
LOG(RkISP1Lsc, Debug)
- << "ct is " << ct << ", interpolating between "
- << ct0 << " and " << ct1;
- interpolateTable(config, set0, set1, ct);
- lastCt_ = { ct, ct };
+ << "ct is " << ct << ", quantized to "
+ << quantizedCt;
}
REGISTER_IPA_ALGORITHM(LensShadingCorrection, "LensShadingCorrection")
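
The rewrite above moves interpolation and caching into ipa::Interpolator: colour temperatures are quantized to multiples of the 10 K threshold, the two neighbouring tuning sets are blended by the interpolate() specialization, and a lookup that quantizes to an unchanged key short-circuits, so the LSC table is not reprogrammed. A short illustration, using only the Interpolator calls visible in this diff:

	ipa::Interpolator<LensShadingCorrection::Components> sets;
	sets.setQuantization(kColourTemperatureChangeThreshold);
	sets.setData(std::move(lscData));	/* keyed by colour temperature in K */

	unsigned int quantizedCt;
	const LensShadingCorrection::Components &set =
		sets.getInterpolated(4512, &quantizedCt);
	/* quantizedCt == 4510; a later lookup at 4513 quantizes to the same key. */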
diff --git a/src/ipa/rkisp1/algorithms/lsc.h b/src/ipa/rkisp1/algorithms/lsc.h
index e2a93a56..5a0824e3 100644
--- a/src/ipa/rkisp1/algorithms/lsc.h
+++ b/src/ipa/rkisp1/algorithms/lsc.h
@@ -2,13 +2,15 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * lsc.h - RkISP1 Lens Shading Correction control
+ * RkISP1 Lens Shading Correction control
*/
#pragma once
#include <map>
+#include "libipa/interpolator.h"
+
#include "algorithm.h"
namespace libcamera {
@@ -25,9 +27,8 @@ public:
int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
- rkisp1_params_cfg *params) override;
+ RkISP1Params *params) override;
-private:
struct Components {
uint32_t ct;
std::vector<uint16_t> r;
@@ -36,23 +37,23 @@ private:
std::vector<uint16_t> b;
};
- void setParameters(rkisp1_params_cfg *params);
+private:
+ void setParameters(rkisp1_cif_isp_lsc_config &config);
void copyTable(rkisp1_cif_isp_lsc_config &config, const Components &set0);
void interpolateTable(rkisp1_cif_isp_lsc_config &config,
const Components &set0, const Components &set1,
const uint32_t ct);
- std::map<uint32_t, Components> sets_;
+ ipa::Interpolator<Components> sets_;
std::vector<double> xSize_;
std::vector<double> ySize_;
uint16_t xGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
uint16_t yGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
uint16_t xSizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
uint16_t ySizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
- struct {
- uint32_t original;
- uint32_t adjusted;
- } lastCt_;
+
+ unsigned int lastAppliedCt_;
+ unsigned int lastAppliedQuantizedCt_;
};
} /* namespace ipa::rkisp1::algorithms */
diff --git a/src/ipa/rkisp1/algorithms/lux.cpp b/src/ipa/rkisp1/algorithms/lux.cpp
new file mode 100644
index 00000000..a467767e
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.cpp
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Lux control
+ */
+
+#include "lux.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/histogram.h"
+#include "libipa/lux.h"
+
+/**
+ * \file lux.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Lux
+ * \brief RkISP1 Lux control
+ *
+ * The Lux algorithm is responsible for estimating the lux level of the image.
+ * It doesn't take or generate any controls, but it provides a lux level for
+ * other algorithms (such as AGC) to use.
+ */
+
+/**
+ * \brief Construct an RkISP1 Lux algorithm module
+ */
+Lux::Lux()
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Lux::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ return lux_.parseTuningData(tuningData);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Lux::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double gain = frameContext.sensor.gain;
+
+ /* \todo Deduplicate the histogram calculation from AGC */
+ const rkisp1_cif_isp_stat *params = &stats->params;
+ Histogram yHist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+
+ double lux = lux_.estimateLux(exposureTime, gain, 1.0, yHist);
+ frameContext.lux.lux = lux;
+ metadata.set(controls::Lux, lux);
+}
+
+REGISTER_IPA_ALGORITHM(Lux, "Lux")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
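
The estimate itself is computed by libipa's Lux helper from tuning data. Broadly, and subject to libipa's exact scaling, it follows lux = referenceLux * (referenceExposureTime / exposureTime) * (referenceGain / gain) * (meanY / referenceY). An illustrative tuning entry, with assumed key names and made-up reference values:

    - Lux:
        referenceExposureTime: 10000
        referenceAnalogueGain: 4.0
        referenceDigitalGain: 1.0
        referenceY: 12000
        referenceLux: 1000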
diff --git a/src/ipa/rkisp1/algorithms/lux.h b/src/ipa/rkisp1/algorithms/lux.h
new file mode 100644
index 00000000..8a90de55
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Lux control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "libipa/lux.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Lux : public Algorithm
+{
+public:
+ Lux();
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ ipa::Lux lux_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/meson.build b/src/ipa/rkisp1/algorithms/meson.build
index 93a48329..c66b0b70 100644
--- a/src/ipa/rkisp1/algorithms/meson.build
+++ b/src/ipa/rkisp1/algorithms/meson.build
@@ -4,10 +4,13 @@ rkisp1_ipa_algorithms = files([
'agc.cpp',
'awb.cpp',
'blc.cpp',
+ 'ccm.cpp',
'cproc.cpp',
'dpcc.cpp',
'dpf.cpp',
'filter.cpp',
+ 'goc.cpp',
'gsl.cpp',
'lsc.cpp',
+ 'lux.cpp',
])
diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml
index cbcc43b8..0d99cb52 100644
--- a/src/ipa/rkisp1/data/imx219.yaml
+++ b/src/ipa/rkisp1/data/imx219.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml
index 43dddf20..202af36a 100644
--- a/src/ipa/rkisp1/data/imx258.yaml
+++ b/src/ipa/rkisp1/data/imx258.yaml
@@ -5,6 +5,7 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/meson.build b/src/ipa/rkisp1/data/meson.build
index 7150e155..1e3522b2 100644
--- a/src/ipa/rkisp1/data/meson.build
+++ b/src/ipa/rkisp1/data/meson.build
@@ -2,8 +2,12 @@
conf_files = files([
'imx219.yaml',
+ 'imx258.yaml',
+ 'ov2685.yaml',
'ov4689.yaml',
'ov5640.yaml',
+ 'ov5695.yaml',
+ 'ov8858.yaml',
'uncalibrated.yaml',
])
diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml
index 2068684c..60901296 100644
--- a/src/ipa/rkisp1/data/ov4689.yaml
+++ b/src/ipa/rkisp1/data/ov4689.yaml
@@ -6,8 +6,4 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 66
- Gr: 66
- Gb: 66
- B: 66
...
diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml
index 897b83cb..4b21d412 100644
--- a/src/ipa/rkisp1/data/ov5640.yaml
+++ b/src/ipa/rkisp1/data/ov5640.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- ColorProcessing:
- GammaSensorLinearization:
x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]
diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml
index a7bbd8d8..60901296 100644
--- a/src/ipa/rkisp1/data/uncalibrated.yaml
+++ b/src/ipa/rkisp1/data/uncalibrated.yaml
@@ -5,4 +5,5 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
...
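
The per-channel R/Gr/Gb/B values are dropped from the BlackLevelCorrection entries above because the algorithm can now obtain the black level from the camera sensor helper reachable through the new context.camHelper member. A sketch of that pattern — the helper accessor and member names are assumptions, as blc.cpp is not part of this excerpt:

int BlackLevelCorrection::init(IPAContext &context,
			       [[maybe_unused]] const YamlObject &tuningData)
{
	/* Assumed accessor; the level is taken as normalized to 16 bits. */
	std::optional<int16_t> blackLevel = context.camHelper->blackLevel();
	if (!blackLevel)
		return -ENODATA;

	blackLevelRed_ = *blackLevel;
	blackLevelGreenR_ = *blackLevel;
	blackLevelGreenB_ = *blackLevel;
	blackLevelBlue_ = *blackLevel;

	return 0;
}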
diff --git a/src/ipa/rkisp1/ipa_context.cpp b/src/ipa/rkisp1/ipa_context.cpp
index 070834fa..99611bd5 100644
--- a/src/ipa/rkisp1/ipa_context.cpp
+++ b/src/ipa/rkisp1/ipa_context.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * ipa_context.cpp - RkISP1 IPA Context
+ * RkISP1 IPA Context
*/
#include "ipa_context.h"
@@ -78,11 +78,11 @@ namespace libcamera::ipa::rkisp1 {
* \var IPASessionConfiguration::sensor
* \brief Sensor-specific configuration of the IPA
*
- * \var IPASessionConfiguration::sensor.minShutterSpeed
- * \brief Minimum shutter speed supported with the sensor
+ * \var IPASessionConfiguration::sensor.minExposureTime
+ * \brief Minimum exposure time supported with the sensor
*
- * \var IPASessionConfiguration::sensor.maxShutterSpeed
- * \brief Maximum shutter speed supported with the sensor
+ * \var IPASessionConfiguration::sensor.maxExposureTime
+ * \brief Maximum exposure time supported with the sensor
*
* \var IPASessionConfiguration::sensor.minAnalogueGain
* \brief Minimum analogue gain supported with the sensor
@@ -106,6 +106,11 @@ namespace libcamera::ipa::rkisp1 {
*/
/**
+ * \var IPASessionConfiguration::paramFormat
+ * \brief The fourcc of the parameters buffers format
+ */
+
+/**
* \struct IPAActiveState
* \brief Active state for algorithms
*
@@ -137,13 +142,49 @@ namespace libcamera::ipa::rkisp1 {
* \var IPAActiveState::agc
* \brief State for the Automatic Gain Control algorithm
*
- * The exposure and gain are the latest values computed by the AGC algorithm.
+ * The \a automatic variables track the latest values computed by algorithm
+ * based on the latest processed statistics. All other variables track the
+ * consolidated controls requested in queued requests.
*
- * \var IPAActiveState::agc.exposure
- * \brief Exposure time expressed as a number of lines
+ * \struct IPAActiveState::agc.manual
+ * \brief Manual exposure time and analog gain (set through requests)
*
- * \var IPAActiveState::agc.gain
- * \brief Analogue gain multiplier
+ * \var IPAActiveState::agc.manual.exposure
+ * \brief Manual exposure time expressed as a number of lines as set by the
+ * ExposureTime control
+ *
+ * \var IPAActiveState::agc.manual.gain
+ * \brief Manual analogue gain as set by the AnalogueGain control
+ *
+ * \struct IPAActiveState::agc.automatic
+ * \brief Automatic exposure time and analog gain (computed by the algorithm)
+ *
+ * \var IPAActiveState::agc.automatic.exposure
+ * \brief Automatic exposure time expressed as a number of lines
+ *
+ * \var IPAActiveState::agc.automatic.gain
+ * \brief Automatic analogue gain multiplier
+ *
+ * \var IPAActiveState::agc.autoExposureEnabled
+ * \brief Manual/automatic AGC state (exposure) as set by the ExposureTimeMode control
+ *
+ * \var IPAActiveState::agc.autoGainEnabled
+ * \brief Manual/automatic AGC state (gain) as set by the AnalogueGainMode control
+ *
+ * \var IPAActiveState::agc.constraintMode
+ * \brief Constraint mode as set by the AeConstraintMode control
+ *
+ * \var IPAActiveState::agc.exposureMode
+ * \brief Exposure mode as set by the AeExposureMode control
+ *
+ * \var IPAActiveState::agc.meteringMode
+ * \brief Metering mode as set by the AeMeteringMode control
+ *
+ * \var IPAActiveState::agc.minFrameDuration
+ * \brief Minimum frame duration as set by the FrameDurationLimits control
+ *
+ * \var IPAActiveState::agc.maxFrameDuration
+ * \brief Maximum frame duration as set by the FrameDurationLimits control
*/
/**
@@ -153,30 +194,12 @@ namespace libcamera::ipa::rkisp1 {
* \struct IPAActiveState::awb.gains
* \brief White balance gains
*
- * \struct IPAActiveState::awb.gains.manual
+ * \var IPAActiveState::awb.gains.manual
* \brief Manual white balance gains (set through requests)
*
- * \var IPAActiveState::awb.gains.manual.red
- * \brief Manual white balance gain for R channel
- *
- * \var IPAActiveState::awb.gains.manual.green
- * \brief Manual white balance gain for G channel
- *
- * \var IPAActiveState::awb.gains.manual.blue
- * \brief Manual white balance gain for B channel
- *
- * \struct IPAActiveState::awb.gains.automatic
+ * \var IPAActiveState::awb.gains.automatic
* \brief Automatic white balance gains (computed by the algorithm)
*
- * \var IPAActiveState::awb.gains.automatic.red
- * \brief Automatic white balance gain for R channel
- *
- * \var IPAActiveState::awb.gains.automatic.green
- * \brief Automatic white balance gain for G channel
- *
- * \var IPAActiveState::awb.gains.automatic.blue
- * \brief Automatic white balance gain for B channel
- *
* \var IPAActiveState::awb.temperatureK
* \brief Estimated color temperature
*
@@ -218,6 +241,14 @@ namespace libcamera::ipa::rkisp1 {
*/
/**
+ * \var IPAActiveState::goc
+ * \brief State for the gamma out correction (GOC) algorithm
+ *
+ * \var IPAActiveState::goc.gamma
+ * \brief Gamma value applied as 1.0/gamma
+ */
+
+/**
* \struct IPAFrameContext
* \brief Per-frame context for algorithms
*
@@ -254,15 +285,57 @@ namespace libcamera::ipa::rkisp1 {
* \brief Automatic Gain Control parameters for this frame
*
* The exposure and gain are provided by the AGC algorithm, and are to be
- * applied to the sensor in order to take effect for this frame.
+ * applied to the sensor in order to take effect for this frame. Additionally,
+ * the vertical blanking period is computed to maintain a consistent frame
+ * rate, matching the FrameDurationLimits set by the user.
*
* \var IPAFrameContext::agc.exposure
- * \brief Exposure time expressed as a number of lines
+ * \brief Exposure time expressed as a number of lines computed by the algorithm
*
* \var IPAFrameContext::agc.gain
- * \brief Analogue gain multiplier
+ * \brief Analogue gain multiplier computed by the algorithm
*
* The gain should be adapted to the sensor-specific gain code before being applied.
+ *
+ * \var IPAFrameContext::agc.vblank
+ * \brief Vertical blanking parameter computed by the algorithm
+ *
+ * \var IPAFrameContext::agc.autoExposureEnabled
+ * \brief Manual/automatic AGC state (exposure) as set by the ExposureTimeMode control
+ *
+ * \var IPAFrameContext::agc.autoGainEnabled
+ * \brief Manual/automatic AGC state (gain) as set by the AnalogueGainMode control
+ *
+ * \var IPAFrameContext::agc.constraintMode
+ * \brief Constraint mode as set by the AeConstraintMode control
+ *
+ * \var IPAFrameContext::agc.exposureMode
+ * \brief Exposure mode as set by the AeExposureMode control
+ *
+ * \var IPAFrameContext::agc.meteringMode
+ * \brief Metering mode as set by the AeMeteringMode control
+ *
+ * \var IPAFrameContext::agc.minFrameDuration
+ * \brief Minimum frame duration as set by the FrameDurationLimits control
+ *
+ * \var IPAFrameContext::agc.maxFrameDuration
+ * \brief Maximum frame duration as set by the FrameDurationLimits control
+ *
+ * \var IPAFrameContext::agc.frameDuration
+ * \brief The actual FrameDuration used by the algorithm for the frame
+ *
+ * \var IPAFrameContext::agc.updateMetering
+ * \brief Indicate if new ISP AGC metering parameters need to be applied
+ *
+ * \var IPAFrameContext::agc.autoExposureModeChange
+ * \brief Indicate if autoExposureEnabled has changed from true in the previous
+ * frame to false in the current frame, and no manual exposure value has been
+ * supplied in the current frame.
+ *
+ * \var IPAFrameContext::agc.autoGainModeChange
+ * \brief Indicate if autoGainEnabled has changed from true in the previous
+ * frame to false in the current frame, and no manual gain value has been
+ * supplied in the current frame.
*/
/**
@@ -272,15 +345,6 @@ namespace libcamera::ipa::rkisp1 {
* \struct IPAFrameContext::awb.gains
* \brief White balance gains
*
- * \var IPAFrameContext::awb.gains.red
- * \brief White balance gain for R channel
- *
- * \var IPAFrameContext::awb.gains.green
- * \brief White balance gain for G channel
- *
- * \var IPAFrameContext::awb.gains.blue
- * \brief White balance gain for B channel
- *
* \var IPAFrameContext::awb.temperatureK
* \brief Estimated color temperature
*
@@ -334,6 +398,18 @@ namespace libcamera::ipa::rkisp1 {
*/
/**
+ * \var IPAFrameContext::goc
+ * \brief Gamma out correction parameters for this frame
+ *
+ * \var IPAFrameContext::goc.gamma
+ * \brief Gamma value applied as 1.0/gamma
+ *
+ * \var IPAFrameContext::goc.update
+ * \brief Indicates if the goc parameters have been updated compared to the
+ * previous frame
+ */
+
+/**
* \var IPAFrameContext::sensor
* \brief Sensor configuration that has been used for this frame
*
@@ -351,6 +427,9 @@ namespace libcamera::ipa::rkisp1 {
* \var IPAContext::hw
* \brief RkISP1 version-specific hardware parameters
*
+ * \var IPAContext::sensorInfo
+ * \brief The IPA session sensorInfo, immutable during the session
+ *
* \var IPAContext::configuration
* \brief The IPA session configuration, immutable during the session
*
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
index 10d8f38c..474f7036 100644
--- a/src/ipa/rkisp1/ipa_context.h
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -2,18 +2,29 @@
/*
* Copyright (C) 2021-2022, Ideas On Board
*
- * ipa_context.h - RkISP1 IPA Context
+ * RkISP1 IPA Context
*
*/
#pragma once
+#include <memory>
+
#include <linux/rkisp1-config.h>
#include <libcamera/base/utils.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/debug_controls.h"
+#include "libcamera/internal/matrix.h"
+#include "libcamera/internal/vector.h"
+
+#include <libipa/camera_sensor_helper.h>
#include <libipa/fc_queue.h>
namespace libcamera {
@@ -25,6 +36,7 @@ struct IPAHwSettings {
unsigned int numHistogramBins;
unsigned int numHistogramWeights;
unsigned int numGammaOutSamples;
+ bool compand;
};
struct IPASessionConfiguration {
@@ -42,8 +54,8 @@ struct IPASessionConfiguration {
} lsc;
struct {
- utils::Duration minShutterSpeed;
- utils::Duration maxShutterSpeed;
+ utils::Duration minExposureTime;
+ utils::Duration maxExposureTime;
double minAnalogueGain;
double maxAnalogueGain;
@@ -53,6 +65,7 @@ struct IPASessionConfiguration {
} sensor;
bool raw;
+ uint32_t paramFormat;
};
struct IPAActiveState {
@@ -66,21 +79,19 @@ struct IPAActiveState {
double gain;
} automatic;
- bool autoEnabled;
+ bool autoExposureEnabled;
+ bool autoGainEnabled;
+ controls::AeConstraintModeEnum constraintMode;
+ controls::AeExposureModeEnum exposureMode;
+ controls::AeMeteringModeEnum meteringMode;
+ utils::Duration minFrameDuration;
+ utils::Duration maxFrameDuration;
} agc;
struct {
struct {
- struct {
- double red;
- double green;
- double blue;
- } manual;
- struct {
- double red;
- double green;
- double blue;
- } automatic;
+ RGB<double> manual;
+ RGB<double> automatic;
} gains;
unsigned int temperatureK;
@@ -88,6 +99,10 @@ struct IPAActiveState {
} awb;
struct {
+ Matrix<float, 3, 3> ccm;
+ } ccm;
+
+ struct {
int8_t brightness;
uint8_t contrast;
uint8_t saturation;
@@ -101,24 +116,34 @@ struct IPAActiveState {
uint8_t denoise;
uint8_t sharpness;
} filter;
+
+ struct {
+ double gamma;
+ } goc;
};
struct IPAFrameContext : public FrameContext {
struct {
uint32_t exposure;
double gain;
- bool autoEnabled;
+ uint32_t vblank;
+ bool autoExposureEnabled;
+ bool autoGainEnabled;
+ controls::AeConstraintModeEnum constraintMode;
+ controls::AeExposureModeEnum exposureMode;
+ controls::AeMeteringModeEnum meteringMode;
+ utils::Duration minFrameDuration;
+ utils::Duration maxFrameDuration;
+ utils::Duration frameDuration;
+ bool updateMetering;
+ bool autoExposureModeChange;
+ bool autoGainModeChange;
} agc;
struct {
- struct {
- double red;
- double green;
- double blue;
- } gains;
-
- unsigned int temperatureK;
+ RGB<double> gains;
bool autoEnabled;
+ unsigned int temperatureK;
} awb;
struct {
@@ -140,17 +165,43 @@ struct IPAFrameContext : public FrameContext {
} filter;
struct {
+ double gamma;
+ bool update;
+ } goc;
+
+ struct {
uint32_t exposure;
double gain;
} sensor;
+
+ struct {
+ Matrix<float, 3, 3> ccm;
+ } ccm;
+
+ struct {
+ double lux;
+ } lux;
};
struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : hw(nullptr), frameContexts(frameContextSize)
+ {
+ }
+
const IPAHwSettings *hw;
+ IPACameraSensorInfo sensorInfo;
IPASessionConfiguration configuration;
IPAActiveState activeState;
FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
+
+ DebugMetadata debugMetadata;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper;
};
} /* namespace ipa::rkisp1 */
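
Replacing the hand-written red/green/blue structs with RGB<double> from the internal vector header lets gain handling use vector arithmetic. A small illustration, assuming Vector's array constructor, named accessors and scalar multiplication:

	RGB<double> gains({ 1.8, 1.0, 2.1 });	/* r, g, b */
	gains = gains * 1.02;			/* element-wise scaling */
	double red = gains.r();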
diff --git a/src/ipa/rkisp1/meson.build b/src/ipa/rkisp1/meson.build
index e813da53..26a9fa40 100644
--- a/src/ipa/rkisp1/meson.build
+++ b/src/ipa/rkisp1/meson.build
@@ -7,17 +7,16 @@ ipa_name = 'ipa_rkisp1'
rkisp1_ipa_sources = files([
'ipa_context.cpp',
+ 'params.cpp',
'rkisp1.cpp',
])
rkisp1_ipa_sources += rkisp1_ipa_algorithms
-mod = shared_module(ipa_name,
- [rkisp1_ipa_sources, libcamera_generated_ipa_headers],
+mod = shared_module(ipa_name, rkisp1_ipa_sources,
name_prefix : '',
- include_directories : [ipa_includes, libipa_includes],
- dependencies : libcamera_private,
- link_with : libipa,
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
install : true,
install_dir : ipa_install_dir)
diff --git a/src/ipa/rkisp1/module.h b/src/ipa/rkisp1/module.h
index 89f83208..69e9bc82 100644
--- a/src/ipa/rkisp1/module.h
+++ b/src/ipa/rkisp1/module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas On Board
*
- * module.h - RkISP1 IPA Module
+ * RkISP1 IPA Module
*/
#pragma once
@@ -14,13 +14,14 @@
#include <libipa/module.h>
#include "ipa_context.h"
+#include "params.h"
namespace libcamera {
namespace ipa::rkisp1 {
using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo,
- rkisp1_params_cfg, rkisp1_stat_buffer>;
+ RkISP1Params, rkisp1_stat_buffer>;
} /* namespace ipa::rkisp1 */
diff --git a/src/ipa/rkisp1/params.cpp b/src/ipa/rkisp1/params.cpp
new file mode 100644
index 00000000..4c0b051c
--- /dev/null
+++ b/src/ipa/rkisp1/params.cpp
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 ISP Parameters
+ */
+
+#include "params.h"
+
+#include <map>
+#include <stddef.h>
+#include <string.h>
+
+#include <linux/rkisp1-config.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RkISP1Params)
+
+namespace ipa::rkisp1 {
+
+namespace {
+
+struct BlockTypeInfo {
+ enum rkisp1_ext_params_block_type type;
+ size_t size;
+ size_t offset;
+ uint32_t enableBit;
+};
+
+#define RKISP1_BLOCK_TYPE_ENTRY(block, id, type, category, bit) \
+ { BlockType::block, { \
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \
+ sizeof(struct rkisp1_cif_isp_##type##_config), \
+ offsetof(struct rkisp1_params_cfg, category.type##_config), \
+ RKISP1_CIF_ISP_MODULE_##bit, \
+ } }
+
+#define RKISP1_BLOCK_TYPE_ENTRY_MEAS(block, id, type) \
+ RKISP1_BLOCK_TYPE_ENTRY(block, id##_MEAS, type, meas, id)
+
+#define RKISP1_BLOCK_TYPE_ENTRY_OTHERS(block, id, type) \
+ RKISP1_BLOCK_TYPE_ENTRY(block, id, type, others, id)
+
+#define RKISP1_BLOCK_TYPE_ENTRY_EXT(block, id, type) \
+ { BlockType::block, { \
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \
+ sizeof(struct rkisp1_cif_isp_##type##_config), \
+ 0, 0, \
+ } }
+
+const std::map<BlockType, BlockTypeInfo> kBlockTypeInfo = {
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bls, BLS, bls),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpcc, DPCC, dpcc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Sdg, SDG, sdg),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(AwbGain, AWB_GAIN, awb_gain),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Flt, FLT, flt),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bdm, BDM, bdm),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ctk, CTK, ctk),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Goc, GOC, goc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpf, DPF, dpf),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(DpfStrength, DPF_STRENGTH, dpf_strength),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Cproc, CPROC, cproc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ie, IE, ie),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Lsc, LSC, lsc),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Awb, AWB, awb_meas),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Hst, HST, hst),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Aec, AEC, aec),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Afc, AFC, afc),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandBls, COMPAND_BLS, compand_bls),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandExpand, COMPAND_EXPAND, compand_curve),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandCompress, COMPAND_COMPRESS, compand_curve),
+};
+
+} /* namespace */
+
+RkISP1ParamsBlockBase::RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type,
+ const Span<uint8_t> &data)
+ : params_(params), type_(type)
+{
+ if (params_->format() == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) {
+ header_ = data.subspan(0, sizeof(rkisp1_ext_params_block_header));
+ data_ = data.subspan(sizeof(rkisp1_ext_params_block_header));
+ } else {
+ data_ = data;
+ }
+}
+
+void RkISP1ParamsBlockBase::setEnabled(bool enabled)
+{
+ /*
+ * For the legacy fixed format, blocks are enabled in the top-level
+ * header. Delegate to the RkISP1Params class.
+ */
+ if (params_->format() == V4L2_META_FMT_RK_ISP1_PARAMS)
+ return params_->setBlockEnabled(type_, enabled);
+
+ /*
+ * For the extensible format, set the enable and disable flags in the
+ * block header directly.
+ */
+ struct rkisp1_ext_params_block_header *header =
+ reinterpret_cast<struct rkisp1_ext_params_block_header *>(header_.data());
+ header->flags &= ~(RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE);
+ header->flags |= enabled ? RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE
+ : RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE;
+}
+
+RkISP1Params::RkISP1Params(uint32_t format, Span<uint8_t> data)
+ : format_(format), data_(data), used_(0)
+{
+ if (format_ == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) {
+ struct rkisp1_ext_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_ext_params_cfg *>(data.data());
+
+ cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
+ cfg->data_size = 0;
+
+ used_ += offsetof(struct rkisp1_ext_params_cfg, data);
+ } else {
+ memset(data.data(), 0, data.size());
+ used_ = sizeof(struct rkisp1_params_cfg);
+ }
+}
+
+void RkISP1Params::setBlockEnabled(BlockType type, bool enabled)
+{
+ const BlockTypeInfo &info = kBlockTypeInfo.at(type);
+
+ struct rkisp1_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_params_cfg *>(data_.data());
+ if (enabled)
+ cfg->module_ens |= info.enableBit;
+ else
+ cfg->module_ens &= ~info.enableBit;
+}
+
+Span<uint8_t> RkISP1Params::block(BlockType type)
+{
+ auto infoIt = kBlockTypeInfo.find(type);
+ if (infoIt == kBlockTypeInfo.end()) {
+ LOG(RkISP1Params, Error)
+ << "Invalid parameters block type "
+ << utils::to_underlying(type);
+ return {};
+ }
+
+ const BlockTypeInfo &info = infoIt->second;
+
+ /*
+ * For the legacy format, return a block referencing the fixed location
+ * of the data.
+ */
+ if (format_ == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ /*
+ * Blocks available only in the extensible parameters format have
+ * an offset of 0. Return an empty span in that case.
+ */
+ if (info.offset == 0) {
+ LOG(RkISP1Params, Error)
+ << "Block type " << utils::to_underlying(type)
+ << " unavailable in fixed parameters format";
+ return {};
+ }
+
+ struct rkisp1_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_params_cfg *>(data_.data());
+
+ cfg->module_cfg_update |= info.enableBit;
+ cfg->module_en_update |= info.enableBit;
+
+ return data_.subspan(info.offset, info.size);
+ }
+
+ /*
+ * For the extensible format, allocate memory for the block, including
+ * the header. Look up the block in the cache first. If an algorithm
+ * requests the same block type twice, it should get the same block.
+ */
+ auto cacheIt = blocks_.find(type);
+ if (cacheIt != blocks_.end())
+ return cacheIt->second;
+
+ /* Make sure we don't run out of space. */
+ size_t size = sizeof(struct rkisp1_ext_params_block_header)
+ + ((info.size + 7) & ~7);
+ if (size > data_.size() - used_) {
+ LOG(RkISP1Params, Error)
+ << "Out of memory to allocate block type "
+ << utils::to_underlying(type);
+ return {};
+ }
+
+ /* Allocate a new block, clear its memory, and initialize its header. */
+ Span<uint8_t> block = data_.subspan(used_, size);
+ used_ += size;
+
+ struct rkisp1_ext_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_ext_params_cfg *>(data_.data());
+ cfg->data_size += size;
+
+ memset(block.data(), 0, block.size());
+
+ struct rkisp1_ext_params_block_header *header =
+ reinterpret_cast<struct rkisp1_ext_params_block_header *>(block.data());
+ header->type = info.type;
+ header->size = block.size();
+
+ /* Update the cache. */
+ blocks_[type] = block;
+
+ return block;
+}
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
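
From an algorithm's point of view, the machinery above reduces to a short pattern that works identically for the fixed and extensible buffer formats. An illustrative prepare() body, using the BLS block with arbitrary values:

void Example::prepare([[maybe_unused]] IPAContext &context,
		      [[maybe_unused]] const uint32_t frame,
		      [[maybe_unused]] IPAFrameContext &frameContext,
		      RkISP1Params *params)
{
	auto config = params->block<BlockType::Bls>();
	config.setEnabled(true);

	/* Subtract a fixed black level on all four Bayer channels. */
	config->enable_auto = 0;
	config->fixed_val.r = 256;
	config->fixed_val.gr = 256;
	config->fixed_val.gb = 256;
	config->fixed_val.b = 256;
}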
diff --git a/src/ipa/rkisp1/params.h b/src/ipa/rkisp1/params.h
new file mode 100644
index 00000000..40450e34
--- /dev/null
+++ b/src/ipa/rkisp1/params.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 ISP Parameters
+ */
+
+#pragma once
+
+#include <map>
+#include <stdint.h>
+
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+namespace ipa::rkisp1 {
+
+enum class BlockType {
+ Bls,
+ Dpcc,
+ Sdg,
+ AwbGain,
+ Flt,
+ Bdm,
+ Ctk,
+ Goc,
+ Dpf,
+ DpfStrength,
+ Cproc,
+ Ie,
+ Lsc,
+ Awb,
+ Hst,
+ Aec,
+ Afc,
+ CompandBls,
+ CompandExpand,
+ CompandCompress,
+};
+
+namespace details {
+
+template<BlockType B>
+struct block_type {
+};
+
+#define RKISP1_DEFINE_BLOCK_TYPE(blockType, blockStruct) \
+template<> \
+struct block_type<BlockType::blockType> { \
+ using type = struct rkisp1_cif_isp_##blockStruct##_config; \
+};
+
+RKISP1_DEFINE_BLOCK_TYPE(Bls, bls)
+RKISP1_DEFINE_BLOCK_TYPE(Dpcc, dpcc)
+RKISP1_DEFINE_BLOCK_TYPE(Sdg, sdg)
+RKISP1_DEFINE_BLOCK_TYPE(AwbGain, awb_gain)
+RKISP1_DEFINE_BLOCK_TYPE(Flt, flt)
+RKISP1_DEFINE_BLOCK_TYPE(Bdm, bdm)
+RKISP1_DEFINE_BLOCK_TYPE(Ctk, ctk)
+RKISP1_DEFINE_BLOCK_TYPE(Goc, goc)
+RKISP1_DEFINE_BLOCK_TYPE(Dpf, dpf)
+RKISP1_DEFINE_BLOCK_TYPE(DpfStrength, dpf_strength)
+RKISP1_DEFINE_BLOCK_TYPE(Cproc, cproc)
+RKISP1_DEFINE_BLOCK_TYPE(Ie, ie)
+RKISP1_DEFINE_BLOCK_TYPE(Lsc, lsc)
+RKISP1_DEFINE_BLOCK_TYPE(Awb, awb_meas)
+RKISP1_DEFINE_BLOCK_TYPE(Hst, hst)
+RKISP1_DEFINE_BLOCK_TYPE(Aec, aec)
+RKISP1_DEFINE_BLOCK_TYPE(Afc, afc)
+RKISP1_DEFINE_BLOCK_TYPE(CompandBls, compand_bls)
+RKISP1_DEFINE_BLOCK_TYPE(CompandExpand, compand_curve)
+RKISP1_DEFINE_BLOCK_TYPE(CompandCompress, compand_curve)
+
+} /* namespace details */
+
+class RkISP1Params;
+
+class RkISP1ParamsBlockBase
+{
+public:
+ RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type,
+ const Span<uint8_t> &data);
+
+ Span<uint8_t> data() const { return data_; }
+
+ void setEnabled(bool enabled);
+
+private:
+ LIBCAMERA_DISABLE_COPY(RkISP1ParamsBlockBase)
+
+ RkISP1Params *params_;
+ BlockType type_;
+ Span<uint8_t> header_;
+ Span<uint8_t> data_;
+};
+
+template<BlockType B>
+class RkISP1ParamsBlock : public RkISP1ParamsBlockBase
+{
+public:
+ using Type = typename details::block_type<B>::type;
+
+ RkISP1ParamsBlock(RkISP1Params *params, const Span<uint8_t> &data)
+ : RkISP1ParamsBlockBase(params, B, data)
+ {
+ }
+
+ const Type *operator->() const
+ {
+ return reinterpret_cast<const Type *>(data().data());
+ }
+
+ Type *operator->()
+ {
+ return reinterpret_cast<Type *>(data().data());
+ }
+
+ const Type &operator*() const &
+ {
+ return *reinterpret_cast<const Type *>(data().data());
+ }
+
+ Type &operator*() &
+ {
+ return *reinterpret_cast<Type *>(data().data());
+ }
+};
+
+class RkISP1Params
+{
+public:
+ RkISP1Params(uint32_t format, Span<uint8_t> data);
+
+ template<BlockType B>
+ RkISP1ParamsBlock<B> block()
+ {
+ return RkISP1ParamsBlock<B>(this, block(B));
+ }
+
+ uint32_t format() const { return format_; }
+ size_t size() const { return used_; }
+
+private:
+ friend class RkISP1ParamsBlockBase;
+
+ Span<uint8_t> block(BlockType type);
+ void setBlockEnabled(BlockType type, bool enabled);
+
+ uint32_t format_;
+
+ Span<uint8_t> data_;
+ size_t used_;
+
+ std::map<BlockType, Span<uint8_t>> blocks_;
+};
+
+} /* namespace ipa::rkisp1 */
+
} /* namespace libcamera */
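
The block_type trait binds each BlockType to its payload struct at compile time, so requesting a block with the wrong struct is a build error rather than silent buffer corruption. For instance (with <type_traits>):

	static_assert(std::is_same_v<RkISP1ParamsBlock<BlockType::Goc>::Type,
				     rkisp1_cif_isp_goc_config>);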
diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp
index 9dc5f53c..7547d2f2 100644
--- a/src/ipa/rkisp1/rkisp1.cpp
+++ b/src/ipa/rkisp1/rkisp1.cpp
@@ -2,12 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - RkISP1 Image Processing Algorithms
+ * RkISP1 Image Processing Algorithms
*/
#include <algorithm>
-#include <math.h>
-#include <queue>
+#include <array>
+#include <chrono>
#include <stdint.h>
#include <string.h>
@@ -18,20 +18,22 @@
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
-#include <libcamera/request.h>
#include "libcamera/internal/formats.h"
#include "libcamera/internal/mapped_framebuffer.h"
#include "libcamera/internal/yaml_parser.h"
#include "algorithms/algorithm.h"
-#include "libipa/camera_sensor_helper.h"
#include "ipa_context.h"
+#include "params.h"
namespace libcamera {
@@ -63,9 +65,9 @@ public:
void unmapBuffers(const std::vector<unsigned int> &ids) override;
void queueRequest(const uint32_t frame, const ControlList &controls) override;
- void fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) override;
- void processStatsBuffer(const uint32_t frame, const uint32_t bufferId,
- const ControlList &sensorControls) override;
+ void computeParams(const uint32_t frame, const uint32_t bufferId) override;
+ void processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls) override;
protected:
std::string logPrefix() const override;
@@ -81,9 +83,6 @@ private:
ControlInfoMap sensorControls_;
- /* Interface to the Camera Helper */
- std::unique_ptr<CameraSensorHelper> camHelper_;
-
/* Local parameter storage */
struct IPAContext context_;
};
@@ -95,6 +94,15 @@ const IPAHwSettings ipaHwSettingsV10{
RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10,
RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10,
RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10,
+ false,
+};
+
+const IPAHwSettings ipaHwSettingsIMX8MP{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V10,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10,
+ true,
};
const IPAHwSettings ipaHwSettingsV12{
@@ -102,16 +110,14 @@ const IPAHwSettings ipaHwSettingsV12{
RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12,
RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12,
RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12,
+ false,
};
/* List of controls handled by the RkISP1 IPA */
const ControlInfoMap::Map rkisp1Controls{
- { &controls::AeEnable, ControlInfo(false, true) },
{ &controls::AwbEnable, ControlInfo(false, true) },
{ &controls::ColourGains, ControlInfo(0.0f, 3.996f, 1.0f) },
- { &controls::Brightness, ControlInfo(-1.0f, 0.993f, 0.0f) },
- { &controls::Contrast, ControlInfo(0.0f, 1.993f, 1.0f) },
- { &controls::Saturation, ControlInfo(0.0f, 1.993f, 1.0f) },
+ { &controls::DebugMetadataEnable, ControlInfo(false, true, false) },
{ &controls::Sharpness, ControlInfo(0.0f, 10.0f, 1.0f) },
{ &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
};
@@ -119,7 +125,7 @@ const ControlInfoMap::Map rkisp1Controls{
} /* namespace */
IPARkISP1::IPARkISP1()
- : context_({ {}, {}, {}, { kMaxFrameContexts } })
+ : context_(kMaxFrameContexts)
{
}
@@ -136,9 +142,11 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
/* \todo Add support for other revisions */
switch (hwRevision) {
case RKISP1_V10:
- case RKISP1_V_IMX8MP:
context_.hw = &ipaHwSettingsV10;
break;
+ case RKISP1_V_IMX8MP:
+ context_.hw = &ipaHwSettingsIMX8MP;
+ break;
case RKISP1_V12:
context_.hw = &ipaHwSettingsV12;
break;
@@ -151,16 +159,18 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision;
- camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
- if (!camHelper_) {
+ context_.sensorInfo = sensorInfo;
+
+ context_.camHelper = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!context_.camHelper) {
LOG(IPARkISP1, Error)
<< "Failed to create camera sensor helper for "
<< settings.sensorModel;
return -ENODEV;
}
- context_.configuration.sensor.lineDuration = sensorInfo.minLineLength
- * 1.0s / sensorInfo.pixelRate;
+ context_.configuration.sensor.lineDuration =
+ sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate;
/* Load the tuning data file. */
File file(settings.configurationFile);
@@ -234,6 +244,8 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
context_.activeState = {};
context_.frameContexts.clear();
+ context_.configuration.paramFormat = ipaConfig.paramFormat;
+
const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
const ControlInfo vBlank = sensorControls_.find(V4L2_CID_VBLANK)->second;
context_.configuration.sensor.defVBlank = vBlank.def().get<int32_t>();
@@ -245,17 +257,19 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
/*
* When the AGC computes the new exposure values for a frame, it needs
- * to know the limits for shutter speed and analogue gain.
- * As it depends on the sensor, update it with the controls.
+ * to know the limits for exposure time and analogue gain. As it depends
+ * on the sensor, update it with the controls.
*
- * \todo take VBLANK into account for maximum shutter speed
+ * \todo take VBLANK into account for maximum exposure time
*/
- context_.configuration.sensor.minShutterSpeed =
+ context_.configuration.sensor.minExposureTime =
minExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.sensor.maxShutterSpeed =
+ context_.configuration.sensor.maxExposureTime =
maxExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.sensor.minAnalogueGain = camHelper_->gain(minGain);
- context_.configuration.sensor.maxAnalogueGain = camHelper_->gain(maxGain);
+ context_.configuration.sensor.minAnalogueGain =
+ context_.camHelper->gain(minGain);
+ context_.configuration.sensor.maxAnalogueGain =
+ context_.camHelper->gain(maxGain);
context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(),
[](auto &cfg) -> bool {
@@ -313,6 +327,7 @@ void IPARkISP1::unmapBuffers(const std::vector<unsigned int> &ids)
void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls)
{
IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+ context_.debugMetadata.enableByControl(controls);
for (auto const &a : algorithms()) {
Algorithm *algo = static_cast<Algorithm *>(a.get());
@@ -322,25 +337,21 @@ void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls)
}
}
-void IPARkISP1::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId)
+void IPARkISP1::computeParams(const uint32_t frame, const uint32_t bufferId)
{
IPAFrameContext &frameContext = context_.frameContexts.get(frame);
- rkisp1_params_cfg *params =
- reinterpret_cast<rkisp1_params_cfg *>(
- mappedBuffers_.at(bufferId).planes()[0].data());
-
- /* Prepare parameters buffer. */
- memset(params, 0, sizeof(*params));
+ RkISP1Params params(context_.configuration.paramFormat,
+ mappedBuffers_.at(bufferId).planes()[0]);
for (auto const &algo : algorithms())
- algo->prepare(context_, frame, frameContext, params);
+ algo->prepare(context_, frame, frameContext, &params);
- paramsBufferReady.emit(frame);
+ paramsComputed.emit(frame, params.size());
}
-void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId,
- const ControlList &sensorControls)
+void IPARkISP1::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
{
IPAFrameContext &frameContext = context_.frameContexts.get(frame);
@@ -356,7 +367,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId
frameContext.sensor.exposure =
sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
frameContext.sensor.gain =
- camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+ context_.camHelper->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
ControlList metadata(controls::controls);
@@ -369,6 +380,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId
setControls(frame);
+ context_.debugMetadata.moveEntries(metadata);
metadataReady.emit(frame, metadata);
}
@@ -393,9 +405,9 @@ void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
/* Compute the analogue gain limits. */
const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
- float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
- float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
- float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ float minGain = context_.camHelper->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = context_.camHelper->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = context_.camHelper->gain(v4l2Gain.def().get<int32_t>());
ctrlMap.emplace(std::piecewise_construct,
std::forward_as_tuple(&controls::AnalogueGain),
std::forward_as_tuple(minGain, maxGain, defGain));
@@ -423,10 +435,11 @@ void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
}
- ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
- frameDurations[1],
- frameDurations[2]);
+ /* \todo Move this (and other agc-related controls) to agc */
+ context_.ctrlMap[&controls::FrameDurationLimits] =
+ ControlInfo(frameDurations[0], frameDurations[1], frameDurations[2]);
+ ctrlMap.insert(context_.ctrlMap.begin(), context_.ctrlMap.end());
*ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
}
@@ -439,11 +452,13 @@ void IPARkISP1::setControls(unsigned int frame)
IPAFrameContext &frameContext = context_.frameContexts.get(frame);
uint32_t exposure = frameContext.agc.exposure;
- uint32_t gain = camHelper_->gainCode(frameContext.agc.gain);
+ uint32_t gain = context_.camHelper->gainCode(frameContext.agc.gain);
+ uint32_t vblank = frameContext.agc.vblank;
ControlList ctrls(sensorControls_);
ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
+ ctrls.set(V4L2_CID_VBLANK, static_cast<int32_t>(vblank));
setSensorControls.emit(frame, ctrls);
}
@@ -458,7 +473,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerRkISP1",
+ "rkisp1",
"rkisp1",
};
diff --git a/src/ipa/rpi/cam_helper/cam_helper.cpp b/src/ipa/rpi/cam_helper/cam_helper.cpp
index ddd5e9a4..a78db9c1 100644
--- a/src/ipa/rpi/cam_helper/cam_helper.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper.cpp - helper information for different sensors
+ * helper information for different sensors
*/
#include <linux/videodev2.h>
@@ -156,17 +156,9 @@ void CamHelper::setCameraMode(const CameraMode &mode)
}
}
-void CamHelper::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
+void CamHelper::setHwConfig(const Controller::HardwareConfig &hwConfig)
{
- /*
- * These values are correct for many sensors. Other sensors will
- * need to over-ride this function.
- */
- exposureDelay = 2;
- gainDelay = 1;
- vblankDelay = 2;
- hblankDelay = 2;
+ hwConfig_ = hwConfig;
}
bool CamHelper::sensorEmbeddedDataPresent() const
@@ -241,7 +233,7 @@ void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
return;
}
- deviceStatus.shutterSpeed = parsedDeviceStatus.shutterSpeed;
+ deviceStatus.exposureTime = parsedDeviceStatus.exposureTime;
deviceStatus.analogueGain = parsedDeviceStatus.analogueGain;
deviceStatus.frameLength = parsedDeviceStatus.frameLength;
deviceStatus.lineLength = parsedDeviceStatus.lineLength;
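
With getDelays() gone, delay information no longer flows from the helper out to the pipeline handler; instead the helper receives the controller's hardware description once at initialisation (the matching call appears in IpaBase::init() later in this series). A minimal standalone sketch of the new shape, using stand-in types rather than the real classes:

    /* Stand-in types sketching the new CamHelper/Controller relationship. */
    struct HardwareConfig {
        bool statsInline = false;
        bool dataBufferStrided = true;  /* new field added in this series */
    };

    struct Controller {
        HardwareConfig getHardwareConfig() const { return {}; }
    };

    struct CamHelper {
        void setHwConfig(const HardwareConfig &hwConfig) { hwConfig_ = hwConfig; }
    private:
        HardwareConfig hwConfig_;
    };

    int main()
    {
        Controller controller;
        CamHelper helper;

        /* Mirrors IpaBase::init(): configure the helper from the controller. */
        helper.setHwConfig(controller.getHardwareConfig());
        return 0;
    }
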
diff --git a/src/ipa/rpi/cam_helper/cam_helper.h b/src/ipa/rpi/cam_helper/cam_helper.h
index 58a4b202..4a826690 100644
--- a/src/ipa/rpi/cam_helper/cam_helper.h
+++ b/src/ipa/rpi/cam_helper/cam_helper.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper.h - helper class providing camera information
+ * helper class providing camera information
*/
#pragma once
@@ -36,11 +36,6 @@ namespace RPiController {
* exposure time, and to convert between the sensor's gain codes and actual
* gains.
*
- * A function to return the number of frames of delay between updating exposure,
- * analogue gain and vblanking, and for the changes to take effect. For many
- * sensors these take the values 2, 1 and 2 respectively, but sensors that are
- * different will need to over-ride the default function provided.
- *
* A function to query if the sensor outputs embedded data that can be parsed.
*
* A function to return the sensitivity of a given camera mode.
@@ -76,6 +71,7 @@ public:
CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
virtual ~CamHelper();
void setCameraMode(const CameraMode &mode);
+ void setHwConfig(const Controller::HardwareConfig &hwConfig);
virtual void prepare(libcamera::Span<const uint8_t> buffer,
Metadata &metadata);
virtual void process(StatisticsPtr &stats, Metadata &metadata);
@@ -91,8 +87,6 @@ public:
libcamera::utils::Duration lineLengthPckToDuration(uint32_t lineLengthPck) const;
virtual uint32_t gainCode(double gain) const = 0;
virtual double gain(uint32_t gainCode) const = 0;
- virtual void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const;
virtual bool sensorEmbeddedDataPresent() const;
virtual double getModeSensitivity(const CameraMode &mode) const;
virtual unsigned int hideFramesStartup() const;
@@ -108,6 +102,7 @@ protected:
std::unique_ptr<MdParser> parser_;
CameraMode mode_;
+ Controller::HardwareConfig hwConfig_;
private:
/*
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
index c3337ed0..ba01153e 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper_imx219.cpp - camera helper for imx219 sensor
+ * camera helper for imx219 sensor
*/
#include <assert.h>
@@ -99,7 +99,7 @@ void CamHelperImx219::populateMetadata(const MdParser::RegisterMap &registers,
deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
registers.at(lineLengthLoReg));
- deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
deviceStatus.lineLength);
deviceStatus.analogueGain = gain(registers.at(gainReg));
deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp
new file mode 100644
index 00000000..efc03193
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Raspberry Pi Ltd
+ *
+ * camera information for imx283 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx283 : public CamHelper
+{
+public:
+ CamHelperImx283();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * The imx283 doesn't output metadata, so we have to use the delayed controls,
+ * which work by counting frames.
+ */
+
+CamHelperImx283::CamHelperImx283()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx283::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(2048.0 - 2048.0 / gain);
+}
+
+double CamHelperImx283::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(2048.0 / (2048 - gainCode));
+}
+
+unsigned int CamHelperImx283::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx283();
+}
+
+static RegisterCamHelper reg("imx283", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
index d98b51cd..c1aa8528 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * cam_helper_imx290.cpp - camera helper for imx290 sensor
+ * camera helper for imx290 sensor
*/
-#include <math.h>
+#include <cmath>
#include "cam_helper.h"
@@ -17,8 +17,6 @@ public:
CamHelperImx290();
uint32_t gainCode(double gain) const override;
double gain(uint32_t gainCode) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
unsigned int hideFramesStartup() const override;
unsigned int hideFramesModeSwitch() const override;
@@ -37,22 +35,13 @@ CamHelperImx290::CamHelperImx290()
uint32_t CamHelperImx290::gainCode(double gain) const
{
- int code = 66.6667 * log10(gain);
+ int code = 66.6667 * std::log10(gain);
return std::max(0, std::min(code, 0xf0));
}
double CamHelperImx290::gain(uint32_t gainCode) const
{
- return pow(10, 0.015 * gainCode);
-}
-
-void CamHelperImx290::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 2;
- hblankDelay = 2;
+ return std::pow(10, 0.015 * gainCode);
}
unsigned int CamHelperImx290::hideFramesStartup() const
@@ -73,3 +62,5 @@ static CamHelper *create()
}
static RegisterCamHelper reg("imx290", &create);
+static RegisterCamHelper reg327("imx327", &create);
+static RegisterCamHelper reg462("imx462", &create);
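
The imx327 and imx462 are register-compatible variants served by the same helper, so they are added above as extra registrations of the imx290 create() function. RegisterCamHelper is a static-constructor registry; a simplified sketch of the pattern (the real class lives in cam_helper.h, and the map type here is an assumption):

    #include <map>
    #include <string>

    struct CamHelper;
    using CamHelperCreateFunc = CamHelper *(*)();

    /* Simplified sketch of the registry behind RegisterCamHelper. */
    static std::map<std::string, CamHelperCreateFunc> &registry()
    {
        static std::map<std::string, CamHelperCreateFunc> map;
        return map;
    }

    struct RegisterCamHelper {
        RegisterCamHelper(const char *name, CamHelperCreateFunc fn)
        {
            registry()[name] = fn;  /* runs before main() */
        }
    };
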
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
index ecb845e7..ac7ee2ea 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * cam_helper_imx296.cpp - Camera helper for IMX296 sensor
+ * Camera helper for IMX296 sensor
*/
#include <algorithm>
@@ -23,8 +23,6 @@ public:
double gain(uint32_t gainCode) const override;
uint32_t exposureLines(const Duration exposure, const Duration lineLength) const override;
Duration exposure(uint32_t exposureLines, const Duration lineLength) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
private:
static constexpr uint32_t minExposureLines = 1;
@@ -66,15 +64,6 @@ Duration CamHelperImx296::exposure(uint32_t exposureLines,
return std::max<uint32_t>(minExposureLines, exposureLines) * timePerLine + 14.26us;
}
-void CamHelperImx296::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 2;
- hblankDelay = 2;
-}
-
static CamHelper *create()
{
return new CamHelperImx296();
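
The imx296 exposure model visible above adds a fixed 14.26 us term on top of the line count: exposure = max(lines, minExposureLines) * timePerLine + 14.26 us, so the inverse conversion has to subtract the offset before dividing by the line time. Worked standalone, with an assumed (not real) 30 us line time:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const double timePerLineUs = 30.0;  /* assumed, not a real imx296 mode */
        const double offsetUs = 14.26;
        const double requestedUs = 5000.0;

        /* Inverse conversion: subtract the fixed offset, then quantise to lines. */
        int lines = static_cast<int>(std::max(1.0, (requestedUs - offsetUs) / timePerLineUs));
        double actualUs = lines * timePerLineUs + offsetUs;

        std::printf("%d lines -> %.2f us\n", lines, actualUs);  /* 166 lines -> 4994.26 us */
        return 0;
    }
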
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
new file mode 100644
index 00000000..c0a09eee
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2025, Raspberry Pi Ltd
+ *
+ * camera helper for imx415 sensor
+ */
+
+#include <cmath>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx415 : public CamHelper
+{
+public:
+ CamHelperImx415();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 8;
+};
+
+CamHelperImx415::CamHelperImx415()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx415::gainCode(double gain) const
+{
+ int code = 66.6667 * std::log10(gain);
+ return std::max(0, std::min(code, 0xf0));
+}
+
+double CamHelperImx415::gain(uint32_t gainCode) const
+{
+ return std::pow(10, 0.015 * gainCode);
+}
+
+unsigned int CamHelperImx415::hideFramesStartup() const
+{
+ /* On startup, we seem to get 1 bad frame. */
+ return 1;
+}
+
+unsigned int CamHelperImx415::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx415();
+}
+
+static RegisterCamHelper reg("imx415", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
index bc769ca7..a72ac67d 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * cam_helper_imx477.cpp - camera helper for imx477 sensor
+ * camera helper for imx477 sensor
*/
#include <algorithm>
@@ -51,8 +51,6 @@ public:
void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
Duration maxFrameDuration) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
bool sensorEmbeddedDataPresent() const override;
private:
@@ -112,7 +110,7 @@ void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
DeviceStatus parsedDeviceStatus;
metadata.get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
parsedDeviceStatus.frameLength = deviceStatus.frameLength;
metadata.set("device.status", parsedDeviceStatus);
@@ -159,15 +157,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx477::getBlanking(Duration &exposure,
return { frameLength - mode_.height, hblank };
}
-void CamHelperImx477::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 3;
- hblankDelay = 3;
-}
-
bool CamHelperImx477::sensorEmbeddedDataPresent() const
{
return true;
@@ -180,7 +169,7 @@ void CamHelperImx477::populateMetadata(const MdParser::RegisterMap &registers,
deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
registers.at(lineLengthLoReg));
- deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
deviceStatus.lineLength);
deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
index c7262aa0..10cbea48 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
@@ -3,7 +3,7 @@
* Based on cam_helper_imx477.cpp
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * cam_helper_imx519.cpp - camera helper for imx519 sensor
+ * camera helper for imx519 sensor
* Copyright (C) 2021, Arducam Technology co., Ltd.
*/
@@ -51,8 +51,6 @@ public:
void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
Duration maxFrameDuration) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
bool sensorEmbeddedDataPresent() const override;
private:
@@ -112,7 +110,7 @@ void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
DeviceStatus parsedDeviceStatus;
metadata.get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
parsedDeviceStatus.frameLength = deviceStatus.frameLength;
metadata.set("device.status", parsedDeviceStatus);
@@ -159,15 +157,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx519::getBlanking(Duration &exposure,
return { frameLength - mode_.height, hblank };
}
-void CamHelperImx519::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 3;
- hblankDelay = 3;
-}
-
bool CamHelperImx519::sensorEmbeddedDataPresent() const
{
return true;
@@ -180,7 +169,7 @@ void CamHelperImx519::populateMetadata(const MdParser::RegisterMap &registers,
deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
registers.at(lineLengthLoReg));
- deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
deviceStatus.lineLength);
deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
index 906c6fa2..6150909c 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
- * cam_helper_imx708.cpp - camera helper for imx708 sensor
+ * camera helper for imx708 sensor
*/
#include <cmath>
@@ -54,8 +54,6 @@ public:
void process(StatisticsPtr &stats, Metadata &metadata) override;
std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
Duration maxFrameDuration) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
bool sensorEmbeddedDataPresent() const override;
double getModeSensitivity(const CameraMode &mode) const override;
unsigned int hideFramesModeSwitch() const override;
@@ -66,7 +64,7 @@ private:
* Smallest difference between the frame length and integration time,
* in units of lines.
*/
- static constexpr int frameIntegrationDiff = 22;
+ static constexpr int frameIntegrationDiff = 48;
/* Maximum frame length allowable for long exposure calculations. */
static constexpr int frameLengthMax = 0xffdc;
/* Largest long exposure scale factor given as a left shift on the frame length. */
@@ -155,7 +153,7 @@ void CamHelperImx708::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m
DeviceStatus parsedDeviceStatus;
metadata.get("device.status", parsedDeviceStatus);
- parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed;
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
parsedDeviceStatus.frameLength = deviceStatus.frameLength;
metadata.set("device.status", parsedDeviceStatus);
@@ -208,15 +206,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx708::getBlanking(Duration &exposure,
return { frameLength - mode_.height, hblank };
}
-void CamHelperImx708::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 3;
- hblankDelay = 3;
-}
-
bool CamHelperImx708::sensorEmbeddedDataPresent() const
{
return true;
@@ -255,7 +244,7 @@ void CamHelperImx708::populateMetadata(const MdParser::RegisterMap &registers,
deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
registers.at(lineLengthLoReg));
- deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
deviceStatus.lineLength);
deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
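
frameIntegrationDiff is the minimum number of lines by which the frame length must exceed the integration time, so raising it from 22 to 48 above slightly reduces the maximum exposure available at a given frame length: maxExposureLines = frameLength - frameIntegrationDiff. With an illustrative frame length (not a real imx708 mode):

    #include <cstdio>

    int main()
    {
        const int frameLength = 2600;  /* lines, illustrative only */

        /* Maximum integration time permitted by the sensor constraint. */
        std::printf("old limit: %d lines\n", frameLength - 22);  /* 2578 */
        std::printf("new limit: %d lines\n", frameLength - 48);  /* 2552 */
        return 0;
    }
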
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
index 5a99083d..40d6b6d7 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * cam_helper_ov5647.cpp - camera information for ov5647 sensor
+ * camera information for ov5647 sensor
*/
#include <assert.h>
@@ -17,8 +17,6 @@ public:
CamHelperOv5647();
uint32_t gainCode(double gain) const override;
double gain(uint32_t gainCode) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
unsigned int hideFramesStartup() const override;
unsigned int hideFramesModeSwitch() const override;
unsigned int mistrustFramesStartup() const override;
@@ -52,19 +50,6 @@ double CamHelperOv5647::gain(uint32_t gainCode) const
return static_cast<double>(gainCode) / 16.0;
}
-void CamHelperOv5647::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- /*
- * We run this sensor in a mode where the gain delay is bumped up to
- * 2. It seems to be the only way to make the delays "predictable".
- */
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 2;
- hblankDelay = 2;
-}
-
unsigned int CamHelperOv5647::hideFramesStartup() const
{
/*
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
index 27e449b1..980495a8 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2021, Raspberry Pi Ltd
* Copyright (C) 2023, Ideas on Board Oy.
*
- * cam_helper_ov64a40.cpp - camera information for ov64a40 sensor
+ * camera information for ov64a40 sensor
*/
#include <assert.h>
@@ -18,8 +18,6 @@ public:
CamHelperOv64a40();
uint32_t gainCode(double gain) const override;
double gain(uint32_t gainCode) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
double getModeSensitivity(const CameraMode &mode) const override;
private:
@@ -45,16 +43,6 @@ double CamHelperOv64a40::gain(uint32_t gainCode) const
return static_cast<double>(gainCode) / 128.0;
}
-void CamHelperOv64a40::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- /* The driver appears to behave as follows: */
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 2;
- hblankDelay = 2;
-}
-
double CamHelperOv64a40::getModeSensitivity(const CameraMode &mode) const
{
if (mode.binX >= 2 && mode.scaleX >= 4) {
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp
new file mode 100644
index 00000000..fc7b999f
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera information for ov7251 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv7251 : public CamHelper
+{
+public:
+ CamHelperOv7251();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * OV7251 doesn't output metadata, so we have to use the "unicam parser" which
+ * works by counting frames.
+ */
+
+CamHelperOv7251::CamHelperOv7251()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv7251::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 16.0);
+}
+
+double CamHelperOv7251::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 16.0;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv7251();
+}
+
+static RegisterCamHelper reg("ov7251", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
index 86c5bc4c..e93a4691 100644
--- a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * cam_helper_ov9281.cpp - camera information for ov9281 sensor
+ * camera information for ov9281 sensor
*/
#include <assert.h>
@@ -17,15 +17,13 @@ public:
CamHelperOv9281();
uint32_t gainCode(double gain) const override;
double gain(uint32_t gainCode) const override;
- void getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const override;
private:
/*
* Smallest difference between the frame length and integration time,
* in units of lines.
*/
- static constexpr int frameIntegrationDiff = 4;
+ static constexpr int frameIntegrationDiff = 25;
};
/*
@@ -48,16 +46,6 @@ double CamHelperOv9281::gain(uint32_t gainCode) const
return static_cast<double>(gainCode) / 16.0;
}
-void CamHelperOv9281::getDelays(int &exposureDelay, int &gainDelay,
- int &vblankDelay, int &hblankDelay) const
-{
- /* The driver appears to behave as follows: */
- exposureDelay = 2;
- gainDelay = 2;
- vblankDelay = 2;
- hblankDelay = 2;
-}
-
static CamHelper *create()
{
return new CamHelperOv9281();
diff --git a/src/ipa/rpi/cam_helper/md_parser.h b/src/ipa/rpi/cam_helper/md_parser.h
index 77d557aa..227c376c 100644
--- a/src/ipa/rpi/cam_helper/md_parser.h
+++ b/src/ipa/rpi/cam_helper/md_parser.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * md_parser.h - image sensor metadata parser interface
+ * image sensor metadata parser interface
*/
#pragma once
diff --git a/src/ipa/rpi/cam_helper/md_parser_smia.cpp b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
index c5b806d7..c7bdcf94 100644
--- a/src/ipa/rpi/cam_helper/md_parser_smia.cpp
+++ b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * md_parser_smia.cpp - SMIA specification based embedded data parser
+ * SMIA specification based embedded data parser
*/
#include <libcamera/base/log.h>
diff --git a/src/ipa/rpi/cam_helper/meson.build b/src/ipa/rpi/cam_helper/meson.build
index 72625057..abf02147 100644
--- a/src/ipa/rpi/cam_helper/meson.build
+++ b/src/ipa/rpi/cam_helper/meson.build
@@ -4,12 +4,15 @@ rpi_ipa_cam_helper_sources = files([
'cam_helper.cpp',
'cam_helper_ov5647.cpp',
'cam_helper_imx219.cpp',
+ 'cam_helper_imx283.cpp',
'cam_helper_imx290.cpp',
'cam_helper_imx296.cpp',
+ 'cam_helper_imx415.cpp',
'cam_helper_imx477.cpp',
'cam_helper_imx519.cpp',
'cam_helper_imx708.cpp',
'cam_helper_ov64a40.cpp',
+ 'cam_helper_ov7251.cpp',
'cam_helper_ov9281.cpp',
'md_parser_smia.cpp',
])
diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp
index 3c133c55..6734c32e 100644
--- a/src/ipa/rpi/common/ipa_base.cpp
+++ b/src/ipa/rpi/common/ipa_base.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
- * ipa_base.cpp - Raspberry Pi IPA base class
+ * Raspberry Pi IPA base class
*/
#include "ipa_base.h"
@@ -25,7 +25,6 @@
#include "controller/contrast_algorithm.h"
#include "controller/denoise_algorithm.h"
#include "controller/hdr_algorithm.h"
-#include "controller/hdr_status.h"
#include "controller/lux_status.h"
#include "controller/sharpen_algorithm.h"
#include "controller/statistics.h"
@@ -56,9 +55,19 @@ constexpr Duration controllerMinFrameDuration = 1.0s / 30.0;
/* List of controls handled by the Raspberry Pi IPA */
const ControlInfoMap::Map ipaControls{
- { &controls::AeEnable, ControlInfo(false, true) },
- { &controls::ExposureTime, ControlInfo(0, 66666) },
- { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f) },
+ /* \todo Move this to the Camera class */
+ { &controls::AeEnable, ControlInfo(false, true, true) },
+ { &controls::ExposureTimeMode,
+ ControlInfo(static_cast<int32_t>(controls::ExposureTimeModeAuto),
+ static_cast<int32_t>(controls::ExposureTimeModeManual),
+ static_cast<int32_t>(controls::ExposureTimeModeAuto)) },
+ { &controls::ExposureTime,
+ ControlInfo(1, 66666, static_cast<int32_t>(defaultExposureTime.get<std::micro>())) },
+ { &controls::AnalogueGainMode,
+ ControlInfo(static_cast<int32_t>(controls::AnalogueGainModeAuto),
+ static_cast<int32_t>(controls::AnalogueGainModeManual),
+ static_cast<int32_t>(controls::AnalogueGainModeAuto)) },
+ { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f, 1.0f) },
{ &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) },
{ &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) },
{ &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) },
@@ -72,9 +81,11 @@ const ControlInfoMap::Map ipaControls{
{ &controls::HdrMode, ControlInfo(controls::HdrModeValues) },
{ &controls::Sharpness, ControlInfo(0.0f, 16.0f, 1.0f) },
{ &controls::ScalerCrop, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
- { &controls::FrameDurationLimits, ControlInfo(INT64_C(33333), INT64_C(120000)) },
+ { &controls::FrameDurationLimits,
+ ControlInfo(INT64_C(33333), INT64_C(120000),
+ static_cast<int64_t>(defaultMinFrameDuration.get<std::micro>())) },
{ &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
- { &controls::rpi::StatsOutputEnable, ControlInfo(false, true) },
+ { &controls::rpi::StatsOutputEnable, ControlInfo(false, true, false) },
};
/* IPA controls handled conditionally, if the sensor is not mono */
@@ -82,6 +93,7 @@ const ControlInfoMap::Map ipaColourControls{
{ &controls::AwbEnable, ControlInfo(false, true) },
{ &controls::AwbMode, ControlInfo(controls::AwbModeValues) },
{ &controls::ColourGains, ControlInfo(0.0f, 32.0f) },
+ { &controls::ColourTemperature, ControlInfo(100, 100000) },
{ &controls::Saturation, ControlInfo(0.0f, 32.0f, 1.0f) },
};
@@ -97,6 +109,13 @@ const ControlInfoMap::Map ipaAfControls{
{ &controls::LensPosition, ControlInfo(0.0f, 32.0f, 1.0f) }
};
+/* Platform specific controls */
+const std::map<const std::string, ControlInfoMap::Map> platformControls {
+ { "pisp", {
+ { &controls::rpi::ScalerCrops, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) }
+ } },
+};
+
} /* namespace */
LOG_DEFINE_CATEGORY(IPARPI)
@@ -105,8 +124,8 @@ namespace ipa::RPi {
IpaBase::IpaBase()
: controller_(), frameLengths_(FrameLengthsQueueSize, 0s), statsMetadataOutput_(false),
- frameCount_(0), mistrustCount_(0), lastRunTimestamp_(0), firstStart_(true),
- flickerState_({ 0, 0s })
+ stitchSwapBuffers_(false), frameCount_(0), mistrustCount_(0), lastRunTimestamp_(0),
+ firstStart_(true), flickerState_({ 0, 0s })
{
}
@@ -128,18 +147,8 @@ int32_t IpaBase::init(const IPASettings &settings, const InitParams &params, Ini
return -EINVAL;
}
- /*
- * Pass out the sensor config to the pipeline handler in order
- * to setup the staggered writer class.
- */
- int gainDelay, exposureDelay, vblankDelay, hblankDelay, sensorMetadata;
- helper_->getDelays(exposureDelay, gainDelay, vblankDelay, hblankDelay);
- sensorMetadata = helper_->sensorEmbeddedDataPresent();
-
- result->sensorConfig.gainDelay = gainDelay;
- result->sensorConfig.exposureDelay = exposureDelay;
- result->sensorConfig.vblankDelay = vblankDelay;
- result->sensorConfig.hblankDelay = hblankDelay;
+ /* Pass out the sensor metadata to the pipeline handler */
+ int sensorMetadata = helper_->sensorEmbeddedDataPresent();
result->sensorConfig.sensorMetadata = sensorMetadata;
/* Load the tuning file for this sensor. */
@@ -154,12 +163,17 @@ int32_t IpaBase::init(const IPASettings &settings, const InitParams &params, Ini
lensPresent_ = params.lensPresent;
controller_.initialise();
+ helper_->setHwConfig(controller_.getHardwareConfig());
/* Return the controls handled by the IPA */
ControlInfoMap::Map ctrlMap = ipaControls;
if (lensPresent_)
ctrlMap.merge(ControlInfoMap::Map(ipaAfControls));
+ auto platformCtrlsIt = platformControls.find(controller_.getTarget());
+ if (platformCtrlsIt != platformControls.end())
+ ctrlMap.merge(ControlInfoMap::Map(platformCtrlsIt->second));
+
monoSensor_ = params.sensorInfo.cfaPattern == properties::draft::ColorFilterArrangementEnum::MONO;
if (!monoSensor_)
ctrlMap.merge(ControlInfoMap::Map(ipaColourControls));
@@ -214,7 +228,7 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa
/* Supply initial values for gain and exposure. */
AgcStatus agcStatus;
- agcStatus.shutterTime = defaultExposureTime;
+ agcStatus.exposureTime = defaultExposureTime;
agcStatus.analogueGain = defaultAnalogueGain;
applyAGC(&agcStatus, ctrls);
@@ -248,15 +262,18 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa
ControlInfoMap::Map ctrlMap = ipaControls;
ctrlMap[&controls::FrameDurationLimits] =
ControlInfo(static_cast<int64_t>(mode_.minFrameDuration.get<std::micro>()),
- static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>()));
+ static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>()),
+ static_cast<int64_t>(defaultMinFrameDuration.get<std::micro>()));
ctrlMap[&controls::AnalogueGain] =
ControlInfo(static_cast<float>(mode_.minAnalogueGain),
- static_cast<float>(mode_.maxAnalogueGain));
+ static_cast<float>(mode_.maxAnalogueGain),
+ static_cast<float>(defaultAnalogueGain));
ctrlMap[&controls::ExposureTime] =
- ControlInfo(static_cast<int32_t>(mode_.minShutter.get<std::micro>()),
- static_cast<int32_t>(mode_.maxShutter.get<std::micro>()));
+ ControlInfo(static_cast<int32_t>(mode_.minExposureTime.get<std::micro>()),
+ static_cast<int32_t>(mode_.maxExposureTime.get<std::micro>()),
+ static_cast<int32_t>(defaultExposureTime.get<std::micro>()));
/* Declare colour processing related controls for non-mono sensors. */
if (!monoSensor_)
@@ -289,16 +306,18 @@ void IpaBase::start(const ControlList &controls, StartResult *result)
/* SwitchMode may supply updated exposure/gain values to use. */
AgcStatus agcStatus;
- agcStatus.shutterTime = 0.0s;
+ agcStatus.exposureTime = 0.0s;
agcStatus.analogueGain = 0.0;
metadata.get("agc.status", agcStatus);
- if (agcStatus.shutterTime && agcStatus.analogueGain) {
+ if (agcStatus.exposureTime && agcStatus.analogueGain) {
ControlList ctrls(sensorCtrls_);
applyAGC(&agcStatus, ctrls);
result->controls = std::move(ctrls);
setCameraTimeoutValue();
}
+ /* Make a note of this as it tells us the HDR status of the first few frames. */
+ hdrStatus_ = agcStatus.hdr;
/*
* Initialise frame counts, and decide how many frames must be hidden or
@@ -402,11 +421,17 @@ void IpaBase::prepareIsp(const PrepareParams &params)
* sensor exposure/gain changes. So fetch it from the metadata list
* indexed by the IPA cookie returned, and put it in the current frame
* metadata.
+ *
+ * Note if the HDR mode has changed, as things like tonemaps may need updating.
*/
AgcStatus agcStatus;
+ bool hdrChange = false;
RPiController::Metadata &delayedMetadata = rpiMetadata_[params.delayContext];
- if (!delayedMetadata.get<AgcStatus>("agc.status", agcStatus))
+ if (!delayedMetadata.get<AgcStatus>("agc.status", agcStatus)) {
rpiMetadata.set("agc.delayed_status", agcStatus);
+ hdrChange = agcStatus.hdr.mode != hdrStatus_.mode;
+ hdrStatus_ = agcStatus.hdr;
+ }
/*
* This may overwrite the DeviceStatus using values from the sensor
@@ -417,7 +442,7 @@ void IpaBase::prepareIsp(const PrepareParams &params)
/* Allow a 10% margin on the comparison below. */
Duration delta = (frameTimestamp - lastRunTimestamp_) * 1.0ns;
if (lastRunTimestamp_ && frameCount_ > dropFrameCount_ &&
- delta < controllerMinFrameDuration * 0.9) {
+ delta < controllerMinFrameDuration * 0.9 && !hdrChange) {
/*
* Ensure we merge the previous frame's metadata with the current
* frame. This will not overwrite exposure/gain values for the
@@ -454,7 +479,7 @@ void IpaBase::prepareIsp(const PrepareParams &params)
reportMetadata(ipaContext);
/* Ready to push the input buffer into the ISP. */
- prepareIspComplete.emit(params.buffers, false);
+ prepareIspComplete.emit(params.buffers, stitchSwapBuffers_);
}
void IpaBase::processStats(const ProcessParams &params)
@@ -581,16 +606,26 @@ void IpaBase::setMode(const IPACameraSensorInfo &sensorInfo)
mode_.sensitivity = helper_->getModeSensitivity(mode_);
const ControlInfo &gainCtrl = sensorCtrls_.at(V4L2_CID_ANALOGUE_GAIN);
- const ControlInfo &shutterCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE);
+ const ControlInfo &exposureTimeCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE);
mode_.minAnalogueGain = helper_->gain(gainCtrl.min().get<int32_t>());
mode_.maxAnalogueGain = helper_->gain(gainCtrl.max().get<int32_t>());
- /* Shutter speed is calculated based on the limits of the frame durations. */
- mode_.minShutter = helper_->exposure(shutterCtrl.min().get<int32_t>(), mode_.minLineLength);
- mode_.maxShutter = Duration::max();
- helper_->getBlanking(mode_.maxShutter,
- mode_.minFrameDuration, mode_.maxFrameDuration);
+ /*
+ * We need to give the helper the min/max frame durations so it can calculate
+ * the correct exposure limits below.
+ */
+ helper_->setCameraMode(mode_);
+
+ /*
+ * Exposure time is calculated based on the limits of the frame
+ * durations.
+ */
+ mode_.minExposureTime = helper_->exposure(exposureTimeCtrl.min().get<int32_t>(),
+ mode_.minLineLength);
+ mode_.maxExposureTime = Duration::max();
+ helper_->getBlanking(mode_.maxExposureTime, mode_.minFrameDuration,
+ mode_.maxFrameDuration);
}
void IpaBase::setCameraTimeoutValue()
@@ -695,14 +730,18 @@ static const std::map<int32_t, RPiController::AfAlgorithm::AfPause> AfPauseTable
static const std::map<int32_t, std::string> HdrModeTable = {
{ controls::HdrModeOff, "Off" },
+ { controls::HdrModeMultiExposureUnmerged, "MultiExposureUnmerged" },
{ controls::HdrModeMultiExposure, "MultiExposure" },
{ controls::HdrModeSingleExposure, "SingleExposure" },
+ { controls::HdrModeNight, "Night" },
};
void IpaBase::applyControls(const ControlList &controls)
{
using RPiController::AgcAlgorithm;
using RPiController::AfAlgorithm;
+ using RPiController::ContrastAlgorithm;
+ using RPiController::DenoiseAlgorithm;
using RPiController::HdrAlgorithm;
/* Clear the return metadata buffer. */
@@ -725,6 +764,42 @@ void IpaBase::applyControls(const ControlList &controls)
af->setMode(mode->second);
}
+ /*
+ * Because some AE controls are mode-specific, handle the AE-related
+ * mode changes first.
+ */
+ const auto analogueGainMode = controls.get(controls::AnalogueGainMode);
+ const auto exposureTimeMode = controls.get(controls::ExposureTimeMode);
+
+ if (analogueGainMode || exposureTimeMode) {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (agc) {
+ if (analogueGainMode) {
+ if (*analogueGainMode == controls::AnalogueGainModeManual)
+ agc->disableAutoGain();
+ else
+ agc->enableAutoGain();
+
+ libcameraMetadata_.set(controls::AnalogueGainMode,
+ *analogueGainMode);
+ }
+
+ if (exposureTimeMode) {
+ if (*exposureTimeMode == controls::ExposureTimeModeManual)
+ agc->disableAutoExposure();
+ else
+ agc->enableAutoExposure();
+
+ libcameraMetadata_.set(controls::ExposureTimeMode,
+ *exposureTimeMode);
+ }
+ } else {
+ LOG(IPARPI, Warning)
+ << "Could not set AnalogueGainMode or ExposureTimeMode - no AGC algorithm";
+ }
+ }
+
/* Iterate over controls */
for (auto const &ctrl : controls) {
LOG(IPARPI, Debug) << "Request ctrl: "
@@ -732,23 +807,8 @@ void IpaBase::applyControls(const ControlList &controls)
<< " = " << ctrl.second.toString();
switch (ctrl.first) {
- case controls::AE_ENABLE: {
- RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
- controller_.getAlgorithm("agc"));
- if (!agc) {
- LOG(IPARPI, Warning)
- << "Could not set AE_ENABLE - no AGC algorithm";
- break;
- }
-
- if (ctrl.second.get<bool>() == false)
- agc->disableAuto();
- else
- agc->enableAuto();
-
- libcameraMetadata_.set(controls::AeEnable, ctrl.second.get<bool>());
- break;
- }
+ case controls::EXPOSURE_TIME_MODE:
+ break; /* We already handled this one above */
case controls::EXPOSURE_TIME: {
RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
@@ -759,13 +819,23 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ /*
+ * Ignore manual exposure time when the auto exposure
+ * algorithm is running.
+ */
+ if (agc->autoExposureEnabled())
+ break;
+
/* The control provides units of microseconds. */
- agc->setFixedShutter(0, ctrl.second.get<int32_t>() * 1.0us);
+ agc->setFixedExposureTime(0, ctrl.second.get<int32_t>() * 1.0us);
libcameraMetadata_.set(controls::ExposureTime, ctrl.second.get<int32_t>());
break;
}
+ case controls::ANALOGUE_GAIN_MODE:
+ break; /* We already handled this one above */
+
case controls::ANALOGUE_GAIN: {
RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
controller_.getAlgorithm("agc"));
@@ -775,6 +845,13 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ /*
+ * Ignore manual analogue gain value when the auto gain
+ * algorithm is running.
+ */
+ if (agc->autoGainEnabled())
+ break;
+
agc->setFixedAnalogueGain(0, ctrl.second.get<float>());
libcameraMetadata_.set(controls::AnalogueGain,
@@ -831,6 +908,13 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ /*
+ * Ignore AE_EXPOSURE_MODE if the shutter or the gain
+ * are in auto mode.
+ */
+ if (agc->autoExposureEnabled() || agc->autoGainEnabled())
+ break;
+
int32_t idx = ctrl.second.get<int32_t>();
if (ExposureModeTable.count(idx)) {
agc->setExposureMode(ExposureModeTable.at(idx));
@@ -989,6 +1073,25 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ case controls::COLOUR_TEMPERATURE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ auto temperatureK = ctrl.second.get<int32_t>();
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set COLOUR_TEMPERATURE - no AWB algorithm";
+ break;
+ }
+
+ awb->setColourTemperature(temperatureK);
+ /* This metadata will get reported back automatically. */
+ break;
+ }
+
case controls::BRIGHTNESS: {
RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
controller_.getAlgorithm("contrast"));
@@ -1053,6 +1156,7 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
+ case controls::rpi::SCALER_CROPS:
case controls::SCALER_CROP: {
/* We do nothing with this, but should avoid the warning below. */
break;
@@ -1194,9 +1298,32 @@ void IpaBase::applyControls(const ControlList &controls)
break;
}
- if (hdr->setMode(mode->second) == 0)
+ if (hdr->setMode(mode->second) == 0) {
agc->setActiveChannels(hdr->getChannels());
- else
+
+ /* We also disable adaptive contrast enhancement if HDR is running. */
+ ContrastAlgorithm *contrast =
+ dynamic_cast<ContrastAlgorithm *>(controller_.getAlgorithm("contrast"));
+ if (contrast) {
+ if (mode->second == "Off")
+ contrast->restoreCe();
+ else
+ contrast->enableCe(false);
+ }
+
+ DenoiseAlgorithm *denoise =
+ dynamic_cast<DenoiseAlgorithm *>(controller_.getAlgorithm("denoise"));
+ if (denoise) {
+ /* \todo - make the HDR mode say what denoise it wants? */
+ if (mode->second == "Night")
+ denoise->setConfig("night");
+ else if (mode->second == "SingleExposure")
+ denoise->setConfig("hdr");
+ /* MultiExposure doesn't need extra denoise. */
+ else
+ denoise->setConfig("normal");
+ }
+ } else
LOG(IPARPI, Warning)
<< "HDR mode " << mode->second << " not supported";
@@ -1229,7 +1356,7 @@ void IpaBase::fillDeviceStatus(const ControlList &sensorControls, unsigned int i
int32_t hblank = sensorControls.get(V4L2_CID_HBLANK).get<int32_t>();
deviceStatus.lineLength = helper_->hblankToLineLength(hblank);
- deviceStatus.shutterSpeed = helper_->exposure(exposureLines, deviceStatus.lineLength);
+ deviceStatus.exposureTime = helper_->exposure(exposureLines, deviceStatus.lineLength);
deviceStatus.analogueGain = helper_->gain(gainCode);
deviceStatus.frameLength = mode_.height + vblank;
@@ -1256,7 +1383,7 @@ void IpaBase::reportMetadata(unsigned int ipaContext)
DeviceStatus *deviceStatus = rpiMetadata.getLocked<DeviceStatus>("device.status");
if (deviceStatus) {
libcameraMetadata_.set(controls::ExposureTime,
- deviceStatus->shutterSpeed.get<std::micro>());
+ deviceStatus->exposureTime.get<std::micro>());
libcameraMetadata_.set(controls::AnalogueGain, deviceStatus->analogueGain);
libcameraMetadata_.set(controls::FrameDuration,
helper_->exposure(deviceStatus->frameLength, deviceStatus->lineLength).get<std::micro>());
@@ -1267,9 +1394,19 @@ void IpaBase::reportMetadata(unsigned int ipaContext)
}
AgcPrepareStatus *agcPrepareStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status");
- if (agcPrepareStatus) {
- libcameraMetadata_.set(controls::AeLocked, agcPrepareStatus->locked);
+ if (agcPrepareStatus)
libcameraMetadata_.set(controls::DigitalGain, agcPrepareStatus->digitalGain);
+
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (agc) {
+ if (!agc->autoExposureEnabled() && !agc->autoGainEnabled())
+ libcameraMetadata_.set(controls::AeState, controls::AeStateIdle);
+ else if (agcPrepareStatus)
+ libcameraMetadata_.set(controls::AeState,
+ agcPrepareStatus->locked
+ ? controls::AeStateConverged
+ : controls::AeStateSearching);
}
LuxStatus *luxStatus = rpiMetadata.getLocked<LuxStatus>("lux.status");
@@ -1354,12 +1491,31 @@ void IpaBase::reportMetadata(unsigned int ipaContext)
libcameraMetadata_.set(controls::AfPauseState, p);
}
- const HdrStatus *hdrStatus = rpiMetadata.getLocked<HdrStatus>("hdr.status");
- if (hdrStatus) {
- if (hdrStatus->channel == "short")
+ /*
+ * The HDR algorithm sets the HDR channel into the agc.status at the time that those
+ * AGC parameters were calculated several frames ago, so it comes back to us now in
+ * the delayed_status. If this frame is too soon after a mode switch for the
+ * delayed_status to be available, we use the HDR status that came out of the
+ * switchMode call.
+ */
+ const AgcStatus *agcStatus = rpiMetadata.getLocked<AgcStatus>("agc.delayed_status");
+ const HdrStatus &hdrStatus = agcStatus ? agcStatus->hdr : hdrStatus_;
+ if (!hdrStatus.mode.empty() && hdrStatus.mode != "Off") {
+ int32_t hdrMode = controls::HdrModeOff;
+ for (auto const &[mode, name] : HdrModeTable) {
+ if (hdrStatus.mode == name) {
+ hdrMode = mode;
+ break;
+ }
+ }
+ libcameraMetadata_.set(controls::HdrMode, hdrMode);
+
+ if (hdrStatus.channel == "short")
libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelShort);
- else if (hdrStatus->channel == "long")
+ else if (hdrStatus.channel == "long")
libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelLong);
+ else if (hdrStatus.channel == "medium")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelMedium);
else
libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelNone);
}
@@ -1388,15 +1544,15 @@ void IpaBase::applyFrameDurations(Duration minFrameDuration, Duration maxFrameDu
/*
* Calculate the maximum exposure time possible for the AGC to use.
- * getBlanking() will update maxShutter with the largest exposure
+ * getBlanking() will update maxExposureTime with the largest exposure
* value possible.
*/
- Duration maxShutter = Duration::max();
- helper_->getBlanking(maxShutter, minFrameDuration_, maxFrameDuration_);
+ Duration maxExposureTime = Duration::max();
+ helper_->getBlanking(maxExposureTime, minFrameDuration_, maxFrameDuration_);
RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
controller_.getAlgorithm("agc"));
- agc->setMaxShutter(maxShutter);
+ agc->setMaxExposureTime(maxExposureTime);
}
void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls)
@@ -1413,14 +1569,14 @@ void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls)
gainCode = std::clamp<int32_t>(gainCode, minGainCode, maxGainCode);
/* getBlanking might clip exposure time to the fps limits. */
- Duration exposure = agcStatus->shutterTime;
+ Duration exposure = agcStatus->exposureTime;
auto [vblank, hblank] = helper_->getBlanking(exposure, minFrameDuration_, maxFrameDuration_);
int32_t exposureLines = helper_->exposureLines(exposure,
helper_->hblankToLineLength(hblank));
LOG(IPARPI, Debug) << "Applying AGC Exposure: " << exposure
- << " (Shutter lines: " << exposureLines << ", AGC requested "
- << agcStatus->shutterTime << ") Gain: "
+ << " (Exposure lines: " << exposureLines << ", AGC requested "
+ << agcStatus->exposureTime << ") Gain: "
<< agcStatus->analogueGain << " (Gain Code: "
<< gainCode << ")";
diff --git a/src/ipa/rpi/common/ipa_base.h b/src/ipa/rpi/common/ipa_base.h
index 4db4411e..1a811beb 100644
--- a/src/ipa/rpi/common/ipa_base.h
+++ b/src/ipa/rpi/common/ipa_base.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * ipa_base.h - Raspberry Pi IPA base class
+ * Raspberry Pi IPA base class
*/
#pragma once
@@ -22,6 +22,7 @@
#include "controller/agc_status.h"
#include "controller/camera_mode.h"
#include "controller/controller.h"
+#include "controller/hdr_status.h"
#include "controller/metadata.h"
namespace libcamera {
@@ -48,6 +49,11 @@ public:
void processStats(const ProcessParams &params) override;
protected:
+ bool monoSensor() const
+ {
+ return monoSensor_;
+ }
+
/* Raspberry Pi controller specific defines. */
std::unique_ptr<RPiController::CamHelper> helper_;
RPiController::Controller controller_;
@@ -64,6 +70,12 @@ protected:
ControlList libcameraMetadata_;
bool statsMetadataOutput_;
+ /* Remember the HDR status after a mode switch. */
+ HdrStatus hdrStatus_;
+
+ /* Whether the stitch block (if available) needs to swap buffers. */
+ bool stitchSwapBuffers_;
+
private:
/* Number of metadata objects available in the context list. */
static constexpr unsigned int numMetadataContexts = 16;
diff --git a/src/ipa/rpi/controller/af_status.h b/src/ipa/rpi/controller/af_status.h
index 92c08812..c1487cc4 100644
--- a/src/ipa/rpi/controller/af_status.h
+++ b/src/ipa/rpi/controller/af_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
- * af_status.h - AF control algorithm status
+ * AF control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h
index 534e38e2..fdaa10e6 100644
--- a/src/ipa/rpi/controller/agc_algorithm.h
+++ b/src/ipa/rpi/controller/agc_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * agc_algorithm.h - AGC/AEC control algorithm interface
+ * AGC/AEC control algorithm interface
*/
#pragma once
@@ -23,15 +23,19 @@ public:
virtual std::vector<double> const &getWeights() const = 0;
virtual void setEv(unsigned int channel, double ev) = 0;
virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0;
- virtual void setFixedShutter(unsigned int channel,
- libcamera::utils::Duration fixedShutter) = 0;
- virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0;
+ virtual void setFixedExposureTime(unsigned int channel,
+ libcamera::utils::Duration fixedExposureTime) = 0;
+ virtual void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) = 0;
virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0;
virtual void setMeteringMode(std::string const &meteringModeName) = 0;
virtual void setExposureMode(std::string const &exposureModeName) = 0;
virtual void setConstraintMode(std::string const &contraintModeName) = 0;
- virtual void enableAuto() = 0;
- virtual void disableAuto() = 0;
+ virtual void enableAutoExposure() = 0;
+ virtual void disableAutoExposure() = 0;
+ virtual bool autoExposureEnabled() const = 0;
+ virtual void enableAutoGain() = 0;
+ virtual void disableAutoGain() = 0;
+ virtual bool autoGainEnabled() const = 0;
virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0;
};
diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h
index 68f89958..9308b156 100644
--- a/src/ipa/rpi/controller/agc_status.h
+++ b/src/ipa/rpi/controller/agc_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * agc_status.h - AGC/AEC control algorithm status
+ * AGC/AEC control algorithm status
*/
#pragma once
@@ -28,7 +28,7 @@
struct AgcStatus {
libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */
libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */
- libcamera::utils::Duration shutterTime;
+ libcamera::utils::Duration exposureTime;
double analogueGain;
std::string exposureMode;
std::string constraintMode;
@@ -36,7 +36,7 @@ struct AgcStatus {
double ev;
libcamera::utils::Duration flickerPeriod;
int floatingRegionEnable;
- libcamera::utils::Duration fixedShutter;
+ libcamera::utils::Duration fixedExposureTime;
double fixedAnalogueGain;
unsigned int channel;
HdrStatus hdr;
diff --git a/src/ipa/rpi/controller/algorithm.cpp b/src/ipa/rpi/controller/algorithm.cpp
index a957fde5..beed47a1 100644
--- a/src/ipa/rpi/controller/algorithm.cpp
+++ b/src/ipa/rpi/controller/algorithm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * algorithm.cpp - ISP control algorithms
+ * ISP control algorithms
*/
#include "algorithm.h"
diff --git a/src/ipa/rpi/controller/algorithm.h b/src/ipa/rpi/controller/algorithm.h
index 4aa814eb..1971bfdc 100644
--- a/src/ipa/rpi/controller/algorithm.h
+++ b/src/ipa/rpi/controller/algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * algorithm.h - ISP control algorithm interface
+ * ISP control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/alsc_status.h b/src/ipa/rpi/controller/alsc_status.h
index 49a9f4a0..329e8a37 100644
--- a/src/ipa/rpi/controller/alsc_status.h
+++ b/src/ipa/rpi/controller/alsc_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * alsc_status.h - ALSC (auto lens shading correction) control algorithm status
+ * ALSC (auto lens shading correction) control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h
index 6009bdac..d941ed4e 100644
--- a/src/ipa/rpi/controller/awb_algorithm.h
+++ b/src/ipa/rpi/controller/awb_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * awb_algorithm.h - AWB control algorithm interface
+ * AWB control algorithm interface
*/
#pragma once
@@ -19,6 +19,7 @@ public:
virtual void initialValues(double &gainR, double &gainB) = 0;
virtual void setMode(std::string const &modeName) = 0;
virtual void setManualGains(double manualR, double manualB) = 0;
+ virtual void setColourTemperature(double temperatureK) = 0;
virtual void enableAuto() = 0;
virtual void disableAuto() = 0;
};
diff --git a/src/ipa/rpi/controller/awb_status.h b/src/ipa/rpi/controller/awb_status.h
index dd5a79e3..125df1a0 100644
--- a/src/ipa/rpi/controller/awb_status.h
+++ b/src/ipa/rpi/controller/awb_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * awb_status.h - AWB control algorithm status
+ * AWB control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/black_level_algorithm.h b/src/ipa/rpi/controller/black_level_algorithm.h
index c2cff2f5..ce044e59 100644
--- a/src/ipa/rpi/controller/black_level_algorithm.h
+++ b/src/ipa/rpi/controller/black_level_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * black_level_algorithm.h - black level control algorithm interface
+ * black level control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/black_level_status.h b/src/ipa/rpi/controller/black_level_status.h
index fd5e4ccb..57a0705a 100644
--- a/src/ipa/rpi/controller/black_level_status.h
+++ b/src/ipa/rpi/controller/black_level_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * black_level_status.h - black level control algorithm status
+ * black level control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/cac_status.h b/src/ipa/rpi/controller/cac_status.h
index 475d4c5c..adffce41 100644
--- a/src/ipa/rpi/controller/cac_status.h
+++ b/src/ipa/rpi/controller/cac_status.h
@@ -6,8 +6,6 @@
*/
#pragma once
-#include "pwl.h"
-
struct CacStatus {
std::vector<double> lutRx;
std::vector<double> lutRy;
diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h
index 63b11778..61162b32 100644
--- a/src/ipa/rpi/controller/camera_mode.h
+++ b/src/ipa/rpi/controller/camera_mode.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2020, Raspberry Pi Ltd
*
- * camera_mode.h - description of a particular operating mode of a sensor
+ * description of a particular operating mode of a sensor
*/
#pragma once
@@ -50,9 +50,9 @@ struct CameraMode {
double sensitivity;
/* pixel clock rate */
uint64_t pixelRate;
- /* Mode specific shutter speed limits */
- libcamera::utils::Duration minShutter;
- libcamera::utils::Duration maxShutter;
+ /* Mode specific exposure time limits */
+ libcamera::utils::Duration minExposureTime;
+ libcamera::utils::Duration maxExposureTime;
/* Mode specific analogue gain limits */
double minAnalogueGain;
double maxAnalogueGain;
diff --git a/src/ipa/rpi/controller/ccm_algorithm.h b/src/ipa/rpi/controller/ccm_algorithm.h
index e2c4d771..6678ba75 100644
--- a/src/ipa/rpi/controller/ccm_algorithm.h
+++ b/src/ipa/rpi/controller/ccm_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm_algorithm.h - CCM (colour correction matrix) control algorithm interface
+ * CCM (colour correction matrix) control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/ccm_status.h b/src/ipa/rpi/controller/ccm_status.h
index 5e28ee7c..c81bcd42 100644
--- a/src/ipa/rpi/controller/ccm_status.h
+++ b/src/ipa/rpi/controller/ccm_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm_status.h - CCM (colour correction matrix) control algorithm status
+ * CCM (colour correction matrix) control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/contrast_algorithm.h b/src/ipa/rpi/controller/contrast_algorithm.h
index 895b36b0..2e983350 100644
--- a/src/ipa/rpi/controller/contrast_algorithm.h
+++ b/src/ipa/rpi/controller/contrast_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * contrast_algorithm.h - contrast (gamma) control algorithm interface
+ * contrast (gamma) control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/contrast_status.h b/src/ipa/rpi/controller/contrast_status.h
index fb9fe4ba..1f175872 100644
--- a/src/ipa/rpi/controller/contrast_status.h
+++ b/src/ipa/rpi/controller/contrast_status.h
@@ -2,11 +2,11 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * contrast_status.h - contrast (gamma) control algorithm status
+ * contrast (gamma) control algorithm status
*/
#pragma once
-#include "pwl.h"
+#include "libipa/pwl.h"
/*
* The "contrast" algorithm creates a gamma curve, optionally doing a little bit
@@ -14,7 +14,7 @@
*/
struct ContrastStatus {
- RPiController::Pwl gammaCurve;
+ libcamera::ipa::Pwl gammaCurve;
double brightness;
double contrast;
};
diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp
index 5ca98b98..651fff63 100644
--- a/src/ipa/rpi/controller/controller.cpp
+++ b/src/ipa/rpi/controller/controller.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * controller.cpp - ISP controller
+ * ISP controller
*/
#include <assert.h>
@@ -39,6 +39,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap
.pipelineWidth = 13,
.statsInline = false,
.minPixelProcessingTime = 0s,
+ .dataBufferStrided = true,
}
},
{
@@ -71,6 +72,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap
* frames wider than ~16,000 pixels.
*/
.minPixelProcessingTime = 1.0us / 380,
+ .dataBufferStrided = false,
}
},
};
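For context, a hypothetical sketch of a config entry carrying the new flag; the values are illustrative, not a real platform description:

#include "controller.h"

using namespace std::chrono_literals;

static const RPiController::Controller::HardwareConfig exampleConfig = {
	.pipelineWidth = 13,
	.statsInline = false,
	.minPixelProcessingTime = 0s,
	/* true: embedded data buffers are laid out with the image stride. */
	.dataBufferStrided = true,
};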
diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h
index 170aea74..fdb46557 100644
--- a/src/ipa/rpi/controller/controller.h
+++ b/src/ipa/rpi/controller/controller.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * controller.h - ISP controller interface
+ * ISP controller interface
*/
#pragma once
@@ -49,6 +49,7 @@ public:
unsigned int pipelineWidth;
bool statsInline;
libcamera::utils::Duration minPixelProcessingTime;
+ bool dataBufferStrided;
};
Controller();
diff --git a/src/ipa/rpi/controller/denoise_algorithm.h b/src/ipa/rpi/controller/denoise_algorithm.h
index 444cbc25..b9a2a33c 100644
--- a/src/ipa/rpi/controller/denoise_algorithm.h
+++ b/src/ipa/rpi/controller/denoise_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * denoise.h - Denoise control algorithm interface
+ * Denoise control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/denoise_status.h b/src/ipa/rpi/controller/denoise_status.h
index 4d2bd291..eead6086 100644
--- a/src/ipa/rpi/controller/denoise_status.h
+++ b/src/ipa/rpi/controller/denoise_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * denoise_status.h - Denoise control algorithm status
+ * Denoise control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp
index c907efdd..1695764d 100644
--- a/src/ipa/rpi/controller/device_status.cpp
+++ b/src/ipa/rpi/controller/device_status.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * device_status.cpp - device (image sensor) status
+ * device (image sensor) status
*/
#include "device_status.h"
@@ -10,7 +10,7 @@ using namespace libcamera; /* for the Duration operator<< overload */
std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
{
- out << "Exposure: " << d.shutterSpeed
+ out << "Exposure time: " << d.exposureTime
<< " Frame length: " << d.frameLength
<< " Line length: " << d.lineLength
<< " Gain: " << d.analogueGain;
diff --git a/src/ipa/rpi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h
index c45db749..b1792035 100644
--- a/src/ipa/rpi/controller/device_status.h
+++ b/src/ipa/rpi/controller/device_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * device_status.h - device (image sensor) status
+ * device (image sensor) status
*/
#pragma once
@@ -12,21 +12,21 @@
#include <libcamera/base/utils.h>
/*
- * Definition of "device metadata" which stores things like shutter time and
+ * Definition of "device metadata" which stores things like exposure time and
* analogue gain that downstream control algorithms will want to know.
*/
struct DeviceStatus {
DeviceStatus()
- : shutterSpeed(std::chrono::seconds(0)), frameLength(0),
+ : exposureTime(std::chrono::seconds(0)), frameLength(0),
lineLength(std::chrono::seconds(0)), analogueGain(0.0)
{
}
friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d);
- /* time shutter is open */
- libcamera::utils::Duration shutterSpeed;
+ /* time the image is exposed */
+ libcamera::utils::Duration exposureTime;
/* frame length given in number of lines */
uint32_t frameLength;
/* line length for the current frame */
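A small usage sketch (not from the patch), assuming the chrono literal operators are visible; it exercises the renamed field and the operator<< overload updated in device_status.cpp above:

#include <iostream>

#include "device_status.h"

using namespace std::chrono_literals;

int main()
{
	DeviceStatus status;
	status.exposureTime = 10ms;	/* previously status.shutterSpeed */
	status.frameLength = 1600;
	status.analogueGain = 2.0;

	/* Prints "Exposure time: ..." with the new wording. */
	std::cout << status << std::endl;
	return 0;
}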
diff --git a/src/ipa/rpi/controller/dpc_status.h b/src/ipa/rpi/controller/dpc_status.h
index 46d0cf34..9f30d5d9 100644
--- a/src/ipa/rpi/controller/dpc_status.h
+++ b/src/ipa/rpi/controller/dpc_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * dpc_status.h - DPC (defective pixel correction) control algorithm status
+ * DPC (defective pixel correction) control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/geq_status.h b/src/ipa/rpi/controller/geq_status.h
index 2d749fc9..cb107a48 100644
--- a/src/ipa/rpi/controller/geq_status.h
+++ b/src/ipa/rpi/controller/geq_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * geq_status.h - GEQ (green equalisation) control algorithm status
+ * GEQ (green equalisation) control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/hdr_algorithm.h b/src/ipa/rpi/controller/hdr_algorithm.h
index f622e099..b889d8fd 100644
--- a/src/ipa/rpi/controller/hdr_algorithm.h
+++ b/src/ipa/rpi/controller/hdr_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * hdr_algorithm.h - HDR control algorithm interface
+ * HDR control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/hdr_status.h b/src/ipa/rpi/controller/hdr_status.h
index 24b1a935..a4955778 100644
--- a/src/ipa/rpi/controller/hdr_status.h
+++ b/src/ipa/rpi/controller/hdr_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
- * hdr_status.h - HDR control algorithm status
+ * HDR control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp
index 78116141..13089839 100644
--- a/src/ipa/rpi/controller/histogram.cpp
+++ b/src/ipa/rpi/controller/histogram.cpp
@@ -2,9 +2,9 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.cpp - histogram calculations
+ * histogram calculations
*/
-#include <math.h>
+#include <cmath>
#include <stdio.h>
#include "histogram.h"
@@ -49,9 +49,9 @@ double Histogram::interBinMean(double binLo, double binHi) const
{
assert(binHi >= binLo);
double sumBinFreq = 0, cumulFreq = 0;
- for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi);
+ for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi);
binLo = binNext, binNext += 1.0) {
- int bin = floor(binLo);
+ int bin = std::floor(binLo);
double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
(std::min(binNext, binHi) - binLo);
sumBinFreq += bin * freq;
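To make the fractional-bin weighting concrete, here is a standalone re-implementation of the loop above over a caller-supplied cumulative histogram; the closing normalisation (mean bin index plus a half-bin offset) is an assumption about the part of the function the hunk does not show:

#include <algorithm>
#include <cassert>
#include <cmath>

double interBinMean(const double *cumulative, double binLo, double binHi)
{
	assert(binHi >= binLo);
	double sumBinFreq = 0, cumulFreq = 0;
	for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi);
	     binLo = binNext, binNext += 1.0) {
		int bin = std::floor(binLo);
		/* Weight each bin by the fraction of it the range covers. */
		double freq = (cumulative[bin + 1] - cumulative[bin]) *
			      (std::min(binNext, binHi) - binLo);
		sumBinFreq += bin * freq;
		cumulFreq += freq;
	}
	/* Assumed ending: average bin index, shifted to bin mid-points. */
	return sumBinFreq / cumulFreq + 0.5;
}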
diff --git a/src/ipa/rpi/controller/histogram.h b/src/ipa/rpi/controller/histogram.h
index e2c5509b..ab4e5e31 100644
--- a/src/ipa/rpi/controller/histogram.h
+++ b/src/ipa/rpi/controller/histogram.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * histogram.h - histogram calculation interface
+ * histogram calculation interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/lux_status.h b/src/ipa/rpi/controller/lux_status.h
index 5eb9faac..d8729f43 100644
--- a/src/ipa/rpi/controller/lux_status.h
+++ b/src/ipa/rpi/controller/lux_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * lux_status.h - Lux control algorithm status
+ * Lux control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/meson.build b/src/ipa/rpi/controller/meson.build
index 32a4d31c..74b74888 100644
--- a/src/ipa/rpi/controller/meson.build
+++ b/src/ipa/rpi/controller/meson.build
@@ -5,7 +5,6 @@ rpi_ipa_controller_sources = files([
'controller.cpp',
'device_status.cpp',
'histogram.cpp',
- 'pwl.cpp',
'rpi/af.cpp',
'rpi/agc.cpp',
'rpi/agc_channel.cpp',
@@ -32,4 +31,5 @@ rpi_ipa_controller_deps = [
]
rpi_ipa_controller_lib = static_library('rpi_ipa_controller', rpi_ipa_controller_sources,
+ include_directories : libipa_includes,
dependencies : rpi_ipa_controller_deps)
diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h
index a232dcb1..77d3b074 100644
--- a/src/ipa/rpi/controller/metadata.h
+++ b/src/ipa/rpi/controller/metadata.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * metadata.h - general metadata class
+ * general metadata class
*/
#pragma once
@@ -12,6 +12,7 @@
#include <map>
#include <mutex>
#include <string>
+#include <utility>
#include <libcamera/base/thread_annotations.h>
@@ -36,10 +37,10 @@ public:
}
template<typename T>
- void set(std::string const &tag, T const &value)
+ void set(std::string const &tag, T &&value)
{
std::scoped_lock lock(mutex_);
- data_[tag] = value;
+ data_[tag] = std::forward<T>(value);
}
template<typename T>
@@ -90,6 +91,12 @@ public:
data_.insert(other.data_.begin(), other.data_.end());
}
+ void erase(std::string const &tag)
+ {
+ std::scoped_lock lock(mutex_);
+ eraseLocked(tag);
+ }
+
template<typename T>
T *getLocked(std::string const &tag)
{
@@ -104,10 +111,18 @@ public:
}
template<typename T>
- void setLocked(std::string const &tag, T const &value)
+ void setLocked(std::string const &tag, T &&value)
{
/* Use this only if you're holding the lock yourself. */
- data_[tag] = value;
+ data_[tag] = std::forward<T>(value);
+ }
+
+ void eraseLocked(std::string const &tag)
+ {
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return;
+ data_.erase(it);
}
/*
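The switch to a forwarding reference means rvalue arguments are now moved into the map rather than copied. A brief sketch of the difference, using the class above:

#include <string>
#include <utility>
#include <vector>

#include "metadata.h"

void example(RPiController::Metadata &metadata)
{
	std::vector<double> curve(512);

	/* Lvalue: T deduces to std::vector<double> &, the vector is copied. */
	metadata.set("contrast.curve", curve);

	/* Rvalue: T deduces to std::vector<double>, the buffer is moved. */
	metadata.set("contrast.curve", std::move(curve));

	/* The new erase() removes a tag under the lock; absent tags are a no-op. */
	metadata.erase("contrast.curve");
}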
diff --git a/src/ipa/rpi/controller/noise_status.h b/src/ipa/rpi/controller/noise_status.h
index da194f71..1919da32 100644
--- a/src/ipa/rpi/controller/noise_status.h
+++ b/src/ipa/rpi/controller/noise_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * noise_status.h - Noise control algorithm status
+ * Noise control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/pdaf_data.h b/src/ipa/rpi/controller/pdaf_data.h
index 470510f2..779b987d 100644
--- a/src/ipa/rpi/controller/pdaf_data.h
+++ b/src/ipa/rpi/controller/pdaf_data.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
- * pdaf_data.h - PDAF Metadata
+ * PDAF Metadata
*/
#pragma once
diff --git a/src/ipa/rpi/controller/pwl.cpp b/src/ipa/rpi/controller/pwl.cpp
deleted file mode 100644
index 70c2e24b..00000000
--- a/src/ipa/rpi/controller/pwl.cpp
+++ /dev/null
@@ -1,269 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi Ltd
- *
- * pwl.cpp - piecewise linear functions
- */
-
-#include <cassert>
-#include <cmath>
-#include <stdexcept>
-
-#include "pwl.h"
-
-using namespace RPiController;
-
-int Pwl::read(const libcamera::YamlObject &params)
-{
- if (!params.size() || params.size() % 2)
- return -EINVAL;
-
- const auto &list = params.asList();
-
- for (auto it = list.begin(); it != list.end(); it++) {
- auto x = it->get<double>();
- if (!x)
- return -EINVAL;
- if (it != list.begin() && *x <= points_.back().x)
- return -EINVAL;
-
- auto y = (++it)->get<double>();
- if (!y)
- return -EINVAL;
-
- points_.push_back(Point(*x, *y));
- }
-
- return 0;
-}
-
-void Pwl::append(double x, double y, const double eps)
-{
- if (points_.empty() || points_.back().x + eps < x)
- points_.push_back(Point(x, y));
-}
-
-void Pwl::prepend(double x, double y, const double eps)
-{
- if (points_.empty() || points_.front().x - eps > x)
- points_.insert(points_.begin(), Point(x, y));
-}
-
-Pwl::Interval Pwl::domain() const
-{
- return Interval(points_[0].x, points_[points_.size() - 1].x);
-}
-
-Pwl::Interval Pwl::range() const
-{
- double lo = points_[0].y, hi = lo;
- for (auto &p : points_)
- lo = std::min(lo, p.y), hi = std::max(hi, p.y);
- return Interval(lo, hi);
-}
-
-bool Pwl::empty() const
-{
- return points_.empty();
-}
-
-double Pwl::eval(double x, int *spanPtr, bool updateSpan) const
-{
- int span = findSpan(x, spanPtr && *spanPtr != -1 ? *spanPtr : points_.size() / 2 - 1);
- if (spanPtr && updateSpan)
- *spanPtr = span;
- return points_[span].y +
- (x - points_[span].x) * (points_[span + 1].y - points_[span].y) /
- (points_[span + 1].x - points_[span].x);
-}
-
-int Pwl::findSpan(double x, int span) const
-{
- /*
- * Pwls are generally small, so linear search may well be faster than
- * binary, though could review this if large PWls start turning up.
- */
- int lastSpan = points_.size() - 2;
- /*
- * some algorithms may call us with span pointing directly at the last
- * control point
- */
- span = std::max(0, std::min(lastSpan, span));
- while (span < lastSpan && x >= points_[span + 1].x)
- span++;
- while (span && x < points_[span].x)
- span--;
- return span;
-}
-
-Pwl::PerpType Pwl::invert(Point const &xy, Point &perp, int &span,
- const double eps) const
-{
- assert(span >= -1);
- bool prevOffEnd = false;
- for (span = span + 1; span < (int)points_.size() - 1; span++) {
- Point spanVec = points_[span + 1] - points_[span];
- double t = ((xy - points_[span]) % spanVec) / spanVec.len2();
- if (t < -eps) /* off the start of this span */
- {
- if (span == 0) {
- perp = points_[span];
- return PerpType::Start;
- } else if (prevOffEnd) {
- perp = points_[span];
- return PerpType::Vertex;
- }
- } else if (t > 1 + eps) /* off the end of this span */
- {
- if (span == (int)points_.size() - 2) {
- perp = points_[span + 1];
- return PerpType::End;
- }
- prevOffEnd = true;
- } else /* a true perpendicular */
- {
- perp = points_[span] + spanVec * t;
- return PerpType::Perpendicular;
- }
- }
- return PerpType::None;
-}
-
-Pwl Pwl::inverse(bool *trueInverse, const double eps) const
-{
- bool appended = false, prepended = false, neither = false;
- Pwl inverse;
-
- for (Point const &p : points_) {
- if (inverse.empty())
- inverse.append(p.y, p.x, eps);
- else if (std::abs(inverse.points_.back().x - p.y) <= eps ||
- std::abs(inverse.points_.front().x - p.y) <= eps)
- /* do nothing */;
- else if (p.y > inverse.points_.back().x) {
- inverse.append(p.y, p.x, eps);
- appended = true;
- } else if (p.y < inverse.points_.front().x) {
- inverse.prepend(p.y, p.x, eps);
- prepended = true;
- } else
- neither = true;
- }
-
- /*
- * This is not a proper inverse if we found ourselves putting points
- * onto both ends of the inverse, or if there were points that couldn't
- * go on either.
- */
- if (trueInverse)
- *trueInverse = !(neither || (appended && prepended));
-
- return inverse;
-}
-
-Pwl Pwl::compose(Pwl const &other, const double eps) const
-{
- double thisX = points_[0].x, thisY = points_[0].y;
- int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
- Pwl result({ { thisX, other.eval(thisY, &otherSpan, false) } });
- while (thisSpan != (int)points_.size() - 1) {
- double dx = points_[thisSpan + 1].x - points_[thisSpan].x,
- dy = points_[thisSpan + 1].y - points_[thisSpan].y;
- if (std::abs(dy) > eps &&
- otherSpan + 1 < (int)other.points_.size() &&
- points_[thisSpan + 1].y >=
- other.points_[otherSpan + 1].x + eps) {
- /*
- * next control point in result will be where this
- * function's y reaches the next span in other
- */
- thisX = points_[thisSpan].x +
- (other.points_[otherSpan + 1].x -
- points_[thisSpan].y) *
- dx / dy;
- thisY = other.points_[++otherSpan].x;
- } else if (std::abs(dy) > eps && otherSpan > 0 &&
- points_[thisSpan + 1].y <=
- other.points_[otherSpan - 1].x - eps) {
- /*
- * next control point in result will be where this
- * function's y reaches the previous span in other
- */
- thisX = points_[thisSpan].x +
- (other.points_[otherSpan + 1].x -
- points_[thisSpan].y) *
- dx / dy;
- thisY = other.points_[--otherSpan].x;
- } else {
- /* we stay in the same span in other */
- thisSpan++;
- thisX = points_[thisSpan].x,
- thisY = points_[thisSpan].y;
- }
- result.append(thisX, other.eval(thisY, &otherSpan, false),
- eps);
- }
- return result;
-}
-
-void Pwl::map(std::function<void(double x, double y)> f) const
-{
- for (auto &pt : points_)
- f(pt.x, pt.y);
-}
-
-void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
- std::function<void(double x, double y0, double y1)> f)
-{
- int span0 = 0, span1 = 0;
- double x = std::min(pwl0.points_[0].x, pwl1.points_[0].x);
- f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
- while (span0 < (int)pwl0.points_.size() - 1 ||
- span1 < (int)pwl1.points_.size() - 1) {
- if (span0 == (int)pwl0.points_.size() - 1)
- x = pwl1.points_[++span1].x;
- else if (span1 == (int)pwl1.points_.size() - 1)
- x = pwl0.points_[++span0].x;
- else if (pwl0.points_[span0 + 1].x > pwl1.points_[span1 + 1].x)
- x = pwl1.points_[++span1].x;
- else
- x = pwl0.points_[++span0].x;
- f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
- }
-}
-
-Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
- std::function<double(double x, double y0, double y1)> f,
- const double eps)
-{
- Pwl result;
- map2(pwl0, pwl1, [&](double x, double y0, double y1) {
- result.append(x, f(x, y0, y1), eps);
- });
- return result;
-}
-
-void Pwl::matchDomain(Interval const &domain, bool clip, const double eps)
-{
- int span = 0;
- prepend(domain.start, eval(clip ? points_[0].x : domain.start, &span),
- eps);
- span = points_.size() - 2;
- append(domain.end, eval(clip ? points_.back().x : domain.end, &span),
- eps);
-}
-
-Pwl &Pwl::operator*=(double d)
-{
- for (auto &pt : points_)
- pt.y *= d;
- return *this;
-}
-
-void Pwl::debug(FILE *fp) const
-{
- fprintf(fp, "Pwl {\n");
- for (auto &p : points_)
- fprintf(fp, "\t(%g, %g)\n", p.x, p.y);
- fprintf(fp, "}\n");
-}
diff --git a/src/ipa/rpi/controller/pwl.h b/src/ipa/rpi/controller/pwl.h
deleted file mode 100644
index aacf6039..00000000
--- a/src/ipa/rpi/controller/pwl.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: BSD-2-Clause */
-/*
- * Copyright (C) 2019, Raspberry Pi Ltd
- *
- * pwl.h - piecewise linear functions interface
- */
-#pragma once
-
-#include <functional>
-#include <math.h>
-#include <vector>
-
-#include "libcamera/internal/yaml_parser.h"
-
-namespace RPiController {
-
-class Pwl
-{
-public:
- struct Interval {
- Interval(double _start, double _end)
- : start(_start), end(_end)
- {
- }
- double start, end;
- bool contains(double value)
- {
- return value >= start && value <= end;
- }
- double clip(double value)
- {
- return value < start ? start
- : (value > end ? end : value);
- }
- double len() const { return end - start; }
- };
- struct Point {
- Point() : x(0), y(0) {}
- Point(double _x, double _y)
- : x(_x), y(_y) {}
- double x, y;
- Point operator-(Point const &p) const
- {
- return Point(x - p.x, y - p.y);
- }
- Point operator+(Point const &p) const
- {
- return Point(x + p.x, y + p.y);
- }
- double operator%(Point const &p) const
- {
- return x * p.x + y * p.y;
- }
- Point operator*(double f) const { return Point(x * f, y * f); }
- Point operator/(double f) const { return Point(x / f, y / f); }
- double len2() const { return x * x + y * y; }
- double len() const { return sqrt(len2()); }
- };
- Pwl() {}
- Pwl(std::vector<Point> const &points) : points_(points) {}
- int read(const libcamera::YamlObject &params);
- void append(double x, double y, const double eps = 1e-6);
- void prepend(double x, double y, const double eps = 1e-6);
- Interval domain() const;
- Interval range() const;
- bool empty() const;
- /*
- * Evaluate Pwl, optionally supplying an initial guess for the
- * "span". The "span" may be optionally be updated. If you want to know
- * the "span" value but don't have an initial guess you can set it to
- * -1.
- */
- double eval(double x, int *spanPtr = nullptr,
- bool updateSpan = true) const;
- /*
- * Find perpendicular closest to xy, starting from span+1 so you can
- * call it repeatedly to check for multiple closest points (set span to
- * -1 on the first call). Also returns "pseudo" perpendiculars; see
- * PerpType enum.
- */
- enum class PerpType {
- None, /* no perpendicular found */
- Start, /* start of Pwl is closest point */
- End, /* end of Pwl is closest point */
- Vertex, /* vertex of Pwl is closest point */
- Perpendicular /* true perpendicular found */
- };
- PerpType invert(Point const &xy, Point &perp, int &span,
- const double eps = 1e-6) const;
- /*
- * Compute the inverse function. Indicate if it is a proper (true)
- * inverse, or only a best effort (e.g. input was non-monotonic).
- */
- Pwl inverse(bool *trueInverse = nullptr, const double eps = 1e-6) const;
- /* Compose two Pwls together, doing "this" first and "other" after. */
- Pwl compose(Pwl const &other, const double eps = 1e-6) const;
- /* Apply function to (x,y) values at every control point. */
- void map(std::function<void(double x, double y)> f) const;
- /*
- * Apply function to (x, y0, y1) values wherever either Pwl has a
- * control point.
- */
- static void map2(Pwl const &pwl0, Pwl const &pwl1,
- std::function<void(double x, double y0, double y1)> f);
- /*
- * Combine two Pwls, meaning we create a new Pwl where the y values are
- * given by running f wherever either has a knot.
- */
- static Pwl
- combine(Pwl const &pwl0, Pwl const &pwl1,
- std::function<double(double x, double y0, double y1)> f,
- const double eps = 1e-6);
- /*
- * Make "this" match (at least) the given domain. Any extension my be
- * clipped or linear.
- */
- void matchDomain(Interval const &domain, bool clip = true,
- const double eps = 1e-6);
- Pwl &operator*=(double d);
- void debug(FILE *fp = stdout) const;
-
-private:
- int findSpan(double x, int span) const;
- std::vector<Point> points_;
-};
-
-} /* namespace RPiController */
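The class is replaced by the shared implementation in src/ipa/libipa. A minimal sketch of the replacement API, inferred from the call sites updated elsewhere in this patch (a YamlObject getter instead of Pwl::read(), clamp() instead of clip(), and an inverse() returning a pair):

#include <errno.h>

#include "libcamera/internal/yaml_parser.h"

#include "libipa/pwl.h"

using libcamera::ipa::Pwl;

int parseMap(const libcamera::YamlObject &params)
{
	/* YAML parsing moves from Pwl::read() to a YamlObject getter. */
	Pwl map = params["map"].get<Pwl>(Pwl{});
	if (map.empty())
		return -EINVAL;

	/* Interval::clip() is now clamp(). */
	double pos = map.eval(map.domain().clamp(2.5));
	static_cast<void>(pos);

	/* inverse() now returns { Pwl, bool "is a true inverse" }. */
	auto [inv, trueInverse] = map.inverse();
	static_cast<void>(inv);
	static_cast<void>(trueInverse);
	return 0;
}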
diff --git a/src/ipa/rpi/controller/region_stats.h b/src/ipa/rpi/controller/region_stats.h
index a8860dc8..c60f7d9a 100644
--- a/src/ipa/rpi/controller/region_stats.h
+++ b/src/ipa/rpi/controller/region_stats.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
- * region_stats.h - Raspberry Pi region based statistics container
+ * Raspberry Pi region based statistics container
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp
index ed0c8a94..2157eb94 100644
--- a/src/ipa/rpi/controller/rpi/af.cpp
+++ b/src/ipa/rpi/controller/rpi/af.cpp
@@ -2,13 +2,13 @@
/*
* Copyright (C) 2022-2023, Raspberry Pi Ltd
*
- * af.cpp - Autofocus control algorithm
+ * Autofocus control algorithm
*/
#include "af.h"
+#include <cmath>
#include <iomanip>
-#include <math.h>
#include <stdlib.h>
#include <libcamera/base/log.h>
@@ -139,7 +139,7 @@ int Af::CfgParams::read(const libcamera::YamlObject &params)
readNumber<uint32_t>(skipFrames, params, "skip_frames");
if (params.contains("map"))
- map.read(params["map"]);
+ map = params["map"].get<ipa::Pwl>(ipa::Pwl{});
else
LOG(RPiAf, Warning) << "No map defined";
@@ -721,7 +721,7 @@ bool Af::setLensPosition(double dioptres, int *hwpos)
if (mode_ == AfModeManual) {
LOG(RPiAf, Debug) << "setLensPosition: " << dioptres;
- ftarget_ = cfg_.map.domain().clip(dioptres);
+ ftarget_ = cfg_.map.domain().clamp(dioptres);
changed = !(initted_ && fsmooth_ == ftarget_);
updateLensPosition();
}
diff --git a/src/ipa/rpi/controller/rpi/af.h b/src/ipa/rpi/controller/rpi/af.h
index 6d2bae67..317a51f3 100644
--- a/src/ipa/rpi/controller/rpi/af.h
+++ b/src/ipa/rpi/controller/rpi/af.h
@@ -2,14 +2,15 @@
/*
* Copyright (C) 2022-2023, Raspberry Pi Ltd
*
- * af.h - Autofocus control algorithm
+ * Autofocus control algorithm
*/
#pragma once
#include "../af_algorithm.h"
#include "../af_status.h"
#include "../pdaf_data.h"
-#include "../pwl.h"
+
+#include "libipa/pwl.h"
/*
* This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF.
@@ -100,7 +101,7 @@ private:
uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */
uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */
uint32_t skipFrames; /* frames to skip at start or modeswitch */
- Pwl map; /* converts dioptres -> lens driver position */
+ libcamera::ipa::Pwl map; /* converts dioptres -> lens driver position */
CfgParams();
int read(const libcamera::YamlObject &params);
diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp
index 6549dedd..02bfdb4a 100644
--- a/src/ipa/rpi/controller/rpi/agc.cpp
+++ b/src/ipa/rpi/controller/rpi/agc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * agc.cpp - AGC/AEC control algorithm
+ * AGC/AEC control algorithm
*/
#include "agc.h"
@@ -74,22 +74,62 @@ int Agc::checkChannel(unsigned int channelIndex) const
return 0;
}
-void Agc::disableAuto()
+void Agc::disableAutoExposure()
{
- LOG(RPiAgc, Debug) << "disableAuto";
+ LOG(RPiAgc, Debug) << "disableAutoExposure";
/* All channels are enabled/disabled together. */
for (auto &data : channelData_)
- data.channel.disableAuto();
+ data.channel.disableAutoExposure();
}
-void Agc::enableAuto()
+void Agc::enableAutoExposure()
{
- LOG(RPiAgc, Debug) << "enableAuto";
+ LOG(RPiAgc, Debug) << "enableAutoExposure";
/* All channels are enabled/disabled together. */
for (auto &data : channelData_)
- data.channel.enableAuto();
+ data.channel.enableAutoExposure();
+}
+
+bool Agc::autoExposureEnabled() const
+{
+ LOG(RPiAgc, Debug) << "autoExposureEnabled";
+
+ /*
+ * We always have at least one channel, and since all channels are
+ * enabled and disabled together we can simply check the first one.
+ */
+ return channelData_[0].channel.autoExposureEnabled();
+}
+
+void Agc::disableAutoGain()
+{
+ LOG(RPiAgc, Debug) << "disableAutoGain";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.disableAutoGain();
+}
+
+void Agc::enableAutoGain()
+{
+ LOG(RPiAgc, Debug) << "enableAutoGain";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.enableAutoGain();
+}
+
+bool Agc::autoGainEnabled() const
+{
+ LOG(RPiAgc, Debug) << "autoGainEnabled";
+
+ /*
+ * We always have at least one channel, and since all channels are
+ * enabled and disabled together we can simply check the first one.
+ */
+ return channelData_[0].channel.autoGainEnabled();
}
unsigned int Agc::getConvergenceFrames() const
@@ -127,21 +167,21 @@ void Agc::setFlickerPeriod(Duration flickerPeriod)
data.channel.setFlickerPeriod(flickerPeriod);
}
-void Agc::setMaxShutter(Duration maxShutter)
+void Agc::setMaxExposureTime(Duration maxExposureTime)
{
/* Frame durations will be the same across all channels too. */
for (auto &data : channelData_)
- data.channel.setMaxShutter(maxShutter);
+ data.channel.setMaxExposureTime(maxExposureTime);
}
-void Agc::setFixedShutter(unsigned int channelIndex, Duration fixedShutter)
+void Agc::setFixedExposureTime(unsigned int channelIndex, Duration fixedExposureTime)
{
if (checkChannel(channelIndex))
return;
- LOG(RPiAgc, Debug) << "setFixedShutter " << fixedShutter
+ LOG(RPiAgc, Debug) << "setFixedExposureTime " << fixedExposureTime
<< " for channel " << channelIndex;
- channelData_[channelIndex].channel.setFixedShutter(fixedShutter);
+ channelData_[channelIndex].channel.setFixedExposureTime(fixedExposureTime);
}
void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain)
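With the split interface, exposure time and analogue gain can now be locked independently rather than only together. A sketch of how a caller might freeze just the exposure time; "agc" is assumed to be an RPiController::Agc instance:

#include <cassert>

#include "agc.h"

void lockExposureTimeOnly(RPiController::Agc &agc)
{
	agc.disableAutoExposure();	/* freeze exposure time at its current value */
	agc.enableAutoGain();		/* gain continues to adapt */

	/* All channels toggle together, so the getters reflect channel 0. */
	assert(!agc.autoExposureEnabled());
	assert(agc.autoGainEnabled());
}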
diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h
index 7d26bdf6..c3a940bf 100644
--- a/src/ipa/rpi/controller/rpi/agc.h
+++ b/src/ipa/rpi/controller/rpi/agc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * agc.h - AGC/AEC control algorithm
+ * AGC/AEC control algorithm
*/
#pragma once
@@ -32,16 +32,20 @@ public:
std::vector<double> const &getWeights() const override;
void setEv(unsigned int channel, double ev) override;
void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override;
- void setMaxShutter(libcamera::utils::Duration maxShutter) override;
- void setFixedShutter(unsigned int channelIndex,
- libcamera::utils::Duration fixedShutter) override;
+ void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) override;
+ void setFixedExposureTime(unsigned int channelIndex,
+ libcamera::utils::Duration fixedExposureTime) override;
void setFixedAnalogueGain(unsigned int channelIndex,
double fixedAnalogueGain) override;
void setMeteringMode(std::string const &meteringModeName) override;
void setExposureMode(std::string const &exposureModeName) override;
void setConstraintMode(std::string const &contraintModeName) override;
- void enableAuto() override;
- void disableAuto() override;
+ void enableAutoExposure() override;
+ void disableAutoExposure() override;
+ bool autoExposureEnabled() const override;
+ void enableAutoGain() override;
+ void disableAutoGain() override;
+ bool autoGainEnabled() const override;
void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
void prepare(Metadata *imageMetadata) override;
void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp
index 8116c6c1..a5562760 100644
--- a/src/ipa/rpi/controller/rpi/agc_channel.cpp
+++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * agc_channel.cpp - AGC/AEC control algorithm
+ * AGC/AEC control algorithm
*/
#include "agc_channel.h"
@@ -12,6 +12,10 @@
#include <libcamera/base/log.h>
+#include "libcamera/internal/vector.h"
+
+#include "libipa/colours.h"
+
#include "../awb_status.h"
#include "../device_status.h"
#include "../histogram.h"
@@ -65,7 +69,7 @@ int AgcExposureMode::read(const libcamera::YamlObject &params)
auto value = params["shutter"].getList<double>();
if (!value)
return -EINVAL;
- std::transform(value->begin(), value->end(), std::back_inserter(shutter),
+ std::transform(value->begin(), value->end(), std::back_inserter(exposureTime),
[](double v) { return v * 1us; });
value = params["gain"].getList<double>();
@@ -73,13 +77,13 @@ int AgcExposureMode::read(const libcamera::YamlObject &params)
return -EINVAL;
gain = std::move(*value);
- if (shutter.size() < 2 || gain.size() < 2) {
+ if (exposureTime.size() < 2 || gain.size() < 2) {
LOG(RPiAgc, Error)
<< "AgcExposureMode: must have at least two entries in exposure profile";
return -EINVAL;
}
- if (shutter.size() != gain.size()) {
+ if (exposureTime.size() != gain.size()) {
LOG(RPiAgc, Error)
<< "AgcExposureMode: expect same number of exposure and gain entries in exposure profile";
return -EINVAL;
@@ -130,7 +134,8 @@ int AgcConstraint::read(const libcamera::YamlObject &params)
return -EINVAL;
qHi = *value;
- return yTarget.read(params["y_target"]);
+ yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{});
+ return yTarget.empty() ? -EINVAL : 0;
}
static std::tuple<int, AgcConstraintMode>
@@ -237,9 +242,9 @@ int AgcConfig::read(const libcamera::YamlObject &params)
return ret;
}
- ret = yTarget.read(params["y_target"]);
- if (ret)
- return ret;
+ yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{});
+ if (yTarget.empty())
+ return -EINVAL;
speed = params["speed"].get<double>(0.2);
startupFrames = params["startup_frames"].get<uint16_t>(10);
@@ -259,7 +264,7 @@ int AgcConfig::read(const libcamera::YamlObject &params)
}
AgcChannel::ExposureValues::ExposureValues()
- : shutter(0s), analogueGain(0),
+ : exposureTime(0s), analogueGain(0),
totalExposure(0s), totalExposureNoDG(0s)
{
}
@@ -268,7 +273,7 @@ AgcChannel::AgcChannel()
: meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr),
frameCount_(0), lockCount_(0),
lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s),
- maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0)
+ maxExposureTime_(0s), fixedExposureTime_(0s), fixedAnalogueGain_(0.0)
{
/* Set AWB default values in case early frames have no updates in metadata. */
awb_.gainR = 1.0;
@@ -309,31 +314,49 @@ int AgcChannel::read(const libcamera::YamlObject &params,
exposureMode_ = &config_.exposureModes[exposureModeName_];
constraintModeName_ = config_.defaultConstraintMode;
constraintMode_ = &config_.constraintModes[constraintModeName_];
- /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */
- status_.shutterTime = config_.defaultExposureTime;
+ /* Set up the "last exposure time/gain" values, in case AGC starts "disabled". */
+ status_.exposureTime = config_.defaultExposureTime;
status_.analogueGain = config_.defaultAnalogueGain;
return 0;
}
-void AgcChannel::disableAuto()
+void AgcChannel::disableAutoExposure()
+{
+ fixedExposureTime_ = status_.exposureTime;
+}
+
+void AgcChannel::enableAutoExposure()
+{
+ fixedExposureTime_ = 0s;
+}
+
+bool AgcChannel::autoExposureEnabled() const
+{
+ return fixedExposureTime_ == 0s;
+}
+
+void AgcChannel::disableAutoGain()
{
- fixedShutter_ = status_.shutterTime;
fixedAnalogueGain_ = status_.analogueGain;
}
-void AgcChannel::enableAuto()
+void AgcChannel::enableAutoGain()
{
- fixedShutter_ = 0s;
fixedAnalogueGain_ = 0;
}
+bool AgcChannel::autoGainEnabled() const
+{
+ return fixedAnalogueGain_ == 0;
+}
+
unsigned int AgcChannel::getConvergenceFrames() const
{
/*
- * If shutter and gain have been explicitly set, there is no
+ * If exposure time and gain have been explicitly set, there is no
* convergence to happen, so no need to drop any frames - return zero.
*/
- if (fixedShutter_ && fixedAnalogueGain_)
+ if (fixedExposureTime_ && fixedAnalogueGain_)
return 0;
else
return config_.convergenceFrames;
@@ -361,16 +384,16 @@ void AgcChannel::setFlickerPeriod(Duration flickerPeriod)
flickerPeriod_ = flickerPeriod;
}
-void AgcChannel::setMaxShutter(Duration maxShutter)
+void AgcChannel::setMaxExposureTime(Duration maxExposureTime)
{
- maxShutter_ = maxShutter;
+ maxExposureTime_ = maxExposureTime;
}
-void AgcChannel::setFixedShutter(Duration fixedShutter)
+void AgcChannel::setFixedExposureTime(Duration fixedExposureTime)
{
- fixedShutter_ = fixedShutter;
+ fixedExposureTime_ = fixedExposureTime;
/* Set this in case someone calls disableAuto() straight after. */
- status_.shutterTime = limitShutter(fixedShutter_);
+ status_.exposureTime = limitExposureTime(fixedExposureTime_);
}
void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain)
@@ -410,22 +433,22 @@ void AgcChannel::switchMode(CameraMode const &cameraMode,
double lastSensitivity = mode_.sensitivity;
mode_ = cameraMode;
- Duration fixedShutter = limitShutter(fixedShutter_);
- if (fixedShutter && fixedAnalogueGain_) {
+ Duration fixedExposureTime = limitExposureTime(fixedExposureTime_);
+ if (fixedExposureTime && fixedAnalogueGain_) {
/* We're going to reset the algorithm here with these fixed values. */
fetchAwbStatus(metadata);
double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
ASSERT(minColourGain != 0.0);
/* This is the equivalent of computeTargetExposure and applyDigitalGain. */
- target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_;
+ target_.totalExposureNoDG = fixedExposureTime_ * fixedAnalogueGain_;
target_.totalExposure = target_.totalExposureNoDG / minColourGain;
/* Equivalent of filterExposure. This resets any "history". */
filtered_ = target_;
/* Equivalent of divideUpExposure. */
- filtered_.shutter = fixedShutter;
+ filtered_.exposureTime = fixedExposureTime;
filtered_.analogueGain = fixedAnalogueGain_;
} else if (status_.totalExposureValue) {
/*
@@ -447,14 +470,15 @@ void AgcChannel::switchMode(CameraMode const &cameraMode,
divideUpExposure();
} else {
/*
- * We come through here on startup, when at least one of the shutter
- * or gain has not been fixed. We must still write those values out so
- * that they will be applied immediately. We supply some arbitrary defaults
- * for any that weren't set.
+ * We come through here on startup, when at least one of the
+ * exposure time or gain has not been fixed. We must still
+ * write those values out so that they will be applied
+ * immediately. We supply some arbitrary defaults for any that
+ * weren't set.
*/
/* Equivalent of divideUpExposure. */
- filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime;
+ filtered_.exposureTime = fixedExposureTime ? fixedExposureTime : config_.defaultExposureTime;
filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain;
}
@@ -480,7 +504,7 @@ void AgcChannel::prepare(Metadata *imageMetadata)
/* Process has run, so we have meaningful values. */
DeviceStatus deviceStatus;
if (imageMetadata->get("device.status", deviceStatus) == 0) {
- Duration actualExposure = deviceStatus.shutterSpeed *
+ Duration actualExposure = deviceStatus.exposureTime *
deviceStatus.analogueGain;
if (actualExposure) {
double digitalGain = totalExposureValue / actualExposure;
@@ -534,7 +558,7 @@ void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus,
*/
bool desaturate = applyDigitalGain(gain, targetY, channelBound);
/*
- * The last thing is to divide up the exposure value into a shutter time
+ * The last thing is to divide up the exposure value into an exposure time
* and analogue gain, according to the current exposure mode.
*/
divideUpExposure();
@@ -550,7 +574,7 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus)
const double resetMargin = 1.5;
/* Add 200us to the exposure time error to allow for line quantisation. */
- Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us;
+ Duration exposureError = lastDeviceStatus_.exposureTime * errorFactor + 200us;
double gainError = lastDeviceStatus_.analogueGain * errorFactor;
Duration targetError = lastTargetExposure_ * errorFactor;
@@ -559,15 +583,15 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus)
* the values we keep requesting may be unachievable. For this reason
* we only insist that we're close to values in the past few frames.
*/
- if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError &&
- deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError &&
+ if (deviceStatus.exposureTime > lastDeviceStatus_.exposureTime - exposureError &&
+ deviceStatus.exposureTime < lastDeviceStatus_.exposureTime + exposureError &&
deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError &&
deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError &&
status_.targetExposureValue > lastTargetExposure_ - targetError &&
status_.targetExposureValue < lastTargetExposure_ + targetError)
lockCount_ = std::min(lockCount_ + 1, maxLockCount);
- else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError ||
- deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError ||
+ else if (deviceStatus.exposureTime < lastDeviceStatus_.exposureTime - resetMargin * exposureError ||
+ deviceStatus.exposureTime > lastDeviceStatus_.exposureTime + resetMargin * exposureError ||
deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError ||
deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError ||
status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError ||
@@ -585,11 +609,11 @@ void AgcChannel::housekeepConfig()
{
/* First fetch all the up-to-date settings, so no one else has to do it. */
status_.ev = ev_;
- status_.fixedShutter = limitShutter(fixedShutter_);
+ status_.fixedExposureTime = limitExposureTime(fixedExposureTime_);
status_.fixedAnalogueGain = fixedAnalogueGain_;
status_.flickerPeriod = flickerPeriod_;
- LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter "
- << status_.fixedShutter << " fixedAnalogueGain "
+ LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedExposureTime "
+ << status_.fixedExposureTime << " fixedAnalogueGain "
<< status_.fixedAnalogueGain;
/*
* Make sure the "mode" pointers point to the up-to-date things, if
@@ -633,10 +657,10 @@ void AgcChannel::housekeepConfig()
void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus)
{
- current_.shutter = deviceStatus.shutterSpeed;
+ current_.exposureTime = deviceStatus.exposureTime;
current_.analogueGain = deviceStatus.analogueGain;
current_.totalExposure = 0s; /* this value is unused */
- current_.totalExposureNoDG = current_.shutter * current_.analogueGain;
+ current_.totalExposureNoDG = current_.exposureTime * current_.analogueGain;
}
void AgcChannel::fetchAwbStatus(Metadata *imageMetadata)
@@ -677,12 +701,13 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb,
* Note that the weights are applied by the IPA to the statistics directly,
* before they are given to us here.
*/
- double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0;
+ RGB<double> sum{ 0.0 };
+ double pixelSum = 0;
for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) {
auto &region = stats->agcRegions.get(i);
- rSum += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted);
- gSum += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted);
- bSum += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted);
+ sum.r() += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted);
+ sum.g() += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted);
+ sum.b() += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted);
pixelSum += region.counted;
}
if (pixelSum == 0.0) {
@@ -690,14 +715,11 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb,
return 0;
}
- double ySum;
/* Factor in the AWB correction if needed. */
- if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) {
- ySum = rSum * awb.gainR * .299 +
- gSum * awb.gainG * .587 +
- bSum * awb.gainB * .114;
- } else
- ySum = rSum * .299 + gSum * .587 + bSum * .114;
+ if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb)
+ sum *= RGB<double>{ { awb.gainR, awb.gainG, awb.gainB } };
+
+ double ySum = ipa::rec601LuminanceFromRGB(sum);
return ySum / pixelSum / (1 << 16);
}
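The hand-coded weights are replaced by the libipa helper. A quick numerical check, assuming rec601LuminanceFromRGB applies the same 0.299/0.587/0.114 coefficients as the deleted lines:

#include "libcamera/internal/vector.h"

#include "libipa/colours.h"

double luminanceCheck()
{
	libcamera::RGB<double> sum{ { 1000.0, 2000.0, 500.0 } };

	/*
	 * Matches the weights removed above:
	 * 0.299 * 1000 + 0.587 * 2000 + 0.114 * 500 = 1530.0
	 */
	return libcamera::ipa::rec601LuminanceFromRGB(sum);
}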
@@ -715,7 +737,7 @@ static constexpr double EvGainYTargetLimit = 0.9;
static double constraintComputeGain(AgcConstraint &c, const Histogram &h, double lux,
double evGain, double &targetY)
{
- targetY = c.yTarget.eval(c.yTarget.domain().clip(lux));
+ targetY = c.yTarget.eval(c.yTarget.domain().clamp(lux));
targetY = std::min(EvGainYTargetLimit, targetY * evGain);
double iqm = h.interQuantileMean(c.qLo, c.qHi);
return (targetY * h.bins()) / iqm;
@@ -734,7 +756,7 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
* The initial gain and target_Y come from some of the regions. After
* that we consider the histogram constraints.
*/
- targetY = config_.yTarget.eval(config_.yTarget.domain().clip(lux.lux));
+ targetY = config_.yTarget.eval(config_.yTarget.domain().clamp(lux.lux));
targetY = std::min(EvGainYTargetLimit, targetY * evGain);
/*
@@ -774,17 +796,17 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
void AgcChannel::computeTargetExposure(double gain)
{
- if (status_.fixedShutter && status_.fixedAnalogueGain) {
+ if (status_.fixedExposureTime && status_.fixedAnalogueGain) {
/*
- * When ag and shutter are both fixed, we need to drive the
- * total exposure so that we end up with a digital gain of at least
- * 1/minColourGain. Otherwise we'd desaturate channels causing
- * white to go cyan or magenta.
+ * When analogue gain and exposure time are both fixed, we need
+ * to drive the total exposure so that we end up with a digital
+ * gain of at least 1/minColourGain. Otherwise we'd desaturate
+ * channels causing white to go cyan or magenta.
*/
double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
ASSERT(minColourGain != 0.0);
target_.totalExposure =
- status_.fixedShutter * status_.fixedAnalogueGain / minColourGain;
+ status_.fixedExposureTime * status_.fixedAnalogueGain / minColourGain;
} else {
/*
* The statistics reflect the image without digital gain, so the final
@@ -792,12 +814,12 @@ void AgcChannel::computeTargetExposure(double gain)
*/
target_.totalExposure = current_.totalExposureNoDG * gain;
/* The final target exposure is also limited to what the exposure mode allows. */
- Duration maxShutter = status_.fixedShutter
- ? status_.fixedShutter
- : exposureMode_->shutter.back();
- maxShutter = limitShutter(maxShutter);
+ Duration maxExposureTime = status_.fixedExposureTime
+ ? status_.fixedExposureTime
+ : exposureMode_->exposureTime.back();
+ maxExposureTime = limitExposureTime(maxExposureTime);
Duration maxTotalExposure =
- maxShutter *
+ maxExposureTime *
(status_.fixedAnalogueGain != 0.0
? status_.fixedAnalogueGain
: exposureMode_->gain.back());
@@ -881,12 +903,16 @@ void AgcChannel::filterExposure()
double stableRegion = config_.stableRegion;
/*
- * AGC adapts instantly if both shutter and gain are directly specified
- * or we're in the startup phase.
+ * AGC adapts instantly if both exposure time and gain are directly
+ * specified or we're in the startup phase. Also disable the stable
+ * region, because we want to reflect any user exposure/gain updates,
+ * however small.
*/
- if ((status_.fixedShutter && status_.fixedAnalogueGain) ||
- frameCount_ <= config_.startupFrames)
+ if ((status_.fixedExposureTime && status_.fixedAnalogueGain) ||
+ frameCount_ <= config_.startupFrames) {
speed = 1.0;
+ stableRegion = 0.0;
+ }
if (!filtered_.totalExposure) {
filtered_.totalExposure = target_.totalExposure;
} else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure &&
@@ -910,34 +936,34 @@ void AgcChannel::filterExposure()
void AgcChannel::divideUpExposure()
{
/*
- * Sending the fixed shutter/gain cases through the same code may seem
- * unnecessary, but it will make more sense when extend this to cover
- * variable aperture.
+ * Sending the fixed exposure time/gain cases through the same code may
+ * seem unnecessary, but it will make more sense when we extend this to
+ * cover variable aperture.
*/
Duration exposureValue = filtered_.totalExposureNoDG;
- Duration shutterTime;
+ Duration exposureTime;
double analogueGain;
- shutterTime = status_.fixedShutter ? status_.fixedShutter
- : exposureMode_->shutter[0];
- shutterTime = limitShutter(shutterTime);
+ exposureTime = status_.fixedExposureTime ? status_.fixedExposureTime
+ : exposureMode_->exposureTime[0];
+ exposureTime = limitExposureTime(exposureTime);
analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain
: exposureMode_->gain[0];
analogueGain = limitGain(analogueGain);
- if (shutterTime * analogueGain < exposureValue) {
+ if (exposureTime * analogueGain < exposureValue) {
for (unsigned int stage = 1;
stage < exposureMode_->gain.size(); stage++) {
- if (!status_.fixedShutter) {
- Duration stageShutter =
- limitShutter(exposureMode_->shutter[stage]);
- if (stageShutter * analogueGain >= exposureValue) {
- shutterTime = exposureValue / analogueGain;
+ if (!status_.fixedExposureTime) {
+ Duration stageExposureTime =
+ limitExposureTime(exposureMode_->exposureTime[stage]);
+ if (stageExposureTime * analogueGain >= exposureValue) {
+ exposureTime = exposureValue / analogueGain;
break;
}
- shutterTime = stageShutter;
+ exposureTime = stageExposureTime;
}
if (status_.fixedAnalogueGain == 0.0) {
- if (exposureMode_->gain[stage] * shutterTime >= exposureValue) {
- analogueGain = exposureValue / shutterTime;
+ if (exposureMode_->gain[stage] * exposureTime >= exposureValue) {
+ analogueGain = exposureValue / exposureTime;
break;
}
analogueGain = exposureMode_->gain[stage];
@@ -945,18 +971,19 @@ void AgcChannel::divideUpExposure()
}
}
}
- LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and "
- << analogueGain;
+ LOG(RPiAgc, Debug)
+ << "Divided up exposure time and gain are " << exposureTime
+ << " and " << analogueGain;
/*
- * Finally adjust shutter time for flicker avoidance (require both
- * shutter and gain not to be fixed).
+ * Finally adjust exposure time for flicker avoidance (requires both
+ * exposure time and gain not to be fixed).
*/
- if (!status_.fixedShutter && !status_.fixedAnalogueGain &&
+ if (!status_.fixedExposureTime && !status_.fixedAnalogueGain &&
status_.flickerPeriod) {
- int flickerPeriods = shutterTime / status_.flickerPeriod;
+ int flickerPeriods = exposureTime / status_.flickerPeriod;
if (flickerPeriods) {
- Duration newShutterTime = flickerPeriods * status_.flickerPeriod;
- analogueGain *= shutterTime / newShutterTime;
+ Duration newExposureTime = flickerPeriods * status_.flickerPeriod;
+ analogueGain *= exposureTime / newExposureTime;
/*
* We should still not allow the ag to go over the
* largest value in the exposure mode. Note that this
@@ -965,12 +992,12 @@ void AgcChannel::divideUpExposure()
*/
analogueGain = std::min(analogueGain, exposureMode_->gain.back());
analogueGain = limitGain(analogueGain);
- shutterTime = newShutterTime;
+ exposureTime = newExposureTime;
}
- LOG(RPiAgc, Debug) << "After flicker avoidance, shutter "
- << shutterTime << " gain " << analogueGain;
+ LOG(RPiAgc, Debug) << "After flicker avoidance, exposure time "
+ << exposureTime << " gain " << analogueGain;
}
- filtered_.shutter = shutterTime;
+ filtered_.exposureTime = exposureTime;
filtered_.analogueGain = analogueGain;
}
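A worked example of the flicker-avoidance arithmetic above, with made-up numbers (50 Hz mains, i.e. a 10 ms flicker period):

#include <libcamera/base/utils.h>

using namespace libcamera::utils;
using namespace std::chrono_literals;

void flickerExample()
{
	Duration exposureTime = 12ms;
	const Duration flickerPeriod = 10ms;	/* 50 Hz mains flickers at 100 Hz */
	double analogueGain = 2.0;

	int flickerPeriods = exposureTime / flickerPeriod;	/* 12 / 10 -> 1 */
	if (flickerPeriods) {
		/* Round down to a whole number of flicker periods: 10 ms. */
		Duration newExposureTime = flickerPeriods * flickerPeriod;
		/* Keep total exposure constant: gain 2.0 * 1.2 -> 2.4. */
		analogueGain *= exposureTime / newExposureTime;
		exposureTime = newExposureTime;
	}
}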
@@ -978,7 +1005,7 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate)
{
status_.totalExposureValue = filtered_.totalExposure;
status_.targetExposureValue = desaturate ? 0s : target_.totalExposure;
- status_.shutterTime = filtered_.shutter;
+ status_.exposureTime = filtered_.exposureTime;
status_.analogueGain = filtered_.analogueGain;
/*
* Write to metadata as well, in case anyone wants to update the camera
@@ -987,32 +1014,32 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate)
imageMetadata->set("agc.status", status_);
LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
<< filtered_.totalExposure;
- LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter
+ LOG(RPiAgc, Debug) << "Camera exposure update: exposure time " << filtered_.exposureTime
<< " analogue gain " << filtered_.analogueGain;
}
-Duration AgcChannel::limitShutter(Duration shutter)
+Duration AgcChannel::limitExposureTime(Duration exposureTime)
{
/*
- * shutter == 0 is a special case for fixed shutter values, and must pass
- * through unchanged
+ * exposureTime == 0 is a special case for fixed exposure time values,
+ * and must pass through unchanged.
*/
- if (!shutter)
- return shutter;
+ if (!exposureTime)
+ return exposureTime;
- shutter = std::clamp(shutter, mode_.minShutter, maxShutter_);
- return shutter;
+ exposureTime = std::clamp(exposureTime, mode_.minExposureTime, maxExposureTime_);
+ return exposureTime;
}
double AgcChannel::limitGain(double gain) const
{
/*
- * Only limit the lower bounds of the gain value to what the sensor limits.
- * The upper bound on analogue gain will be made up with additional digital
- * gain applied by the ISP.
+ * Only limit the lower bounds of the gain value to what the sensor
+ * limits. The upper bound on analogue gain will be made up with
+ * additional digital gain applied by the ISP.
*
- * gain == 0.0 is a special case for fixed shutter values, and must pass
- * through unchanged
+ * gain == 0.0 is a special case for fixed exposure time values, and
+ * must pass through unchanged.
*/
if (!gain)
return gain;
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h
index 4cf7233e..fa697e6f 100644
--- a/src/ipa/rpi/controller/rpi/agc_channel.h
+++ b/src/ipa/rpi/controller/rpi/agc_channel.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * agc_channel.h - AGC/AEC control algorithm
+ * AGC/AEC control algorithm
*/
#pragma once
@@ -12,10 +12,11 @@
#include <libcamera/base/utils.h>
+#include <libipa/pwl.h>
+
#include "../agc_status.h"
#include "../awb_status.h"
#include "../controller.h"
-#include "../pwl.h"
/* This is our implementation of AGC. */
@@ -29,7 +30,7 @@ struct AgcMeteringMode {
};
struct AgcExposureMode {
- std::vector<libcamera::utils::Duration> shutter;
+ std::vector<libcamera::utils::Duration> exposureTime;
std::vector<double> gain;
int read(const libcamera::YamlObject &params);
};
@@ -40,7 +41,7 @@ struct AgcConstraint {
Bound bound;
double qLo;
double qHi;
- Pwl yTarget;
+ libcamera::ipa::Pwl yTarget;
int read(const libcamera::YamlObject &params);
};
@@ -61,7 +62,7 @@ struct AgcConfig {
std::map<std::string, AgcExposureMode> exposureModes;
std::map<std::string, AgcConstraintMode> constraintModes;
std::vector<AgcChannelConstraint> channelConstraints;
- Pwl yTarget;
+ libcamera::ipa::Pwl yTarget;
double speed;
uint16_t startupFrames;
unsigned int convergenceFrames;
@@ -89,14 +90,18 @@ public:
std::vector<double> const &getWeights() const;
void setEv(double ev);
void setFlickerPeriod(libcamera::utils::Duration flickerPeriod);
- void setMaxShutter(libcamera::utils::Duration maxShutter);
- void setFixedShutter(libcamera::utils::Duration fixedShutter);
+ void setMaxExposureTime(libcamera::utils::Duration maxExposureTime);
+ void setFixedExposureTime(libcamera::utils::Duration fixedExposureTime);
void setFixedAnalogueGain(double fixedAnalogueGain);
void setMeteringMode(std::string const &meteringModeName);
void setExposureMode(std::string const &exposureModeName);
void setConstraintMode(std::string const &contraintModeName);
- void enableAuto();
- void disableAuto();
+ void enableAutoExposure();
+ void disableAutoExposure();
+ bool autoExposureEnabled() const;
+ void enableAutoGain();
+ void disableAutoGain();
+ bool autoGainEnabled() const;
void switchMode(CameraMode const &cameraMode, Metadata *metadata);
void prepare(Metadata *imageMetadata);
void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata,
@@ -116,7 +121,7 @@ private:
bool applyDigitalGain(double gain, double targetY, bool channelBound);
void divideUpExposure();
void writeAndFinish(Metadata *imageMetadata, bool desaturate);
- libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter);
+ libcamera::utils::Duration limitExposureTime(libcamera::utils::Duration exposureTime);
double limitGain(double gain) const;
AgcMeteringMode *meteringMode_;
AgcExposureMode *exposureMode_;
@@ -127,7 +132,7 @@ private:
struct ExposureValues {
ExposureValues();
- libcamera::utils::Duration shutter;
+ libcamera::utils::Duration exposureTime;
double analogueGain;
libcamera::utils::Duration totalExposure;
libcamera::utils::Duration totalExposureNoDG; /* without digital gain */
@@ -145,8 +150,8 @@ private:
std::string constraintModeName_;
double ev_;
libcamera::utils::Duration flickerPeriod_;
- libcamera::utils::Duration maxShutter_;
- libcamera::utils::Duration fixedShutter_;
+ libcamera::utils::Duration maxExposureTime_;
+ libcamera::utils::Duration fixedExposureTime_;
double fixedAnalogueGain_;
};
diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp
index 8a205c60..21edb819 100644
--- a/src/ipa/rpi/controller/rpi/alsc.cpp
+++ b/src/ipa/rpi/controller/rpi/alsc.cpp
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * alsc.cpp - ALSC (auto lens shading correction) control algorithm
+ * ALSC (auto lens shading correction) control algorithm
*/
#include <algorithm>
+#include <cmath>
#include <functional>
-#include <math.h>
#include <numeric>
+#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/span.h>
@@ -251,12 +252,12 @@ static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
*/
if (cm0.transform != cm1.transform)
return true;
- int leftDiff = abs(cm0.cropX - cm1.cropX);
- int topDiff = abs(cm0.cropY - cm1.cropY);
- int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width -
- cm1.cropX - cm1.scaleX * cm1.width);
- int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height -
- cm1.cropY - cm1.scaleY * cm1.height);
+ int leftDiff = std::abs(cm0.cropX - cm1.cropX);
+ int topDiff = std::abs(cm0.cropY - cm1.cropY);
+ int rightDiff = std::abs(cm0.cropX + cm0.scaleX * cm0.width -
+ cm1.cropX - cm1.scaleX * cm1.width);
+ int bottomDiff = std::abs(cm0.cropY + cm0.scaleY * cm0.height -
+ cm1.cropY - cm1.scaleY * cm1.height);
/*
* These thresholds are a rather arbitrary amount chosen to trigger
* when carrying on with the previously calculated tables might be
@@ -496,8 +497,9 @@ void resampleCalTable(const Array2D<double> &calTableIn,
* Precalculate and cache the x sampling locations and phases to save
* recomputing them on every row.
*/
- int xLo[X], xHi[X];
- double xf[X];
+ std::vector<int> xLo(X);
+ std::vector<int> xHi(X);
+ std::vector<double> xf(X);
double scaleX = cameraMode.sensorWidth /
(cameraMode.width * cameraMode.scaleX);
double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth;
@@ -730,7 +732,7 @@ static double gaussSeidel2Sor(const SparseArray<double> &M, double omega,
double maxDiff = 0;
for (i = 0; i < XY; i++) {
lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega;
- if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff))
+ if (std::abs(lambda[i] - oldLambda[i]) > std::abs(maxDiff))
maxDiff = lambda[i] - oldLambda[i];
}
return maxDiff;
@@ -762,7 +764,7 @@ static void runMatrixIterations(const Array2D<double> &C,
constructM(C, W, M);
double lastMaxDiff = std::numeric_limits<double>::max();
for (unsigned int i = 0; i < nIter; i++) {
- double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
+ double maxDiff = std::abs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
if (maxDiff < threshold) {
LOG(RPiAlsc, Debug)
<< "Stop after " << i + 1 << " iterations";
diff --git a/src/ipa/rpi/controller/rpi/alsc.h b/src/ipa/rpi/controller/rpi/alsc.h
index 0b6d9478..31087982 100644
--- a/src/ipa/rpi/controller/rpi/alsc.h
+++ b/src/ipa/rpi/controller/rpi/alsc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * alsc.h - ALSC (auto lens shading correction) control algorithm
+ * ALSC (auto lens shading correction) control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp
index dde5785a..8479ae40 100644
--- a/src/ipa/rpi/controller/rpi/awb.cpp
+++ b/src/ipa/rpi/controller/rpi/awb.cpp
@@ -2,10 +2,11 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * awb.cpp - AWB control algorithm
+ * AWB control algorithm
*/
#include <assert.h>
+#include <cmath>
#include <functional>
#include <libcamera/base/log.h>
@@ -20,6 +21,8 @@ using namespace libcamera;
LOG_DEFINE_CATEGORY(RPiAwb)
+constexpr double kDefaultCT = 4500.0;
+
#define NAME "rpi.awb"
/*
@@ -49,10 +52,11 @@ int AwbPrior::read(const libcamera::YamlObject &params)
return -EINVAL;
lux = *value;
- return prior.read(params["prior"]);
+ prior = params["prior"].get<ipa::Pwl>(ipa::Pwl{});
+ return prior.empty() ? -EINVAL : 0;
}
-static int readCtCurve(Pwl &ctR, Pwl &ctB, const libcamera::YamlObject &params)
+static int readCtCurve(ipa::Pwl &ctR, ipa::Pwl &ctB, const libcamera::YamlObject &params)
{
if (params.size() % 3) {
LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry";
@@ -103,8 +107,8 @@ int AwbConfig::read(const libcamera::YamlObject &params)
if (ret)
return ret;
/* We will want the inverse functions of these too. */
- ctRInverse = ctR.inverse();
- ctBInverse = ctB.inverse();
+ ctRInverse = ctR.inverse().first;
+ ctBInverse = ctB.inverse().first;
}
if (params.contains("priors")) {
@@ -121,7 +125,7 @@ int AwbConfig::read(const libcamera::YamlObject &params)
}
if (priors.empty()) {
LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
- return ret;
+ return -EINVAL;
}
}
if (params.contains("modes")) {
@@ -166,6 +170,14 @@ int AwbConfig::read(const libcamera::YamlObject &params)
whitepointB = params["whitepoint_b"].get<double>(0.0);
if (bayes == false)
sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */
+ /*
+ * The biasProportion parameter adds a small proportion of the counted
+	 * pixels to a region biased to the biasCT colour temperature.
+ *
+	 * A typical value for biasProportion would be between 0.05 and 0.1.
+ */
+ biasProportion = params["bias_proportion"].get<double>(0.0);
+ biasCT = params["bias_ct"].get<double>(kDefaultCT);
return 0;
}
@@ -207,13 +219,13 @@ void Awb::initialise()
* them.
*/
if (!config_.ctR.empty() && !config_.ctB.empty()) {
- syncResults_.temperatureK = config_.ctR.domain().clip(4000);
+ syncResults_.temperatureK = config_.ctR.domain().clamp(4000);
syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
syncResults_.gainG = 1.0;
syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
} else {
/* random values just to stop the world blowing up */
- syncResults_.temperatureK = 4500;
+ syncResults_.temperatureK = kDefaultCT;
syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
}
prevSyncResults_ = syncResults_;
@@ -273,14 +285,32 @@ void Awb::setManualGains(double manualR, double manualB)
syncResults_.gainB = prevSyncResults_.gainB = manualB_;
if (config_.bayes) {
/* Also estimate the best corresponding colour temperature from the curves. */
- double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clip(1 / manualR_));
- double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clip(1 / manualB_));
+ double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clamp(1 / manualR_));
+ double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clamp(1 / manualB_));
prevSyncResults_.temperatureK = (ctR + ctB) / 2;
syncResults_.temperatureK = prevSyncResults_.temperatureK;
}
}
}
+void Awb::setColourTemperature(double temperatureK)
+{
+ if (!config_.bayes) {
+ LOG(RPiAwb, Warning) << "AWB uncalibrated - cannot set colour temperature";
+ return;
+ }
+
+ temperatureK = config_.ctR.domain().clamp(temperatureK);
+ manualR_ = 1 / config_.ctR.eval(temperatureK);
+ manualB_ = 1 / config_.ctB.eval(temperatureK);
+
+ syncResults_.temperatureK = temperatureK;
+ syncResults_.gainR = manualR_;
+ syncResults_.gainG = 1.0;
+ syncResults_.gainB = manualB_;
+ prevSyncResults_ = syncResults_;
+}
+
void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
Metadata *metadata)
{
@@ -406,7 +436,8 @@ void Awb::asyncFunc()
static void generateStats(std::vector<Awb::RGB> &zones,
StatisticsPtr &stats, double minPixels,
- double minG, Metadata &globalMetadata)
+ double minG, Metadata &globalMetadata,
+ double biasProportion, double biasCtR, double biasCtB)
{
std::scoped_lock<RPiController::Metadata> l(globalMetadata);
@@ -419,6 +450,14 @@ static void generateStats(std::vector<Awb::RGB> &zones,
continue;
zone.R = region.val.rSum / region.counted;
zone.B = region.val.bSum / region.counted;
+ /*
+ * Add some bias samples to allow the search to tend to a
+ * bias CT in failure cases.
+ */
+ const unsigned int proportion = biasProportion * region.counted;
+ zone.R += proportion * biasCtR;
+ zone.B += proportion * biasCtB;
+ zone.G += proportion * 1.0;
/* Factor in the ALSC applied colour shading correction if required. */
const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status");
if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) {
@@ -438,8 +477,11 @@ void Awb::prepareStats()
* LSC has already been applied to the stats in this pipeline, so stop
* any LSC compensation. We also ignore config_.fast in this version.
*/
+ const double biasCtR = config_.bayes ? config_.ctR.eval(config_.biasCT) : 0;
+ const double biasCtB = config_.bayes ? config_.ctB.eval(config_.biasCT) : 0;
generateStats(zones_, statistics_, config_.minPixels,
- config_.minG, getGlobalMetadata());
+ config_.minG, getGlobalMetadata(),
+ config_.biasProportion, biasCtR, biasCtB);
/*
* apply sensitivities, so values appear to come from our "canonical"
* sensor.
@@ -468,7 +510,7 @@ double Awb::computeDelta2Sum(double gainR, double gainB)
return delta2Sum;
}
-Pwl Awb::interpolatePrior()
+ipa::Pwl Awb::interpolatePrior()
{
/*
* Interpolate the prior log likelihood function for our current lux
@@ -485,7 +527,7 @@ Pwl Awb::interpolatePrior()
idx++;
double lux0 = config_.priors[idx].lux,
lux1 = config_.priors[idx + 1].lux;
- return Pwl::combine(config_.priors[idx].prior,
+ return ipa::Pwl::combine(config_.priors[idx].prior,
config_.priors[idx + 1].prior,
[&](double /*x*/, double y0, double y1) {
return y0 + (y1 - y0) *
@@ -494,26 +536,26 @@ Pwl Awb::interpolatePrior()
}
}
-static double interpolateQuadatric(Pwl::Point const &a, Pwl::Point const &b,
- Pwl::Point const &c)
+static double interpolateQuadatric(ipa::Pwl::Point const &a, ipa::Pwl::Point const &b,
+ ipa::Pwl::Point const &c)
{
/*
* Given 3 points on a curve, find the extremum of the function in that
* interval by fitting a quadratic.
*/
const double eps = 1e-3;
- Pwl::Point ca = c - a, ba = b - a;
- double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);
- if (abs(denominator) > eps) {
- double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
- double result = numerator / denominator + a.x;
- return std::max(a.x, std::min(c.x, result));
+ ipa::Pwl::Point ca = c - a, ba = b - a;
+ double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x());
+ if (std::abs(denominator) > eps) {
+ double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x();
+ double result = numerator / denominator + a.x();
+ return std::max(a.x(), std::min(c.x(), result));
}
/* has degenerated to straight line segment */
- return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
+ return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x());
}
-double Awb::coarseSearch(Pwl const &prior)
+double Awb::coarseSearch(ipa::Pwl const &prior)
{
points_.clear(); /* assume doesn't deallocate memory */
size_t bestPoint = 0;
@@ -525,22 +567,22 @@ double Awb::coarseSearch(Pwl const &prior)
double b = config_.ctB.eval(t, &spanB);
double gainR = 1 / r, gainB = 1 / b;
double delta2Sum = computeDelta2Sum(gainR, gainB);
- double priorLogLikelihood = prior.eval(prior.domain().clip(t));
+ double priorLogLikelihood = prior.eval(prior.domain().clamp(t));
double finalLogLikelihood = delta2Sum - priorLogLikelihood;
LOG(RPiAwb, Debug)
<< "t: " << t << " gain R " << gainR << " gain B "
<< gainB << " delta2_sum " << delta2Sum
<< " prior " << priorLogLikelihood << " final "
<< finalLogLikelihood;
- points_.push_back(Pwl::Point(t, finalLogLikelihood));
- if (points_.back().y < points_[bestPoint].y)
+ points_.push_back(ipa::Pwl::Point({ t, finalLogLikelihood }));
+ if (points_.back().y() < points_[bestPoint].y())
bestPoint = points_.size() - 1;
if (t == mode_->ctHi)
break;
/* for even steps along the r/b curve scale them by the current t */
t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
}
- t = points_[bestPoint].x;
+ t = points_[bestPoint].x();
LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
/*
* We have the best point of the search, but refine it with a quadratic
@@ -559,7 +601,7 @@ double Awb::coarseSearch(Pwl const &prior)
return t;
}
-void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
+void Awb::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior)
{
int spanR = -1, spanB = -1;
config_.ctR.eval(t, &spanR);
@@ -570,14 +612,14 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
config_.ctR.eval(t - nsteps * step, &spanR);
double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) -
config_.ctB.eval(t - nsteps * step, &spanB);
- Pwl::Point transverse(bDiff, -rDiff);
- if (transverse.len2() < 1e-6)
+ ipa::Pwl::Point transverse({ bDiff, -rDiff });
+ if (transverse.length2() < 1e-6)
return;
/*
* unit vector orthogonal to the b vs. r function (pointing outwards
* with r and b increasing)
*/
- transverse = transverse / transverse.len();
+ transverse = transverse / transverse.length();
double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
double transverseRange = config_.transverseNeg + config_.transversePos;
const int maxNumDeltas = 12;
@@ -592,26 +634,26 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
for (int i = -nsteps; i <= nsteps; i++) {
double tTest = t + i * step;
double priorLogLikelihood =
- prior.eval(prior.domain().clip(tTest));
+ prior.eval(prior.domain().clamp(tTest));
double rCurve = config_.ctR.eval(tTest, &spanR);
double bCurve = config_.ctB.eval(tTest, &spanB);
/* x will be distance off the curve, y the log likelihood there */
- Pwl::Point points[maxNumDeltas];
+ ipa::Pwl::Point points[maxNumDeltas];
int bestPoint = 0;
/* Take some measurements transversely *off* the CT curve. */
for (int j = 0; j < numDeltas; j++) {
- points[j].x = -config_.transverseNeg +
- (transverseRange * j) / (numDeltas - 1);
- Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
- transverse * points[j].x;
- double rTest = rbTest.x, bTest = rbTest.y;
+ points[j][0] = -config_.transverseNeg +
+ (transverseRange * j) / (numDeltas - 1);
+ ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
+ transverse * points[j].x();
+ double rTest = rbTest.x(), bTest = rbTest.y();
double gainR = 1 / rTest, gainB = 1 / bTest;
double delta2Sum = computeDelta2Sum(gainR, gainB);
- points[j].y = delta2Sum - priorLogLikelihood;
+ points[j][1] = delta2Sum - priorLogLikelihood;
LOG(RPiAwb, Debug)
<< "At t " << tTest << " r " << rTest << " b "
- << bTest << ": " << points[j].y;
- if (points[j].y < points[bestPoint].y)
+ << bTest << ": " << points[j].y();
+ if (points[j].y() < points[bestPoint].y())
bestPoint = j;
}
/*
@@ -619,11 +661,11 @@ void Awb::fineSearch(double &t, double &r, double &b, Pwl const &prior)
* now let's do a quadratic interpolation for the best result.
*/
bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
- Pwl::Point rbTest = Pwl::Point(rCurve, bCurve) +
- transverse * interpolateQuadatric(points[bestPoint - 1],
- points[bestPoint],
- points[bestPoint + 1]);
- double rTest = rbTest.x, bTest = rbTest.y;
+ ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
+ transverse * interpolateQuadatric(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ double rTest = rbTest.x(), bTest = rbTest.y();
double gainR = 1 / rTest, gainB = 1 / bTest;
double delta2Sum = computeDelta2Sum(gainR, gainB);
double finalLogLikelihood = delta2Sum - priorLogLikelihood;
@@ -653,7 +695,7 @@ void Awb::awbBayes()
* Get the current prior, and scale according to how many zones are
* valid... not entirely sure about this.
*/
- Pwl prior = interpolatePrior();
+ ipa::Pwl prior = interpolatePrior();
prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions());
prior.map([](double x, double y) {
LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
@@ -715,7 +757,11 @@ void Awb::awbGrey()
sumR += *ri, sumB += *bi;
double gainR = sumR.G / (sumR.R + 1),
gainB = sumB.G / (sumB.B + 1);
- asyncResults_.temperatureK = 4500; /* don't know what it is */
+ /*
+ * The grey world model can't estimate the colour temperature, use a
+ * default value.
+ */
+ asyncResults_.temperatureK = kDefaultCT;
asyncResults_.gainR = gainR;
asyncResults_.gainG = 1.0;
asyncResults_.gainB = gainB;
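The bias added to generateStats() nudges each zone's r = R/G and b = B/G ratios towards the point on the CT curve at biasCT, by folding in a block of synthetic pixels proportional to the counted total. A standalone sketch of just that arithmetic follows; the Zone struct and function name are illustrative, and biasCtR/biasCtB correspond to ctR.eval(biasCT) and ctB.eval(biasCT) as computed in Awb::prepareStats().

struct Zone {
	double R, G, B;
};

/*
 * Sketch of the AWB bias above: add `proportion` synthetic pixels lying
 * exactly on the CT curve at the bias colour temperature. As proportion
 * grows relative to the real samples, the zone's R/G and B/G ratios tend
 * towards biasCtR and biasCtB, steering the search in failure cases.
 */
static void addBiasSamples(Zone &zone, unsigned int counted,
			   double biasProportion, double biasCtR, double biasCtB)
{
	const unsigned int proportion = biasProportion * counted;

	zone.R += proportion * biasCtR;
	zone.B += proportion * biasCtB;
	zone.G += proportion * 1.0;
}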
diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h
index cde6a62f..86640f8f 100644
--- a/src/ipa/rpi/controller/rpi/awb.h
+++ b/src/ipa/rpi/controller/rpi/awb.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * awb.h - AWB control algorithm
+ * AWB control algorithm
*/
#pragma once
@@ -10,11 +10,14 @@
#include <condition_variable>
#include <thread>
+#include <libcamera/geometry.h>
+
#include "../awb_algorithm.h"
-#include "../pwl.h"
#include "../awb_status.h"
#include "../statistics.h"
+#include "libipa/pwl.h"
+
namespace RPiController {
/* Control algorithm to perform AWB calculations. */
@@ -28,7 +31,7 @@ struct AwbMode {
struct AwbPrior {
int read(const libcamera::YamlObject &params);
double lux; /* lux level */
- Pwl prior; /* maps CT to prior log likelihood for this lux level */
+ libcamera::ipa::Pwl prior; /* maps CT to prior log likelihood for this lux level */
};
struct AwbConfig {
@@ -41,10 +44,10 @@ struct AwbConfig {
unsigned int convergenceFrames; /* approx number of frames to converge */
double speed; /* IIR filter speed applied to algorithm results */
bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */
- Pwl ctR; /* function maps CT to r (= R/G) */
- Pwl ctB; /* function maps CT to b (= B/G) */
- Pwl ctRInverse; /* inverse of ctR */
- Pwl ctBInverse; /* inverse of ctB */
+ libcamera::ipa::Pwl ctR; /* function maps CT to r (= R/G) */
+ libcamera::ipa::Pwl ctB; /* function maps CT to b (= B/G) */
+ libcamera::ipa::Pwl ctRInverse; /* inverse of ctR */
+ libcamera::ipa::Pwl ctBInverse; /* inverse of ctB */
/* table of illuminant priors at different lux levels */
std::vector<AwbPrior> priors;
/* AWB "modes" (determines the search range) */
@@ -84,6 +87,10 @@ struct AwbConfig {
double whitepointR;
double whitepointB;
bool bayes; /* use Bayesian algorithm */
+ /* proportion of counted samples to add for the search bias */
+ double biasProportion;
+ /* CT target for the search bias */
+ double biasCT;
};
class Awb : public AwbAlgorithm
@@ -98,6 +105,7 @@ public:
void initialValues(double &gainR, double &gainB) override;
void setMode(std::string const &name) override;
void setManualGains(double manualR, double manualB) override;
+ void setColourTemperature(double temperatureK) override;
void enableAuto() override;
void disableAuto() override;
void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
@@ -161,11 +169,11 @@ private:
void awbGrey();
void prepareStats();
double computeDelta2Sum(double gainR, double gainB);
- Pwl interpolatePrior();
- double coarseSearch(Pwl const &prior);
- void fineSearch(double &t, double &r, double &b, Pwl const &prior);
+ libcamera::ipa::Pwl interpolatePrior();
+ double coarseSearch(libcamera::ipa::Pwl const &prior);
+ void fineSearch(double &t, double &r, double &b, libcamera::ipa::Pwl const &prior);
std::vector<RGB> zones_;
- std::vector<Pwl::Point> points_;
+ std::vector<libcamera::ipa::Pwl::Point> points_;
/* manual r setting */
double manualR_;
/* manual b setting */
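Both search passes in awb.cpp above refine their best sample with interpolateQuadatric() (sic). The idea is to fit a parabola through three neighbouring samples and jump to the abscissa of its extremum, clamped to the sampled interval. Below is a standalone sketch with a plain Point struct; the real code now uses ipa::Pwl::Point with x()/y() accessors.

#include <algorithm>
#include <cmath>

struct Point {
	double x, y;
};

/*
 * Fit a parabola through three samples a, b, c (with a.x < b.x < c.x)
 * and return the x of its extremum, clamped to [a.x, c.x]. Fall back to
 * the endpoint with the smaller y when the points are nearly collinear,
 * mirroring the degenerate case handled above.
 */
static double quadraticExtremum(Point a, Point b, Point c)
{
	const double eps = 1e-3;
	Point ca{ c.x - a.x, c.y - a.y };
	Point ba{ b.x - a.x, b.y - a.y };
	double denominator = 2 * (ba.y * ca.x - ca.y * ba.x);

	if (std::abs(denominator) > eps) {
		double numerator = ba.y * ca.x * ca.x - ca.y * ba.x * ba.x;
		return std::clamp(numerator / denominator + a.x, a.x, c.x);
	}

	/* Degenerated to a straight line segment. */
	return a.y < c.y - eps ? a.x : (c.y < a.y - eps ? c.x : b.x);
}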
diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp
index 2e3db51f..4c968f14 100644
--- a/src/ipa/rpi/controller/rpi/black_level.cpp
+++ b/src/ipa/rpi/controller/rpi/black_level.cpp
@@ -2,10 +2,9 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * black_level.cpp - black level control algorithm
+ * black level control algorithm
*/
-#include <math.h>
#include <stdint.h>
#include <libcamera/base/log.h>
diff --git a/src/ipa/rpi/controller/rpi/black_level.h b/src/ipa/rpi/controller/rpi/black_level.h
index d8c41c62..f50729db 100644
--- a/src/ipa/rpi/controller/rpi/black_level.h
+++ b/src/ipa/rpi/controller/rpi/black_level.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * black_level.h - black level control algorithm
+ * black level control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/cac.cpp b/src/ipa/rpi/controller/rpi/cac.cpp
index f2c8d282..17779ad5 100644
--- a/src/ipa/rpi/controller/rpi/cac.cpp
+++ b/src/ipa/rpi/controller/rpi/cac.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
- * cac.cpp - Chromatic Aberration Correction algorithm
+ * Chromatic Aberration Correction algorithm
*/
#include "cac.h"
diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp
index 2e2e6664..8607f152 100644
--- a/src/ipa/rpi/controller/rpi/ccm.cpp
+++ b/src/ipa/rpi/controller/rpi/ccm.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm.cpp - CCM (colour correction matrix) control algorithm
+ * CCM (colour correction matrix) control algorithm
*/
#include <libcamera/base/log.h>
@@ -29,34 +29,7 @@ LOG_DEFINE_CATEGORY(RPiCcm)
#define NAME "rpi.ccm"
-Matrix::Matrix()
-{
- memset(m, 0, sizeof(m));
-}
-Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
- double m6, double m7, double m8)
-{
- m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4,
- m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8;
-}
-int Matrix::read(const libcamera::YamlObject &params)
-{
- double *ptr = (double *)m;
-
- if (params.size() != 9) {
- LOG(RPiCcm, Error) << "Wrong number of values in CCM";
- return -EINVAL;
- }
-
- for (const auto &param : params.asList()) {
- auto value = param.get<double>();
- if (!value)
- return -EINVAL;
- *ptr++ = *value;
- }
-
- return 0;
-}
+using Matrix3x3 = Matrix<double, 3, 3>;
Ccm::Ccm(Controller *controller)
: CcmAlgorithm(controller), saturation_(1.0) {}
@@ -68,12 +41,10 @@ char const *Ccm::name() const
int Ccm::read(const libcamera::YamlObject &params)
{
- int ret;
-
if (params.contains("saturation")) {
- ret = config_.saturation.read(params["saturation"]);
- if (ret)
- return ret;
+ config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{});
+ if (config_.saturation.empty())
+ return -EINVAL;
}
for (auto &p : params["ccms"].asList()) {
@@ -83,9 +54,12 @@ int Ccm::read(const libcamera::YamlObject &params)
CtCcm ctCcm;
ctCcm.ct = *value;
- ret = ctCcm.ccm.read(p["ccm"]);
- if (ret)
- return ret;
+
+ auto ccm = p["ccm"].get<Matrix3x3>();
+ if (!ccm)
+ return -EINVAL;
+
+ ctCcm.ccm = *ccm;
if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) {
LOG(RPiCcm, Error)
@@ -113,8 +87,10 @@ void Ccm::initialise()
{
}
+namespace {
+
template<typename T>
-static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
+bool getLocked(Metadata *metadata, std::string const &tag, T &value)
{
T *ptr = metadata->getLocked<T>(tag);
if (ptr == nullptr)
@@ -123,7 +99,7 @@ static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
return true;
}
-Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct)
+Matrix3x3 calculateCcm(std::vector<CtCcm> const &ccms, double ct)
{
if (ct <= ccms.front().ct)
return ccms.front().ccm;
@@ -139,16 +115,25 @@ Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct)
}
}
-Matrix applySaturation(Matrix const &ccm, double saturation)
+Matrix3x3 applySaturation(Matrix3x3 const &ccm, double saturation)
{
- Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419,
- -0.081);
- Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771,
- 0.000);
- Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation);
+ static const Matrix3x3 RGB2Y({ 0.299, 0.587, 0.114,
+ -0.169, -0.331, 0.500,
+ 0.500, -0.419, -0.081 });
+
+ static const Matrix3x3 Y2RGB({ 1.000, 0.000, 1.402,
+ 1.000, -0.345, -0.714,
+ 1.000, 1.771, 0.000 });
+
+ Matrix3x3 S({ 1, 0, 0,
+ 0, saturation, 0,
+ 0, 0, saturation });
+
return Y2RGB * S * RGB2Y * ccm;
}
+} /* namespace */
+
void Ccm::prepare(Metadata *imageMetadata)
{
bool awbOk = false, luxOk = false;
@@ -166,18 +151,18 @@ void Ccm::prepare(Metadata *imageMetadata)
LOG(RPiCcm, Warning) << "no colour temperature found";
if (!luxOk)
LOG(RPiCcm, Warning) << "no lux value found";
- Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK);
+ Matrix3x3 ccm = calculateCcm(config_.ccms, awb.temperatureK);
double saturation = saturation_;
struct CcmStatus ccmStatus;
ccmStatus.saturation = saturation;
if (!config_.saturation.empty())
saturation *= config_.saturation.eval(
- config_.saturation.domain().clip(lux.lux));
+ config_.saturation.domain().clamp(lux.lux));
ccm = applySaturation(ccm, saturation);
for (int j = 0; j < 3; j++)
for (int i = 0; i < 3; i++)
ccmStatus.matrix[j * 3 + i] =
- std::max(-8.0, std::min(7.9999, ccm.m[j][i]));
+ std::max(-8.0, std::min(7.9999, ccm[j][i]));
LOG(RPiCcm, Debug)
<< "colour temperature " << awb.temperatureK << "K";
LOG(RPiCcm, Debug)
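applySaturation() above adjusts saturation with a round trip through a Y'CbCr-like space: convert RGB to luma/chroma, scale only the chroma rows, convert back, then fold the result into the CCM. A self-contained sketch of the same composition follows, using plain arrays instead of the new libcamera::Matrix; the coefficient values are copied from the patch.

#include <array>

using Mat3 = std::array<std::array<double, 3>, 3>;

static Mat3 mul(const Mat3 &a, const Mat3 &b)
{
	Mat3 out{};

	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			for (int k = 0; k < 3; k++)
				out[i][j] += a[i][k] * b[k][j];

	return out;
}

/*
 * Scale chroma by `saturation` in a Y'CbCr-like space: rows 2 and 3 of
 * rgb2y produce the chroma components, so the diagonal matrix s scales
 * only those before converting back to RGB and applying the CCM.
 */
static Mat3 applySaturationSketch(const Mat3 &ccm, double saturation)
{
	const Mat3 rgb2y{ { { 0.299, 0.587, 0.114 },
			    { -0.169, -0.331, 0.500 },
			    { 0.500, -0.419, -0.081 } } };
	const Mat3 y2rgb{ { { 1.000, 0.000, 1.402 },
			    { 1.000, -0.345, -0.714 },
			    { 1.000, 1.771, 0.000 } } };
	const Mat3 s{ { { 1, 0, 0 },
			{ 0, saturation, 0 },
			{ 0, 0, saturation } } };

	return mul(mul(mul(y2rgb, s), rgb2y), ccm);
}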
diff --git a/src/ipa/rpi/controller/rpi/ccm.h b/src/ipa/rpi/controller/rpi/ccm.h
index 286d0b33..c05dbb17 100644
--- a/src/ipa/rpi/controller/rpi/ccm.h
+++ b/src/ipa/rpi/controller/rpi/ccm.h
@@ -2,59 +2,29 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * ccm.h - CCM (colour correction matrix) control algorithm
+ * CCM (colour correction matrix) control algorithm
*/
#pragma once
#include <vector>
+#include "libcamera/internal/matrix.h"
+#include <libipa/pwl.h>
+
#include "../ccm_algorithm.h"
-#include "../pwl.h"
namespace RPiController {
/* Algorithm to calculate colour matrix. Should be placed after AWB. */
-struct Matrix {
- Matrix(double m0, double m1, double m2, double m3, double m4, double m5,
- double m6, double m7, double m8);
- Matrix();
- double m[3][3];
- int read(const libcamera::YamlObject &params);
-};
-static inline Matrix operator*(double d, Matrix const &m)
-{
- return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d,
- m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d,
- m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d);
-}
-static inline Matrix operator*(Matrix const &m1, Matrix const &m2)
-{
- Matrix m;
- for (int i = 0; i < 3; i++)
- for (int j = 0; j < 3; j++)
- m.m[i][j] = m1.m[i][0] * m2.m[0][j] +
- m1.m[i][1] * m2.m[1][j] +
- m1.m[i][2] * m2.m[2][j];
- return m;
-}
-static inline Matrix operator+(Matrix const &m1, Matrix const &m2)
-{
- Matrix m;
- for (int i = 0; i < 3; i++)
- for (int j = 0; j < 3; j++)
- m.m[i][j] = m1.m[i][j] + m2.m[i][j];
- return m;
-}
-
struct CtCcm {
double ct;
- Matrix ccm;
+ libcamera::Matrix<double, 3, 3> ccm;
};
struct CcmConfig {
std::vector<CtCcm> ccms;
- Pwl saturation;
+ libcamera::ipa::Pwl saturation;
};
class Ccm : public CcmAlgorithm
diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp
index 4e038a02..fe866a54 100644
--- a/src/ipa/rpi/controller/rpi/contrast.cpp
+++ b/src/ipa/rpi/controller/rpi/contrast.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * contrast.cpp - contrast (gamma) control algorithm
+ * contrast (gamma) control algorithm
*/
#include <stdint.h>
@@ -53,7 +53,9 @@ int Contrast::read(const libcamera::YamlObject &params)
config_.hiHistogram = params["hi_histogram"].get<double>(0.95);
config_.hiLevel = params["hi_level"].get<double>(0.95);
config_.hiMax = params["hi_max"].get<double>(2000);
- return config_.gammaCurve.read(params["gamma_curve"]);
+
+ config_.gammaCurve = params["gamma_curve"].get<ipa::Pwl>(ipa::Pwl{});
+ return config_.gammaCurve.empty() ? -EINVAL : 0;
}
void Contrast::setBrightness(double brightness)
@@ -92,10 +94,12 @@ void Contrast::prepare(Metadata *imageMetadata)
imageMetadata->set("contrast.status", status_);
}
-Pwl computeStretchCurve(Histogram const &histogram,
+namespace {
+
+ipa::Pwl computeStretchCurve(Histogram const &histogram,
ContrastConfig const &config)
{
- Pwl enhance;
+ ipa::Pwl enhance;
enhance.append(0, 0);
/*
* If the start of the histogram is rather empty, try to pull it down a
@@ -136,10 +140,10 @@ Pwl computeStretchCurve(Histogram const &histogram,
return enhance;
}
-Pwl applyManualContrast(Pwl const &gammaCurve, double brightness,
- double contrast)
+ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness,
+ double contrast)
{
- Pwl newGammaCurve;
+ ipa::Pwl newGammaCurve;
LOG(RPiContrast, Debug)
<< "Manual brightness " << brightness << " contrast " << contrast;
gammaCurve.map([&](double x, double y) {
@@ -151,6 +155,8 @@ Pwl applyManualContrast(Pwl const &gammaCurve, double brightness,
return newGammaCurve;
}
+} /* namespace */
+
void Contrast::process(StatisticsPtr &stats,
[[maybe_unused]] Metadata *imageMetadata)
{
@@ -160,7 +166,7 @@ void Contrast::process(StatisticsPtr &stats,
* ways: 1. Adjust the gamma curve so as to pull the start of the
* histogram down, and possibly push the end up.
*/
- Pwl gammaCurve = config_.gammaCurve;
+ ipa::Pwl gammaCurve = config_.gammaCurve;
if (ceEnable_) {
if (config_.loMax != 0 || config_.hiMax != 0)
gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
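Contrast enhancement composes the histogram-derived stretch curve with the configured gamma curve through ipa::Pwl::compose(). Below is a minimal sketch of piecewise-linear evaluation and composition, under the simplifying assumption that re-evaluating at the first function's breakpoints is sufficient; the real Pwl implementation is more careful about where the composed knots land.

#include <cstddef>
#include <utility>
#include <vector>

/* Minimal piecewise-linear function: sorted (x, y) breakpoints. */
using PwlSketch = std::vector<std::pair<double, double>>;

static double eval(const PwlSketch &f, double x)
{
	if (x <= f.front().first)
		return f.front().second;

	for (std::size_t i = 1; i < f.size(); i++) {
		if (x <= f[i].first) {
			double t = (x - f[i - 1].first) /
				   (f[i].first - f[i - 1].first);
			return f[i - 1].second +
			       t * (f[i].second - f[i - 1].second);
		}
	}

	return f.back().second;
}

/*
 * Compose g after f, i.e. h(x) = g(f(x)), by re-evaluating g at every
 * breakpoint of f. Illustrative only: it keeps just f's knots.
 */
static PwlSketch compose(const PwlSketch &g, const PwlSketch &f)
{
	PwlSketch h;

	for (const auto &[x, y] : f)
		h.emplace_back(x, eval(g, y));

	return h;
}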
diff --git a/src/ipa/rpi/controller/rpi/contrast.h b/src/ipa/rpi/controller/rpi/contrast.h
index 59aa70dc..c0f7db98 100644
--- a/src/ipa/rpi/controller/rpi/contrast.h
+++ b/src/ipa/rpi/controller/rpi/contrast.h
@@ -2,14 +2,15 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * contrast.h - contrast (gamma) control algorithm
+ * contrast (gamma) control algorithm
*/
#pragma once
#include <mutex>
+#include <libipa/pwl.h>
+
#include "../contrast_algorithm.h"
-#include "../pwl.h"
namespace RPiController {
@@ -26,7 +27,7 @@ struct ContrastConfig {
double hiHistogram;
double hiLevel;
double hiMax;
- Pwl gammaCurve;
+ libcamera::ipa::Pwl gammaCurve;
};
class Contrast : public ContrastAlgorithm
diff --git a/src/ipa/rpi/controller/rpi/denoise.cpp b/src/ipa/rpi/controller/rpi/denoise.cpp
index 154ee604..ba851658 100644
--- a/src/ipa/rpi/controller/rpi/denoise.cpp
+++ b/src/ipa/rpi/controller/rpi/denoise.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
- * Denoise.cpp - Denoise (spatial, colour, temporal) control algorithm
+ * Denoise (spatial, colour, temporal) control algorithm
*/
#include "denoise.h"
diff --git a/src/ipa/rpi/controller/rpi/dpc.cpp b/src/ipa/rpi/controller/rpi/dpc.cpp
index be3871df..8aac03f7 100644
--- a/src/ipa/rpi/controller/rpi/dpc.cpp
+++ b/src/ipa/rpi/controller/rpi/dpc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * dpc.cpp - DPC (defective pixel correction) control algorithm
+ * DPC (defective pixel correction) control algorithm
*/
#include <libcamera/base/log.h>
diff --git a/src/ipa/rpi/controller/rpi/dpc.h b/src/ipa/rpi/controller/rpi/dpc.h
index 84a05604..9cefb06d 100644
--- a/src/ipa/rpi/controller/rpi/dpc.h
+++ b/src/ipa/rpi/controller/rpi/dpc.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * dpc.h - DPC (defective pixel correction) control algorithm
+ * DPC (defective pixel correction) control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/focus.h b/src/ipa/rpi/controller/rpi/focus.h
index 8556039d..ee014be9 100644
--- a/src/ipa/rpi/controller/rpi/focus.h
+++ b/src/ipa/rpi/controller/rpi/focus.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * focus.h - focus algorithm
+ * focus algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/geq.cpp b/src/ipa/rpi/controller/rpi/geq.cpp
index 510870e9..40e7191b 100644
--- a/src/ipa/rpi/controller/rpi/geq.cpp
+++ b/src/ipa/rpi/controller/rpi/geq.cpp
@@ -2,14 +2,13 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * geq.cpp - GEQ (green equalisation) control algorithm
+ * GEQ (green equalisation) control algorithm
*/
#include <libcamera/base/log.h>
#include "../device_status.h"
#include "../lux_status.h"
-#include "../pwl.h"
#include "geq.h"
@@ -45,9 +44,9 @@ int Geq::read(const libcamera::YamlObject &params)
}
if (params.contains("strength")) {
- int ret = config_.strength.read(params["strength"]);
- if (ret)
- return ret;
+ config_.strength = params["strength"].get<ipa::Pwl>(ipa::Pwl{});
+ if (config_.strength.empty())
+ return -EINVAL;
}
return 0;
@@ -67,7 +66,7 @@ void Geq::prepare(Metadata *imageMetadata)
GeqStatus geqStatus = {};
double strength = config_.strength.empty()
? 1.0
- : config_.strength.eval(config_.strength.domain().clip(luxStatus.lux));
+ : config_.strength.eval(config_.strength.domain().clamp(luxStatus.lux));
strength *= deviceStatus.analogueGain;
double offset = config_.offset * strength;
double slope = config_.slope * strength;
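Several hunks in this series (AWB priors, CCM saturation, contrast gamma, GEQ strength, HDR and tonemap curves) replace hand-rolled Pwl::read() parsing with the same idiom: get<ipa::Pwl>() with an empty Pwl as the fallback, then a single empty() test. A sketch of the idiom follows, using only calls visible in the hunks above; it relies on libcamera-internal headers and so builds only in-tree.

#include <cerrno>

/* libcamera YamlObject / ipa::Pwl headers omitted; in-tree build only. */
int readStrength(const libcamera::YamlObject &params, libcamera::ipa::Pwl &out)
{
	/*
	 * get<T>(fallback) returns the fallback when the key is missing
	 * or malformed, so empty() covers both "absent" and "unparseable".
	 */
	out = params["strength"].get<libcamera::ipa::Pwl>(libcamera::ipa::Pwl{});
	return out.empty() ? -EINVAL : 0;
}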
diff --git a/src/ipa/rpi/controller/rpi/geq.h b/src/ipa/rpi/controller/rpi/geq.h
index ee3a52ff..e8b9f427 100644
--- a/src/ipa/rpi/controller/rpi/geq.h
+++ b/src/ipa/rpi/controller/rpi/geq.h
@@ -2,10 +2,12 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * geq.h - GEQ (green equalisation) control algorithm
+ * GEQ (green equalisation) control algorithm
*/
#pragma once
+#include <libipa/pwl.h>
+
#include "../algorithm.h"
#include "../geq_status.h"
@@ -16,7 +18,7 @@ namespace RPiController {
struct GeqConfig {
uint16_t offset;
double slope;
- Pwl strength; /* lux to strength factor */
+ libcamera::ipa::Pwl strength; /* lux to strength factor */
};
class Geq : public Algorithm
diff --git a/src/ipa/rpi/controller/rpi/hdr.cpp b/src/ipa/rpi/controller/rpi/hdr.cpp
index fb580548..f3da8291 100644
--- a/src/ipa/rpi/controller/rpi/hdr.cpp
+++ b/src/ipa/rpi/controller/rpi/hdr.cpp
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
- * hdr.cpp - HDR control algorithm
+ * HDR control algorithm
*/
#include "hdr.h"
+#include <cmath>
+
#include <libcamera/base/log.h>
#include "../agc_status.h"
@@ -39,25 +41,52 @@ void HdrConfig::read(const libcamera::YamlObject &params, const std::string &mod
channelMap[v.get<unsigned int>().value()] = k;
/* Lens shading related parameters. */
- if (params.contains("spatial_gain")) {
- spatialGain.read(params["spatial_gain"]);
- diffusion = params["diffusion"].get<unsigned int>(3);
- /* Clip to an arbitrary limit just to stop typos from killing the system! */
- const unsigned int MAX_DIFFUSION = 15;
- if (diffusion > MAX_DIFFUSION) {
- diffusion = MAX_DIFFUSION;
- LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION;
- }
+ if (params.contains("spatial_gain_curve")) {
+ spatialGainCurve = params["spatial_gain_curve"].get<ipa::Pwl>(ipa::Pwl{});
+ } else if (params.contains("spatial_gain")) {
+ double spatialGain = params["spatial_gain"].get<double>(2.0);
+ spatialGainCurve.append(0.0, spatialGain);
+ spatialGainCurve.append(0.01, spatialGain);
+ spatialGainCurve.append(0.06, 1.0); /* maybe make this programmable? */
+ spatialGainCurve.append(1.0, 1.0);
+ }
+
+ diffusion = params["diffusion"].get<unsigned int>(3);
+ /* Clip to an arbitrary limit just to stop typos from killing the system! */
+ const unsigned int MAX_DIFFUSION = 15;
+ if (diffusion > MAX_DIFFUSION) {
+ diffusion = MAX_DIFFUSION;
+ LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION;
}
/* Read any tonemap parameters. */
tonemapEnable = params["tonemap_enable"].get<int>(0);
- detailConstant = params["detail_constant"].get<uint16_t>(50);
- detailSlope = params["detail_slope"].get<double>(8.0);
+ detailConstant = params["detail_constant"].get<uint16_t>(0);
+ detailSlope = params["detail_slope"].get<double>(0.0);
iirStrength = params["iir_strength"].get<double>(8.0);
strength = params["strength"].get<double>(1.5);
if (tonemapEnable)
- tonemap.read(params["tonemap"]);
+ tonemap = params["tonemap"].get<ipa::Pwl>(ipa::Pwl{});
+ speed = params["speed"].get<double>(1.0);
+ if (params.contains("hi_quantile_targets")) {
+ hiQuantileTargets = params["hi_quantile_targets"].getList<double>().value();
+ if (hiQuantileTargets.empty() || hiQuantileTargets.size() % 2)
+			LOG(RPiHdr, Fatal) << "hi_quantile_targets must be even and non-empty";
+ } else
+ hiQuantileTargets = { 0.95, 0.65, 0.5, 0.28, 0.3, 0.25 };
+ hiQuantileMaxGain = params["hi_quantile_max_gain"].get<double>(1.6);
+ if (params.contains("quantile_targets")) {
+ quantileTargets = params["quantile_targets"].getList<double>().value();
+ if (quantileTargets.empty() || quantileTargets.size() % 2)
+			LOG(RPiHdr, Fatal) << "quantile_targets must be even and non-empty";
+ } else
+ quantileTargets = { 0.2, 0.03, 1.0, 0.15 };
+ powerMin = params["power_min"].get<double>(0.65);
+ powerMax = params["power_max"].get<double>(1.0);
+ if (params.contains("contrast_adjustments")) {
+ contrastAdjustments = params["contrast_adjustments"].getList<double>().value();
+ } else
+ contrastAdjustments = { 0.5, 0.75 };
/* Read any stitch parameters. */
stitchEnable = params["stitch_enable"].get<int>(0);
@@ -159,7 +188,7 @@ void Hdr::prepare(Metadata *imageMetadata)
}
HdrConfig &config = it->second;
- if (config.spatialGain.empty())
+ if (config.spatialGainCurve.empty())
return;
AlscStatus alscStatus{}; /* some compilers seem to require the braces */
@@ -183,7 +212,7 @@ bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config
/* When there's a change of HDR mode we start over with a new tonemap curve. */
if (delayedStatus_.mode != previousMode_) {
previousMode_ = delayedStatus_.mode;
- tonemap_ = Pwl();
+ tonemap_ = ipa::Pwl();
}
/* No tonemapping. No need to output a tonemap.status. */
@@ -205,10 +234,61 @@ bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config
return true;
/*
- * If we wanted to build or adjust tonemaps dynamically, this would be the place
- * to do it. But for now we seem to be getting by without.
+ * Create a tonemap dynamically. We have three ingredients.
+ *
+	 * 1. We have a list of "hi quantiles" and "targets". We use these to judge
+	 * whether the image seems reasonably saturated. If it isn't, we calculate
+	 * a gain that we will feed as a linear factor into the tonemap generation.
+	 * This prevents unsaturated images from becoming quite so "flat".
+ *
+ * 2. We have a list of quantile/target pairs for the bottom of the histogram.
+ * We use these to calculate how much gain we must apply to the bottom of the
+ * tonemap. We apply this gain as a power curve so as not to blow out the top
+ * end.
+ *
+ * 3. Finally, when we generate the tonemap, we have some contrast adjustments
+ * for the bottom because we know that power curves can start quite steeply and
+ * cause a washed-out look.
*/
+ /* Compute the linear gain from the headroom for saturation at the top. */
+ double gain = 10; /* arbitrary, but hiQuantileMaxGain will clamp it later */
+ for (unsigned int i = 0; i < config.hiQuantileTargets.size(); i += 2) {
+ double quantile = config.hiQuantileTargets[i];
+ double target = config.hiQuantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(quantile, 1.0) / 1024.0;
+ double newGain = target / (value + 0.01);
+ gain = std::min(gain, newGain);
+ }
+ gain = std::clamp(gain, 1.0, config.hiQuantileMaxGain);
+
+ /* Compute the power curve from the amount of gain needed at the bottom. */
+ double min_power = 2; /* arbitrary, but config.powerMax will clamp it later */
+ for (unsigned int i = 0; i < config.quantileTargets.size(); i += 2) {
+ double quantile = config.quantileTargets[i];
+ double target = config.quantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(0, quantile) / 1024.0;
+ value = std::min(value * gain, 1.0);
+ double power = log(target + 1e-6) / log(value + 1e-6);
+ min_power = std::min(min_power, power);
+ }
+ double power = std::clamp(min_power, config.powerMin, config.powerMax);
+
+ /* Generate the tonemap, including the contrast adjustment factors. */
+ libcamera::ipa::Pwl tonemap;
+ tonemap.append(0, 0);
+ for (unsigned int i = 0; i <= 6; i++) {
+ double x = 1 << (i + 9); /* x loops from 512 to 32768 inclusive */
+ double y = pow(std::min(x * gain, 65535.0) / 65536.0, power) * 65536;
+ if (i < config.contrastAdjustments.size())
+ y *= config.contrastAdjustments[i];
+ if (!tonemap_.empty())
+ y = y * config.speed + tonemap_.eval(x) * (1 - config.speed);
+ tonemap.append(x, y);
+ }
+ tonemap.append(65535, 65535);
+ tonemap_ = tonemap;
+
return true;
}
@@ -255,7 +335,7 @@ static void averageGains(std::vector<double> &src, std::vector<double> &dst, con
void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config)
{
- if (config.spatialGain.empty())
+ if (config.spatialGainCurve.empty())
return;
/* When alternating exposures, only compute these gains for the short frame. */
@@ -270,7 +350,7 @@ void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config)
double g = region.val.gSum / counted;
double b = region.val.bSum / counted;
double brightness = std::max({ r, g, b }) / 65535;
- gains_[0][i] = config.spatialGain.eval(brightness);
+ gains_[0][i] = config.spatialGainCurve.eval(brightness);
}
/* Ping-pong between the two gains_ buffers. */
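The adaptive tonemap added above has two numeric stages: a linear gain bounded by how much headroom the top of the luminance histogram shows, and a power-curve exponent chosen so the bottom quantiles reach their targets. A condensed standalone sketch of both stages follows; quantileMean stands in for stats->yHist.interQuantileMean() normalised to [0, 1], and all names are illustrative.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

/*
 * Stage 1: walk (quantile, target) pairs near the top of the histogram
 * and keep the smallest gain that would bring each quantile mean to its
 * target, clamped to [1, maxGain] so saturated scenes are untouched.
 */
static double linearGain(const std::vector<double> &hiTargets, double maxGain,
			 double (*quantileMean)(double lo, double hi))
{
	double gain = 10.0; /* arbitrary; clamped below */

	for (std::size_t i = 0; i + 1 < hiTargets.size(); i += 2) {
		double value = quantileMean(hiTargets[i], 1.0);
		gain = std::min(gain, hiTargets[i + 1] / (value + 0.01));
	}

	return std::clamp(gain, 1.0, maxGain);
}

/*
 * Stage 2: for each bottom (quantile, target) pair, solve target =
 * value^power for the power, and keep the gentlest (smallest) exponent,
 * clamped to the configured [powerMin, powerMax] range.
 */
static double tonemapPower(const std::vector<double> &targets, double gain,
			   double powerMin, double powerMax,
			   double (*quantileMean)(double lo, double hi))
{
	double minPower = 2.0; /* arbitrary; clamped below */

	for (std::size_t i = 0; i + 1 < targets.size(); i += 2) {
		double value = std::min(quantileMean(0.0, targets[i]) * gain, 1.0);
		double power = std::log(targets[i + 1] + 1e-6) /
			       std::log(value + 1e-6);
		minPower = std::min(minPower, power);
	}

	return std::clamp(minPower, powerMin, powerMax);
}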
diff --git a/src/ipa/rpi/controller/rpi/hdr.h b/src/ipa/rpi/controller/rpi/hdr.h
index 980aa3d1..5c2f3988 100644
--- a/src/ipa/rpi/controller/rpi/hdr.h
+++ b/src/ipa/rpi/controller/rpi/hdr.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Raspberry Pi Ltd
*
- * hdr.h - HDR control algorithm
+ * HDR control algorithm
*/
#pragma once
@@ -12,9 +12,10 @@
#include <libcamera/geometry.h>
+#include <libipa/pwl.h>
+
#include "../hdr_algorithm.h"
#include "../hdr_status.h"
-#include "../pwl.h"
/* This is our implementation of an HDR algorithm. */
@@ -26,7 +27,7 @@ struct HdrConfig {
std::map<unsigned int, std::string> channelMap;
/* Lens shading related parameters. */
- Pwl spatialGain; /* Brightness to gain curve for different image regions. */
+ libcamera::ipa::Pwl spatialGainCurve; /* Brightness to gain curve for different image regions. */
unsigned int diffusion; /* How much to diffuse the gain spatially. */
/* Tonemap related parameters. */
@@ -35,7 +36,15 @@ struct HdrConfig {
double detailSlope;
double iirStrength;
double strength;
- Pwl tonemap;
+ libcamera::ipa::Pwl tonemap;
+ /* These relate to adaptive tonemap calculation. */
+ double speed;
+ std::vector<double> hiQuantileTargets; /* quantiles to check for unsaturated images */
+ double hiQuantileMaxGain; /* the max gain we'll apply when unsaturated */
+ std::vector<double> quantileTargets; /* target values for histogram quantiles */
+ double powerMin; /* minimum tonemap power */
+ double powerMax; /* maximum tonemap power */
+ std::vector<double> contrastAdjustments; /* any contrast adjustment factors */
/* Stitch related parameters. */
bool stitchEnable;
@@ -67,7 +76,7 @@ private:
HdrStatus status_; /* track the current HDR mode and channel */
HdrStatus delayedStatus_; /* track the delayed HDR mode and channel */
std::string previousMode_;
- Pwl tonemap_;
+ libcamera::ipa::Pwl tonemap_;
libcamera::Size regions_; /* stats regions */
unsigned int numRegions_; /* total number of stats regions */
std::vector<double> gains_[2];
diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp
index 06625f3a..27b89a8f 100644
--- a/src/ipa/rpi/controller/rpi/lux.cpp
+++ b/src/ipa/rpi/controller/rpi/lux.cpp
@@ -2,9 +2,8 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * lux.cpp - Lux control algorithm
+ * Lux control algorithm
*/
-#include <math.h>
#include <libcamera/base/log.h>
@@ -41,7 +40,7 @@ int Lux::read(const libcamera::YamlObject &params)
auto value = params["reference_shutter_speed"].get<double>();
if (!value)
return -EINVAL;
- referenceShutterSpeed_ = *value * 1.0us;
+ referenceExposureTime_ = *value * 1.0us;
value = params["reference_gain"].get<double>();
if (!value)
@@ -83,11 +82,11 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
double currentAperture = deviceStatus.aperture.value_or(currentAperture_);
double currentY = stats->yHist.interQuantileMean(0, 1);
double gainRatio = referenceGain_ / currentGain;
- double shutterSpeedRatio =
- referenceShutterSpeed_ / deviceStatus.shutterSpeed;
+ double exposureTimeRatio =
+ referenceExposureTime_ / deviceStatus.exposureTime;
double apertureRatio = referenceAperture_ / currentAperture;
double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_;
- double estimatedLux = shutterSpeedRatio * gainRatio *
+ double estimatedLux = exposureTimeRatio * gainRatio *
apertureRatio * apertureRatio *
yRatio * referenceLux_;
LuxStatus status;
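The lux estimate above is a product of ratios against a stored reference frame; the aperture ratio enters squared because gathered light scales with the pupil area (aperture here is in units of 1/f, per the comment in lux.h). A standalone sketch of the arithmetic, with illustrative parameter names:

/*
 * Sketch of Lux::process() above: scale the reference scene's lux by how
 * much less exposure time, gain and aperture the current frame used, and
 * by how much brighter it nevertheless came out.
 */
static double estimateLux(double refLux, double refExposureUs, double exposureUs,
			  double refGain, double gain,
			  double refAperture, double aperture,
			  double refY, double y)
{
	double exposureRatio = refExposureUs / exposureUs;
	double gainRatio = refGain / gain;
	double apertureRatio = refAperture / aperture;
	double yRatio = y / refY;

	return exposureRatio * gainRatio * apertureRatio * apertureRatio *
	       yRatio * refLux;
}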
diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h
index 89411a54..da007fe9 100644
--- a/src/ipa/rpi/controller/rpi/lux.h
+++ b/src/ipa/rpi/controller/rpi/lux.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * lux.h - Lux control algorithm
+ * Lux control algorithm
*/
#pragma once
@@ -32,7 +32,7 @@ private:
* These values define the conditions of the reference image, against
* which we compare the new image.
*/
- libcamera::utils::Duration referenceShutterSpeed_;
+ libcamera::utils::Duration referenceExposureTime_;
double referenceGain_;
double referenceAperture_; /* units of 1/f */
double referenceY_; /* out of 65536 */
diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp
index bcd8b9ed..145175fb 100644
--- a/src/ipa/rpi/controller/rpi/noise.cpp
+++ b/src/ipa/rpi/controller/rpi/noise.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * noise.cpp - Noise control algorithm
+ * Noise control algorithm
*/
-#include <math.h>
+#include <cmath>
#include <libcamera/base/log.h>
@@ -69,7 +69,7 @@ void Noise::prepare(Metadata *imageMetadata)
* make some adjustments based on the camera mode (such as
* binning), if we knew how to discover it...
*/
- double factor = sqrt(deviceStatus.analogueGain) / modeFactor_;
+ double factor = std::sqrt(deviceStatus.analogueGain) / modeFactor_;
struct NoiseStatus status;
status.noiseConstant = referenceConstant_ * factor;
status.noiseSlope = referenceSlope_ * factor;
diff --git a/src/ipa/rpi/controller/rpi/noise.h b/src/ipa/rpi/controller/rpi/noise.h
index 74c31e64..6deae1f0 100644
--- a/src/ipa/rpi/controller/rpi/noise.h
+++ b/src/ipa/rpi/controller/rpi/noise.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * noise.h - Noise control algorithm
+ * Noise control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/saturation.cpp b/src/ipa/rpi/controller/rpi/saturation.cpp
index 813540e5..b83c5887 100644
--- a/src/ipa/rpi/controller/rpi/saturation.cpp
+++ b/src/ipa/rpi/controller/rpi/saturation.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
- * saturation.cpp - Saturation control algorithm
+ * Saturation control algorithm
*/
#include "saturation.h"
diff --git a/src/ipa/rpi/controller/rpi/sdn.cpp b/src/ipa/rpi/controller/rpi/sdn.cpp
index 2f777dd7..619178a8 100644
--- a/src/ipa/rpi/controller/rpi/sdn.cpp
+++ b/src/ipa/rpi/controller/rpi/sdn.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * sdn.cpp - SDN (spatial denoise) control algorithm
+ * SDN (spatial denoise) control algorithm
*/
#include <libcamera/base/log.h>
diff --git a/src/ipa/rpi/controller/rpi/sdn.h b/src/ipa/rpi/controller/rpi/sdn.h
index 9dd73c38..cb226de8 100644
--- a/src/ipa/rpi/controller/rpi/sdn.h
+++ b/src/ipa/rpi/controller/rpi/sdn.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * sdn.h - SDN (spatial denoise) control algorithm
+ * SDN (spatial denoise) control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp
index 4f6f020a..1d143ff5 100644
--- a/src/ipa/rpi/controller/rpi/sharpen.cpp
+++ b/src/ipa/rpi/controller/rpi/sharpen.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * sharpen.cpp - sharpening control algorithm
+ * sharpening control algorithm
*/
-#include <math.h>
+#include <cmath>
#include <libcamera/base/log.h>
@@ -68,7 +68,7 @@ void Sharpen::prepare(Metadata *imageMetadata)
* we adjust the limit and threshold less aggressively. Using a sqrt
* function is an arbitrary but gentle way of accomplishing this.
*/
- double userStrengthSqrt = sqrt(userStrength_);
+ double userStrengthSqrt = std::sqrt(userStrength_);
struct SharpenStatus status;
/*
* Binned modes seem to need the sharpening toned down with this
diff --git a/src/ipa/rpi/controller/rpi/sharpen.h b/src/ipa/rpi/controller/rpi/sharpen.h
index 8bb7631e..96ccd609 100644
--- a/src/ipa/rpi/controller/rpi/sharpen.h
+++ b/src/ipa/rpi/controller/rpi/sharpen.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * sharpen.h - sharpening control algorithm
+ * sharpening control algorithm
*/
#pragma once
diff --git a/src/ipa/rpi/controller/rpi/tonemap.cpp b/src/ipa/rpi/controller/rpi/tonemap.cpp
index 5f8b2bf2..3422adfe 100644
--- a/src/ipa/rpi/controller/rpi/tonemap.cpp
+++ b/src/ipa/rpi/controller/rpi/tonemap.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
- * tonemap.cpp - Tonemap control algorithm
+ * Tonemap control algorithm
*/
#include "tonemap.h"
@@ -33,7 +33,7 @@ int Tonemap::read(const libcamera::YamlObject &params)
config_.detailSlope = params["detail_slope"].get<double>(0.1);
config_.iirStrength = params["iir_strength"].get<double>(1.0);
config_.strength = params["strength"].get<double>(1.0);
- config_.tonemap.read(params["tone_curve"]);
+ config_.tonemap = params["tone_curve"].get<ipa::Pwl>(ipa::Pwl{});
return 0;
}
diff --git a/src/ipa/rpi/controller/rpi/tonemap.h b/src/ipa/rpi/controller/rpi/tonemap.h
index f25aa47f..ba0cf5c4 100644
--- a/src/ipa/rpi/controller/rpi/tonemap.h
+++ b/src/ipa/rpi/controller/rpi/tonemap.h
@@ -6,8 +6,9 @@
*/
#pragma once
+#include <libipa/pwl.h>
+
#include "algorithm.h"
-#include "pwl.h"
namespace RPiController {
@@ -16,7 +17,7 @@ struct TonemapConfig {
double detailSlope;
double iirStrength;
double strength;
- Pwl tonemap;
+ libcamera::ipa::Pwl tonemap;
};
class Tonemap : public Algorithm
diff --git a/src/ipa/rpi/controller/saturation_status.h b/src/ipa/rpi/controller/saturation_status.h
index 337b66a3..c7fadc99 100644
--- a/src/ipa/rpi/controller/saturation_status.h
+++ b/src/ipa/rpi/controller/saturation_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
- * saturation_status.h - Saturation control algorithm status
+ * Saturation control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/sharpen_algorithm.h b/src/ipa/rpi/controller/sharpen_algorithm.h
index 3be21c32..abd82cb2 100644
--- a/src/ipa/rpi/controller/sharpen_algorithm.h
+++ b/src/ipa/rpi/controller/sharpen_algorithm.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * sharpen_algorithm.h - sharpness control algorithm interface
+ * sharpness control algorithm interface
*/
#pragma once
diff --git a/src/ipa/rpi/controller/sharpen_status.h b/src/ipa/rpi/controller/sharpen_status.h
index 106166db..74910199 100644
--- a/src/ipa/rpi/controller/sharpen_status.h
+++ b/src/ipa/rpi/controller/sharpen_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Raspberry Pi Ltd
*
- * sharpen_status.h - Sharpen control algorithm status
+ * Sharpen control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/statistics.h b/src/ipa/rpi/controller/statistics.h
index 015d4efc..cbd81161 100644
--- a/src/ipa/rpi/controller/statistics.h
+++ b/src/ipa/rpi/controller/statistics.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Raspberry Pi Ltd
*
- * statistics.h - Raspberry Pi generic statistics structure
+ * Raspberry Pi generic statistics structure
*/
#pragma once
diff --git a/src/ipa/rpi/controller/stitch_status.h b/src/ipa/rpi/controller/stitch_status.h
index b17800ed..7812f3e3 100644
--- a/src/ipa/rpi/controller/stitch_status.h
+++ b/src/ipa/rpi/controller/stitch_status.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023 Raspberry Pi Ltd
*
- * stitch_status.h - stitch control algorithm status
+ * stitch control algorithm status
*/
#pragma once
diff --git a/src/ipa/rpi/controller/tonemap_status.h b/src/ipa/rpi/controller/tonemap_status.h
index 0e639946..0364ff66 100644
--- a/src/ipa/rpi/controller/tonemap_status.h
+++ b/src/ipa/rpi/controller/tonemap_status.h
@@ -2,16 +2,16 @@
/*
* Copyright (C) 2022 Raspberry Pi Ltd
*
- * hdr.h - Tonemap control algorithm status
+ * Tonemap control algorithm status
*/
#pragma once
-#include "pwl.h"
+#include <libipa/pwl.h>
struct TonemapStatus {
uint16_t detailConstant;
double detailSlope;
double iirStrength;
double strength;
- RPiController::Pwl tonemap;
+ libcamera::ipa::Pwl tonemap;
};
diff --git a/src/ipa/rpi/vc4/data/imx219.json b/src/ipa/rpi/vc4/data/imx219.json
index 54defc0b..a020b12f 100644
--- a/src/ipa/rpi/vc4/data/imx219.json
+++ b/src/ipa/rpi/vc4/data/imx219.json
@@ -131,282 +131,308 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -651,20 +677,19 @@
{
"rpi.sharpen": { }
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
}
diff --git a/src/ipa/rpi/vc4/data/imx219_noir.json b/src/ipa/rpi/vc4/data/imx219_noir.json
index e823a90d..d8bc9639 100644
--- a/src/ipa/rpi/vc4/data/imx219_noir.json
+++ b/src/ipa/rpi/vc4/data/imx219_noir.json
@@ -47,282 +47,308 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -585,20 +611,19 @@
{
"rpi.sharpen": { }
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
}
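Editor's note: the rpi.agc block in the diff above now nests its tuning under a "channels" array, and the reworked rpi.hdr "MultiExposureUnmerged" mode maps HDR channel names onto entries of that array. The following is a minimal sketch of that relationship; the local file path and the 0-based index convention are illustrative assumptions, not stated by this commit.

    import json

    # Hypothetical local copy of one of the tuning files touched above.
    with open("imx219_noir.json") as f:
        tuning = json.load(f)

    # Each entry of "algorithms" is a single-key object; flatten for lookup.
    algos = {name: cfg for entry in tuning["algorithms"] for name, cfg in entry.items()}

    agc = algos["rpi.agc"]
    # Older files keep the AGC tuning at the top level; treat that as one channel.
    channels = agc.get("channels", [agc])

    hdr = algos.get("rpi.hdr", {})
    for mode, cfg in hdr.items():
        for name, idx in cfg.get("channel_map", {}).items():
            # Assumed convention: the map value is a 0-based index into "channels".
            if not 0 <= idx < len(channels):
                raise ValueError(f"{mode}: channel {name!r} -> {idx} has no AGC channel")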
diff --git a/src/ipa/rpi/vc4/data/imx283.json b/src/ipa/rpi/vc4/data/imx283.json
new file mode 100644
index 00000000..bfacecc8
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx283.json
@@ -0,0 +1,313 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3200
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2461,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1148,
+ "reference_Y": 13314
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.204
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 199,
+ "slope": 0.01947
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2213.0, 0.9607, 0.2593,
+ 5313.0, 0.4822, 0.5909,
+ 6237.0, 0.4739, 0.6308
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0144,
+ "transverse_neg": 0.01
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2213,
+ "ccm":
+ [
+ 1.91264, -0.27609, -0.63655,
+ -0.65708, 2.11718, -0.46009,
+ 0.03629, -1.38441, 2.34811
+ ]
+ },
+ {
+ "ct": 2255,
+ "ccm":
+ [
+ 1.90369, -0.29309, -0.61059,
+ -0.64693, 2.08169, -0.43476,
+ 0.04086, -1.29999, 2.25914
+ ]
+ },
+ {
+ "ct": 2259,
+ "ccm":
+ [
+ 1.92762, -0.35134, -0.57628,
+ -0.63523, 2.08481, -0.44958,
+ 0.06754, -1.32953, 2.26199
+ ]
+ },
+ {
+ "ct": 5313,
+ "ccm":
+ [
+ 1.75924, -0.54053, -0.21871,
+ -0.38159, 1.88671, -0.50511,
+ -0.00747, -0.53492, 1.54239
+ ]
+ },
+ {
+ "ct": 6237,
+ "ccm":
+ [
+ 2.19299, -0.74764, -0.44536,
+ -0.51678, 2.27651, -0.75972,
+ -0.06498, -0.74269, 1.80767
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
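Editor's note: fields such as "y_target", "ct_curve", "gamma_curve" and the AWB "prior" lists in the new imx283.json are flat [x0, y0, x1, y1, ...] sequences describing piecewise-linear curves. A short sketch of reading them back as points, assuming only the standard library and a local copy of the file:

    import json

    def as_points(flat):
        """Turn a flat [x0, y0, x1, y1, ...] list into (x, y) pairs."""
        it = iter(flat)
        return list(zip(it, it))

    with open("imx283.json") as f:  # hypothetical local copy
        tuning = json.load(f)

    algos = {name: cfg for entry in tuning["algorithms"] for name, cfg in entry.items()}
    gamma = as_points(algos["rpi.contrast"]["gamma_curve"])

    # The x coordinates of a well-formed curve increase strictly.
    assert all(x0 < x1 for (x0, _), (x1, _) in zip(gamma, gamma[1:]))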
diff --git a/src/ipa/rpi/vc4/data/imx290.json b/src/ipa/rpi/vc4/data/imx290.json
index 8a7cadba..8f41bf51 100644
--- a/src/ipa/rpi/vc4/data/imx290.json
+++ b/src/ipa/rpi/vc4/data/imx290.json
@@ -52,15 +52,24 @@
{
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
},
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/imx296.json b/src/ipa/rpi/vc4/data/imx296.json
index 7621f759..8f24ce5b 100644
--- a/src/ipa/rpi/vc4/data/imx296.json
+++ b/src/ipa/rpi/vc4/data/imx296.json
@@ -135,15 +135,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
@@ -431,4 +440,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx296_mono.json b/src/ipa/rpi/vc4/data/imx296_mono.json
index d4140c81..fe331569 100644
--- a/src/ipa/rpi/vc4/data/imx296_mono.json
+++ b/src/ipa/rpi/vc4/data/imx296_mono.json
@@ -38,15 +38,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
@@ -228,4 +237,4 @@
}
}
]
-}
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx327.json b/src/ipa/rpi/vc4/data/imx327.json
new file mode 100644
index 00000000..40a56842
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx327.json
@@ -0,0 +1,215 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "description": "This is an interim tuning only. Please consider doing a more formal tuning for your application.",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx378.json b/src/ipa/rpi/vc4/data/imx378.json
index f7b68011..363b47e1 100644
--- a/src/ipa/rpi/vc4/data/imx378.json
+++ b/src/ipa/rpi/vc4/data/imx378.json
@@ -133,15 +133,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/imx415.json b/src/ipa/rpi/vc4/data/imx415.json
new file mode 100755
index 00000000..6ed16b17
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx415.json
@@ -0,0 +1,413 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 19230,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1198,
+ "reference_Y": 14876
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 17,
+ "reference_slope": 3.439
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 193,
+ "slope": 0.00902
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2698.0, 0.7681, 0.2026,
+ 2930.0, 0.7515, 0.2116,
+ 3643.0, 0.6355, 0.2858,
+ 4605.0, 0.4992, 0.4041,
+ 5658.0, 0.4498, 0.4574
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0112,
+ "transverse_neg": 0.01424
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.025, 1.016, 1.013, 1.011, 1.008, 1.005, 1.003, 1.001, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.027, 1.035,
+ 1.025, 1.017, 1.013, 1.011, 1.008, 1.005, 1.003, 1.003, 1.004, 1.005, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.022, 1.017, 1.013, 1.009, 1.007, 1.005, 1.003, 1.003, 1.004, 1.006, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.019, 1.015, 1.011, 1.007, 1.005, 1.003, 1.001, 1.001, 1.003, 1.004, 1.007, 1.009, 1.015, 1.022, 1.028, 1.035,
+ 1.018, 1.014, 1.009, 1.006, 1.004, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.021, 1.028, 1.035,
+ 1.018, 1.013, 1.011, 1.006, 1.003, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.022, 1.028, 1.036,
+ 1.018, 1.014, 1.011, 1.007, 1.004, 1.002, 1.001, 1.001, 1.001, 1.004, 1.007, 1.009, 1.015, 1.023, 1.029, 1.036,
+ 1.019, 1.014, 1.012, 1.008, 1.005, 1.003, 1.002, 1.001, 1.003, 1.005, 1.008, 1.012, 1.016, 1.024, 1.031, 1.037,
+ 1.021, 1.016, 1.013, 1.009, 1.008, 1.005, 1.003, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.026, 1.033, 1.039,
+ 1.025, 1.021, 1.016, 1.013, 1.009, 1.008, 1.006, 1.006, 1.008, 1.011, 1.014, 1.019, 1.024, 1.031, 1.038, 1.046,
+ 1.029, 1.025, 1.021, 1.018, 1.014, 1.013, 1.011, 1.011, 1.012, 1.015, 1.019, 1.023, 1.028, 1.035, 1.046, 1.051,
+ 1.032, 1.029, 1.023, 1.021, 1.018, 1.015, 1.014, 1.014, 1.015, 1.018, 1.022, 1.027, 1.033, 1.041, 1.051, 1.054
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.025, 1.011, 1.009, 1.005, 1.004, 1.003, 1.001, 1.001, 1.002, 1.006, 1.009, 1.012, 1.016, 1.021, 1.031, 1.041,
+ 1.025, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.004, 1.007, 1.009, 1.013, 1.021, 1.028, 1.037, 1.041,
+ 1.023, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.005, 1.007, 1.011, 1.014, 1.021, 1.028, 1.037, 1.048,
+ 1.022, 1.012, 1.007, 1.005, 1.002, 1.001, 1.001, 1.001, 1.003, 1.005, 1.009, 1.014, 1.019, 1.028, 1.039, 1.048,
+ 1.022, 1.011, 1.006, 1.003, 1.001, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.014, 1.021, 1.029, 1.039, 1.051,
+ 1.022, 1.012, 1.007, 1.003, 1.002, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.015, 1.021, 1.031, 1.041, 1.053,
+ 1.023, 1.013, 1.009, 1.005, 1.003, 1.003, 1.001, 1.002, 1.004, 1.006, 1.011, 1.015, 1.022, 1.031, 1.042, 1.056,
+ 1.024, 1.015, 1.012, 1.008, 1.005, 1.004, 1.004, 1.004, 1.006, 1.009, 1.013, 1.018, 1.024, 1.034, 1.045, 1.057,
+ 1.027, 1.017, 1.015, 1.012, 1.009, 1.007, 1.007, 1.008, 1.009, 1.013, 1.018, 1.023, 1.029, 1.038, 1.051, 1.061,
+ 1.029, 1.023, 1.017, 1.015, 1.014, 1.012, 1.011, 1.011, 1.014, 1.018, 1.024, 1.029, 1.036, 1.044, 1.056, 1.066,
+ 1.034, 1.028, 1.023, 1.022, 1.019, 1.019, 1.018, 1.018, 1.021, 1.025, 1.031, 1.035, 1.042, 1.053, 1.066, 1.074,
+ 1.041, 1.034, 1.027, 1.025, 1.025, 1.023, 1.023, 1.023, 1.025, 1.031, 1.035, 1.041, 1.049, 1.059, 1.074, 1.079
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.001, 1.001, 1.007, 1.015, 1.027, 1.034, 1.038, 1.041, 1.042, 1.043, 1.043, 1.043, 1.041, 1.039, 1.049, 1.054,
+ 1.011, 1.011, 1.013, 1.023, 1.032, 1.039, 1.044, 1.047, 1.052, 1.056, 1.059, 1.059, 1.055, 1.051, 1.054, 1.056,
+ 1.015, 1.015, 1.019, 1.032, 1.039, 1.044, 1.047, 1.052, 1.055, 1.059, 1.061, 1.066, 1.063, 1.058, 1.061, 1.064,
+ 1.016, 1.017, 1.023, 1.032, 1.041, 1.045, 1.048, 1.053, 1.056, 1.061, 1.066, 1.069, 1.067, 1.064, 1.065, 1.068,
+ 1.018, 1.019, 1.025, 1.033, 1.042, 1.045, 1.049, 1.054, 1.058, 1.063, 1.071, 1.072, 1.071, 1.068, 1.069, 1.071,
+ 1.023, 1.024, 1.029, 1.035, 1.043, 1.048, 1.052, 1.057, 1.061, 1.065, 1.074, 1.075, 1.075, 1.072, 1.072, 1.075,
+ 1.027, 1.028, 1.031, 1.038, 1.045, 1.051, 1.054, 1.059, 1.064, 1.068, 1.075, 1.079, 1.078, 1.075, 1.076, 1.081,
+ 1.029, 1.031, 1.033, 1.044, 1.048, 1.054, 1.059, 1.064, 1.067, 1.073, 1.079, 1.082, 1.082, 1.079, 1.081, 1.085,
+ 1.033, 1.033, 1.035, 1.047, 1.053, 1.058, 1.064, 1.067, 1.073, 1.079, 1.084, 1.086, 1.086, 1.084, 1.089, 1.091,
+ 1.037, 1.037, 1.038, 1.049, 1.057, 1.062, 1.068, 1.073, 1.079, 1.084, 1.089, 1.092, 1.092, 1.092, 1.096, 1.104,
+ 1.041, 1.041, 1.043, 1.051, 1.061, 1.068, 1.073, 1.079, 1.083, 1.089, 1.092, 1.094, 1.097, 1.099, 1.105, 1.115,
+ 1.048, 1.044, 1.044, 1.051, 1.063, 1.071, 1.076, 1.082, 1.088, 1.091, 1.094, 1.097, 1.099, 1.104, 1.115, 1.126
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.001, 1.001, 1.005, 1.011, 1.014, 1.018, 1.019, 1.019, 1.019, 1.021, 1.021, 1.021, 1.019, 1.017, 1.014, 1.014,
+ 1.009, 1.009, 1.011, 1.014, 1.019, 1.024, 1.026, 1.029, 1.031, 1.032, 1.032, 1.031, 1.027, 1.023, 1.022, 1.022,
+ 1.011, 1.012, 1.015, 1.018, 1.024, 1.026, 1.029, 1.032, 1.035, 1.036, 1.036, 1.034, 1.031, 1.027, 1.025, 1.025,
+ 1.012, 1.013, 1.015, 1.019, 1.025, 1.029, 1.032, 1.035, 1.036, 1.038, 1.038, 1.036, 1.034, 1.029, 1.026, 1.026,
+ 1.013, 1.014, 1.016, 1.019, 1.027, 1.031, 1.034, 1.037, 1.039, 1.039, 1.041, 1.039, 1.036, 1.031, 1.028, 1.027,
+ 1.014, 1.014, 1.017, 1.021, 1.027, 1.033, 1.037, 1.039, 1.041, 1.041, 1.042, 1.042, 1.039, 1.033, 1.029, 1.028,
+ 1.015, 1.015, 1.018, 1.021, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.042, 1.042, 1.039, 1.034, 1.029, 1.029,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.043, 1.043, 1.041, 1.035, 1.031, 1.031,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.032, 1.037, 1.041, 1.042, 1.042, 1.044, 1.043, 1.041, 1.036, 1.034, 1.033,
+ 1.016, 1.017, 1.017, 1.022, 1.027, 1.032, 1.036, 1.039, 1.042, 1.042, 1.043, 1.043, 1.041, 1.039, 1.036, 1.034,
+ 1.017, 1.017, 1.018, 1.022, 1.027, 1.031, 1.035, 1.039, 1.041, 1.042, 1.042, 1.042, 1.042, 1.039, 1.039, 1.039,
+ 1.018, 1.017, 1.017, 1.021, 1.027, 1.031, 1.033, 1.038, 1.041, 1.041, 1.042, 1.042, 1.041, 1.041, 1.041, 1.041
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.102, 1.903, 1.658, 1.483, 1.358, 1.267, 1.202, 1.202, 1.202, 1.242, 1.323, 1.431, 1.585, 1.797, 2.096, 2.351,
+ 1.996, 1.776, 1.549, 1.385, 1.273, 1.204, 1.138, 1.133, 1.133, 1.185, 1.252, 1.343, 1.484, 1.679, 1.954, 2.228,
+ 1.923, 1.689, 1.474, 1.318, 1.204, 1.138, 1.079, 1.071, 1.071, 1.133, 1.185, 1.284, 1.415, 1.597, 1.854, 2.146,
+ 1.881, 1.631, 1.423, 1.272, 1.159, 1.079, 1.051, 1.026, 1.046, 1.071, 1.144, 1.245, 1.369, 1.543, 1.801, 2.095,
+ 1.867, 1.595, 1.391, 1.242, 1.131, 1.051, 1.013, 1.002, 1.013, 1.046, 1.121, 1.219, 1.343, 1.511, 1.752, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.001, 1.001, 1.003, 1.045, 1.118, 1.217, 1.342, 1.511, 1.746, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.011, 1.003, 1.011, 1.046, 1.118, 1.217, 1.343, 1.511, 1.746, 2.079,
+ 1.884, 1.621, 1.411, 1.261, 1.149, 1.071, 1.048, 1.024, 1.046, 1.069, 1.141, 1.239, 1.369, 1.541, 1.781, 2.093,
+ 1.913, 1.675, 1.459, 1.304, 1.191, 1.125, 1.071, 1.065, 1.069, 1.124, 1.181, 1.278, 1.413, 1.592, 1.842, 2.133,
+ 1.981, 1.755, 1.529, 1.368, 1.251, 1.191, 1.125, 1.124, 1.124, 1.181, 1.242, 1.337, 1.479, 1.669, 1.935, 2.207,
+ 2.078, 1.867, 1.625, 1.453, 1.344, 1.251, 1.202, 1.201, 1.201, 1.242, 1.333, 1.418, 1.571, 1.776, 2.063, 2.321,
+ 2.217, 2.011, 1.747, 1.562, 1.431, 1.331, 1.278, 1.278, 1.278, 1.313, 1.407, 1.523, 1.686, 1.911, 2.226, 2.484
+ ],
+ "sigma": 0.00135,
+ "sigma_Cb": 0.00279
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2698,
+ "ccm":
+ [
+ 1.57227, -0.32596, -0.24631,
+ -0.61264, 1.70791, -0.09526,
+ -0.43254, 0.48489, 0.94765
+ ]
+ },
+ {
+ "ct": 2930,
+ "ccm":
+ [
+ 1.69455, -0.52724, -0.16731,
+ -0.67131, 1.78468, -0.11338,
+ -0.41609, 0.54693, 0.86916
+ ]
+ },
+ {
+ "ct": 3643,
+ "ccm":
+ [
+ 1.74041, -0.77553, 0.03512,
+ -0.44073, 1.34131, 0.09943,
+ -0.11035, -0.93919, 2.04954
+ ]
+ },
+ {
+ "ct": 4605,
+ "ccm":
+ [
+ 1.49865, -0.41638, -0.08227,
+ -0.39445, 1.70114, -0.30669,
+ 0.01319, -0.88009, 1.86689
+ ]
+ },
+ {
+ "ct": 5658,
+ "ccm":
+ [
+ 1.38601, -0.23128, -0.15472,
+ -0.37641, 1.70444, -0.32803,
+ -0.01575, -0.71466, 1.73041
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
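Editor's note: the ALSC tables added for imx415 ("luminance_lut" and the "calibrations_Cr"/"calibrations_Cb" tables) are flat lists of 192 gains laid out row-major. A sketch of reading one back as a grid; the 16x12 geometry is inferred from the 16-entry rows above, not stated in the file:

    import json

    GRID_W, GRID_H = 16, 12  # inferred from the 16-entry rows in the tables above

    with open("imx415.json") as f:  # hypothetical local copy
        tuning = json.load(f)

    alsc = next(entry["rpi.alsc"] for entry in tuning["algorithms"] if "rpi.alsc" in entry)
    lut = alsc["luminance_lut"]
    assert len(lut) == GRID_W * GRID_H

    # Row-major view of the spatial gain grid.
    grid = [lut[r * GRID_W:(r + 1) * GRID_W] for r in range(GRID_H)]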
diff --git a/src/ipa/rpi/vc4/data/imx462.json b/src/ipa/rpi/vc4/data/imx462.json
new file mode 100644
index 00000000..40a56842
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx462.json
@@ -0,0 +1,215 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "description": "This is an interim tuning only. Please consider doing a more formal tuning for your application.",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx477.json b/src/ipa/rpi/vc4/data/imx477.json
index 853bfa67..fa25ee86 100644
--- a/src/ipa/rpi/vc4/data/imx477.json
+++ b/src/ipa/rpi/vc4/data/imx477.json
@@ -115,16 +115,16 @@
"ct_curve":
[
2360.0, 0.6009, 0.3093,
- 2848.0, 0.5071, 0.4000,
+ 2848.0, 0.5071, 0.4,
2903.0, 0.4905, 0.4392,
3628.0, 0.4261, 0.5564,
3643.0, 0.4228, 0.5623,
- 4660.0, 0.3529, 0.6800,
- 5579.0, 0.3227, 0.7000,
- 6125.0, 0.3129, 0.7100,
- 6671.0, 0.3065, 0.7200,
- 7217.0, 0.3014, 0.7300,
- 7763.0, 0.2950, 0.7400,
+ 4660.0, 0.3529, 0.68,
+ 5579.0, 0.3227, 0.7,
+ 6125.0, 0.3129, 0.71,
+ 6671.0, 0.3065, 0.72,
+ 7217.0, 0.3014, 0.73,
+ 7763.0, 0.295, 0.74,
9505.0, 0.2524, 0.7856
],
"sensitivity_r": 1.05,
@@ -136,282 +136,308 @@
{
"rpi.agc":
{
- "channels":
- [
+ "channels": [
{
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
{
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
{
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- }
- ]
- }
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -656,20 +682,19 @@
{
"rpi.sharpen": { }
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx477_noir.json b/src/ipa/rpi/vc4/data/imx477_noir.json
index 143e20bd..472f33fe 100644
--- a/src/ipa/rpi/vc4/data/imx477_noir.json
+++ b/src/ipa/rpi/vc4/data/imx477_noir.json
@@ -47,282 +47,308 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.3,
- 1000, 0.3
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ]
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -612,20 +638,19 @@
{
"rpi.sharpen": { }
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
}
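Editor's note: in the "exposure_modes" entries above, "shutter" and "gain" are parallel stages of an exposure profile, and the exposure available at each stage is their product. A hedged sketch computing those totals; treating the shutter values as microseconds follows the usual convention for these files and is an assumption here:

    import json

    with open("imx477_noir.json") as f:  # hypothetical local copy
        tuning = json.load(f)

    algos = {name: cfg for entry in tuning["algorithms"] for name, cfg in entry.items()}
    modes = algos["rpi.agc"]["channels"][0]["exposure_modes"]

    for name, mode in modes.items():
        # Parallel lists: stage i pairs shutter[i] (us) with gain[i].
        totals = [s * g for s, g in zip(mode["shutter"], mode["gain"])]
        print(name, "max exposure (gain * us):", max(totals))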
diff --git a/src/ipa/rpi/vc4/data/imx477_scientific.json b/src/ipa/rpi/vc4/data/imx477_scientific.json
index 26c692fd..9dc32eb1 100644
--- a/src/ipa/rpi/vc4/data/imx477_scientific.json
+++ b/src/ipa/rpi/vc4/data/imx477_scientific.json
@@ -148,15 +148,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/imx477_v1.json b/src/ipa/rpi/vc4/data/imx477_v1.json
index d6402009..55e4adc1 100644
--- a/src/ipa/rpi/vc4/data/imx477_v1.json
+++ b/src/ipa/rpi/vc4/data/imx477_v1.json
@@ -138,15 +138,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/imx519.json b/src/ipa/rpi/vc4/data/imx519.json
index 1b0a7747..ce194256 100644
--- a/src/ipa/rpi/vc4/data/imx519.json
+++ b/src/ipa/rpi/vc4/data/imx519.json
@@ -133,15 +133,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/imx708.json b/src/ipa/rpi/vc4/data/imx708.json
index 26aafc95..4de6f079 100644
--- a/src/ipa/rpi/vc4/data/imx708.json
+++ b/src/ipa/rpi/vc4/data/imx708.json
@@ -139,255 +139,281 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -627,20 +653,19 @@
"map": [ 0.0, 445, 15.0, 925 ]
}
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
-}
+}
\ No newline at end of file
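The rpi.hdr block above now advertises a single MultiExposureUnmerged mode in place of the earlier MultiExposure/SingleExposure pair; the same substitution recurs in the other tuning files below. The cadence array lists AGC channel indices for the pipeline to cycle through frame by frame, and channel_map ties the named HDR exposures to those indices. A minimal sketch of that scheduling, assuming the cadence simply repeats; the helper below is illustrative only, not the IPA's actual API:

    #include <cstdint>
    #include <vector>

    /* Pick the AGC channel driving a given frame from an HDR cadence. */
    unsigned int channelForFrame(const std::vector<unsigned int> &cadence,
                                 uint64_t frame)
    {
        return cadence[frame % cadence.size()];
    }

    /*
     * With "cadence": [ 1, 2 ], frames alternate between AGC channels 1
     * and 2, i.e. the "short" and "long" exposures of channel_map above.
     */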
diff --git a/src/ipa/rpi/vc4/data/imx708_noir.json b/src/ipa/rpi/vc4/data/imx708_noir.json
index 8259ca4d..7b7ee874 100644
--- a/src/ipa/rpi/vc4/data/imx708_noir.json
+++ b/src/ipa/rpi/vc4/data/imx708_noir.json
@@ -139,255 +139,281 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -726,20 +752,19 @@
"map": [ 0.0, 445, 15.0, 925 ]
}
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
-}
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708_wide.json b/src/ipa/rpi/vc4/data/imx708_wide.json
index 0f846ea2..6f45aafc 100644
--- a/src/ipa/rpi/vc4/data/imx708_wide.json
+++ b/src/ipa/rpi/vc4/data/imx708_wide.json
@@ -129,255 +129,281 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -638,20 +664,19 @@
"map": [ 0.0, 420, 35.0, 920 ]
}
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
-}
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx708_wide_noir.json b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
index f12ddbb6..b9a5227e 100644
--- a/src/ipa/rpi/vc4/data/imx708_wide_noir.json
+++ b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
@@ -129,255 +129,281 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 15000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
- },
- "long":
- {
- "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.2,
- 1000, 0.2
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "startup_frames": 5,
- "convergence_frames": 6,
- "speed": 0.15
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -629,20 +655,19 @@
"map": [ 0.0, 420, 35.0, 920 ]
}
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
-}
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/meson.build b/src/ipa/rpi/vc4/data/meson.build
index afbf875a..7a8001ee 100644
--- a/src/ipa/rpi/vc4/data/meson.build
+++ b/src/ipa/rpi/vc4/data/meson.build
@@ -3,10 +3,14 @@
conf_files = files([
'imx219.json',
'imx219_noir.json',
+ 'imx283.json',
'imx290.json',
'imx296.json',
'imx296_mono.json',
+ 'imx327.json',
'imx378.json',
+ 'imx415.json',
+ 'imx462.json',
'imx477.json',
'imx477_noir.json',
'imx477_scientific.json',
@@ -18,6 +22,7 @@ conf_files = files([
'ov5647.json',
'ov5647_noir.json',
'ov64a40.json',
+ 'ov7251_mono.json',
'ov9281_mono.json',
'se327m12.json',
'uncalibrated.json',
diff --git a/src/ipa/rpi/vc4/data/ov5647.json b/src/ipa/rpi/vc4/data/ov5647.json
index 4def9ffc..40c6059c 100644
--- a/src/ipa/rpi/vc4/data/ov5647.json
+++ b/src/ipa/rpi/vc4/data/ov5647.json
@@ -131,285 +131,309 @@
{
"rpi.agc":
{
- "channels":
- [
- {
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "base_ev": 1.25
- },
- {
- "base_ev": 0.125,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "base_ev": 1.25
- },
- {
- "base_ev": 1.5,
- "metering_modes":
- {
- "centre-weighted":
- {
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
- },
- "spot":
- {
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
- },
- "matrix":
- {
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
- }
- },
- "exposure_modes":
- {
- "normal":
- {
- "shutter": [ 100, 10000, 30000, 60000, 66666 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "short":
- {
- "shutter": [ 100, 5000, 10000, 20000, 33333 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
- },
- "long":
- {
- "shutter": [ 100, 10000, 30000, 60000, 120000 ],
- "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
- }
- },
- "constraint_modes":
- {
- "normal": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- }
- ],
- "highlight": [
- {
- "bound": "LOWER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.5,
- 1000, 0.5
- ]
- },
- {
- "bound": "UPPER",
- "q_lo": 0.98,
- "q_hi": 1.0,
- "y_target":
- [
- 0, 0.8,
- 1000, 0.8
- ]
- }
- ],
- "shadows": [
- {
- "bound": "LOWER",
- "q_lo": 0.0,
- "q_hi": 0.5,
- "y_target":
- [
- 0, 0.17,
- 1000, 0.17
- ]
- }
- ]
- },
- "y_target":
- [
- 0, 0.16,
- 1000, 0.165,
- 10000, 0.17
- ],
- "base_ev": 1.25
- }
- ]
- }
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "base_ev": 1.25
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
},
{
"rpi.alsc":
@@ -654,20 +678,19 @@
{
"rpi.sharpen": { }
},
- {
- "rpi.hdr":
- {
- "MultiExposure":
- {
- "cadence": [ 1, 2 ],
- "channel_map": { "short": 1, "long": 2 }
- },
- "SingleExposure":
- {
- "cadence": [ 1 ],
- "channel_map": { "short": 1 }
- }
- }
- }
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
]
}
diff --git a/src/ipa/rpi/vc4/data/ov5647_noir.json b/src/ipa/rpi/vc4/data/ov5647_noir.json
index a6c6722f..488b7119 100644
--- a/src/ipa/rpi/vc4/data/ov5647_noir.json
+++ b/src/ipa/rpi/vc4/data/ov5647_noir.json
@@ -51,15 +51,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/ov7251_mono.json b/src/ipa/rpi/vc4/data/ov7251_mono.json
new file mode 100644
index 00000000..a9d05a01
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov7251_mono.json
@@ -0,0 +1,136 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2000,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 20000
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.5
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "n_iter": 0,
+ "luminance_strength": 1.0,
+ "corner_strength": 1.5
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
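Tables such as the gamma_curve above and the y_target lists throughout these files are flat sequences of (input, output) pairs that the IPA interpolates at run time. A small sketch of the piecewise-linear evaluation this implies, assuming straight-line interpolation between points (evalPwl is a hypothetical helper, not libcamera's own Pwl class):

    #include <cstddef>
    #include <vector>

    /* Evaluate a curve stored as x0, y0, x1, y1, ... at position x. */
    double evalPwl(const std::vector<double> &pts, double x)
    {
        if (x <= pts[0])
            return pts[1];
        for (std::size_t i = 2; i < pts.size(); i += 2) {
            if (x <= pts[i]) {
                double t = (x - pts[i - 2]) / (pts[i] - pts[i - 2]);
                return pts[i - 1] + t * (pts[i + 1] - pts[i - 1]);
            }
        }
        return pts.back();
    }

For instance, evaluating the gamma_curve above at 1536, halfway between the 1024 and 2048 sample points, would return (5040 + 9338) / 2 = 7189.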
diff --git a/src/ipa/rpi/vc4/data/ov9281_mono.json b/src/ipa/rpi/vc4/data/ov9281_mono.json
index 2b7292ec..a9d05a01 100644
--- a/src/ipa/rpi/vc4/data/ov9281_mono.json
+++ b/src/ipa/rpi/vc4/data/ov9281_mono.json
@@ -35,7 +35,10 @@
{
"centre-weighted":
{
- "weights": [ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/se327m12.json b/src/ipa/rpi/vc4/data/se327m12.json
index 8552ed92..948169db 100644
--- a/src/ipa/rpi/vc4/data/se327m12.json
+++ b/src/ipa/rpi/vc4/data/se327m12.json
@@ -133,15 +133,24 @@
{
"centre-weighted":
{
- "weights": [ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
},
"spot":
{
- "weights": [ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
},
"matrix":
{
- "weights": [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/data/uncalibrated.json b/src/ipa/rpi/vc4/data/uncalibrated.json
index 7654defa..cdc56b32 100644
--- a/src/ipa/rpi/vc4/data/uncalibrated.json
+++ b/src/ipa/rpi/vc4/data/uncalibrated.json
@@ -22,7 +22,10 @@
{
"centre-weighted":
{
- "weights": [ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 ]
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
}
},
"exposure_modes":
diff --git a/src/ipa/rpi/vc4/meson.build b/src/ipa/rpi/vc4/meson.build
index 590e9197..c10fa17e 100644
--- a/src/ipa/rpi/vc4/meson.build
+++ b/src/ipa/rpi/vc4/meson.build
@@ -15,7 +15,6 @@ vc4_ipa_libs = [
vc4_ipa_includes = [
ipa_includes,
- libipa_includes,
]
vc4_ipa_sources = files([
@@ -24,12 +23,10 @@ vc4_ipa_sources = files([
vc4_ipa_includes += include_directories('..')
-mod = shared_module(ipa_name,
- [vc4_ipa_sources, libcamera_generated_ipa_headers],
+mod = shared_module(ipa_name, vc4_ipa_sources,
name_prefix : '',
include_directories : vc4_ipa_includes,
- dependencies : vc4_ipa_deps,
- link_with : libipa,
+ dependencies : [vc4_ipa_deps, libipa_dep],
link_whole : vc4_ipa_libs,
install : true,
install_dir : ipa_install_dir)
diff --git a/src/ipa/rpi/vc4/vc4.cpp b/src/ipa/rpi/vc4/vc4.cpp
index d2159a51..ba43e474 100644
--- a/src/ipa/rpi/vc4/vc4.cpp
+++ b/src/ipa/rpi/vc4/vc4.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2021, Raspberry Pi Ltd
*
- * rpi.cpp - Raspberry Pi VC4/BCM2835 ISP IPA.
+ * Raspberry Pi VC4/BCM2835 ISP IPA.
*/
#include <string.h>
@@ -583,7 +583,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerVc4",
+ "rpi/vc4",
"rpi/vc4",
};
diff --git a/src/ipa/simple/algorithms/agc.cpp b/src/ipa/simple/algorithms/agc.cpp
new file mode 100644
index 00000000..72aade14
--- /dev/null
+++ b/src/ipa/simple/algorithms/agc.cpp
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Exposure and gain
+ */
+
+#include "agc.h"
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftExposure)
+
+namespace ipa::soft::algorithms {
+
+/*
+ * The number of bins to use for the optimal exposure calculations.
+ */
+static constexpr unsigned int kExposureBinsCount = 5;
+
+/*
+ * The exposure is optimal when the mean sample value of the histogram is
+ * in the middle of the range.
+ */
+static constexpr float kExposureOptimal = kExposureBinsCount / 2.0;
+
+/*
+ * This implements the hysteresis for the exposure adjustment. The value
+ * is small enough to keep the exposure close to the optimum, and big
+ * enough to prevent the exposure from wobbling around the optimal value.
+ */
+static constexpr float kExposureSatisfactory = 0.2;
+
+Agc::Agc()
+{
+}
+
+void Agc::updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV)
+{
+ /*
+ * A kExpDenominator of 10 gives a ~10% increment/decrement;
+ * a kExpDenominator of 5 gives ~20%.
+ */
+ static constexpr uint8_t kExpDenominator = 10;
+ static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1;
+ static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1;
+
+ double next;
+ int32_t &exposure = frameContext.sensor.exposure;
+ double &again = frameContext.sensor.gain;
+
+ if (exposureMSV < kExposureOptimal - kExposureSatisfactory) {
+ next = exposure * kExpNumeratorUp / kExpDenominator;
+ if (next - exposure < 1)
+ exposure += 1;
+ else
+ exposure = next;
+ if (exposure >= context.configuration.agc.exposureMax) {
+ next = again * kExpNumeratorUp / kExpDenominator;
+ if (next - again < context.configuration.agc.againMinStep)
+ again += context.configuration.agc.againMinStep;
+ else
+ again = next;
+ }
+ }
+
+ if (exposureMSV > kExposureOptimal + kExposureSatisfactory) {
+ if (exposure == context.configuration.agc.exposureMax &&
+ again > context.configuration.agc.againMin) {
+ next = again * kExpNumeratorDown / kExpDenominator;
+ if (again - next < context.configuration.agc.againMinStep)
+ again -= context.configuration.agc.againMinStep;
+ else
+ again = next;
+ } else {
+ next = exposure * kExpNumeratorDown / kExpDenominator;
+ if (exposure - next < 1)
+ exposure -= 1;
+ else
+ exposure = next;
+ }
+ }
+
+ exposure = std::clamp(exposure, context.configuration.agc.exposureMin,
+ context.configuration.agc.exposureMax);
+ again = std::clamp(again, context.configuration.agc.againMin,
+ context.configuration.agc.againMax);
+
+ LOG(IPASoftExposure, Debug)
+ << "exposureMSV " << exposureMSV
+ << " exp " << exposure << " again " << again;
+}
+
+void Agc::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ /*
+ * Calculate Mean Sample Value (MSV) according to formula from:
+ * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf
+ */
+ const auto &histogram = stats->yHistogram;
+ const unsigned int blackLevelHistIdx =
+ context.activeState.blc.level / (256 / SwIspStats::kYHistogramSize);
+ const unsigned int histogramSize =
+ SwIspStats::kYHistogramSize - blackLevelHistIdx;
+ const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount;
+ const unsigned int yHistValsPerBinMod =
+ histogramSize / (histogramSize % kExposureBinsCount + 1);
+ int exposureBins[kExposureBinsCount] = {};
+ unsigned int denom = 0;
+ unsigned int num = 0;
+
+ for (unsigned int i = 0; i < histogramSize; i++) {
+ unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin;
+ exposureBins[idx] += histogram[blackLevelHistIdx + i];
+ }
+
+ for (unsigned int i = 0; i < kExposureBinsCount; i++) {
+ LOG(IPASoftExposure, Debug) << i << ": " << exposureBins[i];
+ denom += exposureBins[i];
+ num += exposureBins[i] * (i + 1);
+ }
+
+ float exposureMSV = (denom == 0 ? 0 : static_cast<float>(num) / denom);
+ updateExposure(context, frameContext, exposureMSV);
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
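The MSV computed in Agc::process() above is the count-weighted mean bin index plus one, MSV = sum(bins[i] * (i + 1)) / sum(bins[i]) over the five bins, so the optimum sits at 2.5 and the +/-0.2 kExposureSatisfactory band forms the hysteresis; updateExposure() then nudges the exposure by roughly 10% per frame, handing over to analogue gain once the sensor exposure limit is reached. A standalone check of the arithmetic with a toy histogram (not the SwIspStats type):

    #include <array>
    #include <iostream>

    int main()
    {
        /* Pixel counts per exposure bin for a dark-ish frame. */
        std::array<unsigned int, 5> bins = { 400, 300, 200, 80, 20 };
        unsigned int num = 0;
        unsigned int denom = 0;
        for (unsigned int i = 0; i < bins.size(); i++) {
            denom += bins[i];
            num += bins[i] * (i + 1);
        }
        /* Prints 2.02: below 2.5 - 0.2, so exposure would be raised. */
        std::cout << "MSV " << static_cast<float>(num) / denom << "\n";
        return 0;
    }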
diff --git a/src/ipa/simple/algorithms/agc.h b/src/ipa/simple/algorithms/agc.h
new file mode 100644
index 00000000..112d9f5a
--- /dev/null
+++ b/src/ipa/simple/algorithms/agc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Exposure and gain
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Agc : public Algorithm
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+
+private:
+ void updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV);
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/algorithm.h b/src/ipa/simple/algorithms/algorithm.h
new file mode 100644
index 00000000..41f63170
--- /dev/null
+++ b/src/ipa/simple/algorithms/algorithm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Software ISP control algorithm interface
+ */
+
+#pragma once
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+using Algorithm = libcamera::ipa::Algorithm<Module>;
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/awb.cpp b/src/ipa/simple/algorithms/awb.cpp
new file mode 100644
index 00000000..195de41d
--- /dev/null
+++ b/src/ipa/simple/algorithms/awb.cpp
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Auto white balance
+ */
+
+#include "awb.h"
+
+#include <numeric>
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "simple/ipa_context.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftAwb)
+
+namespace ipa::soft::algorithms {
+
+int Awb::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ auto &gains = context.activeState.gains;
+ gains.red = gains.green = gains.blue = 1.0;
+
+ return 0;
+}
+
+void Awb::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ const SwIspStats::Histogram &histogram = stats->yHistogram;
+ const uint8_t blackLevel = context.activeState.blc.level;
+
+ /*
+ * The black level must be subtracted to get correct AWB ratios; they
+ * would be off if they were computed from the whole brightness range
+ * rather than from the sensor range.
+ */
+ const uint64_t nPixels = std::accumulate(
+ histogram.begin(), histogram.end(), 0);
+ const uint64_t offset = blackLevel * nPixels;
+ const uint64_t sumR = stats->sumR_ - offset / 4;
+ const uint64_t sumG = stats->sumG_ - offset / 2;
+ const uint64_t sumB = stats->sumB_ - offset / 4;
+
+ /*
+ * Calculate red and blue gains for AWB.
+ * Clamp the maximum gain at 4.0; this also avoids division by zero.
+ */
+ auto &gains = context.activeState.gains;
+ gains.red = sumR <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumR;
+ gains.blue = sumB <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumB;
+ /* Green gain is fixed to 1.0 */
+
+ LOG(IPASoftAwb, Debug) << "gain R/B " << gains.red << "/" << gains.blue;
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
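Awb::process() above is a grey-world balance on the Bayer channel sums: the red and blue gains scale their sums to match green once the black-level offset is removed, green taking half of the offset and red and blue a quarter each to match the 2:1:1 green/red/blue site ratio of a Bayer mosaic. A standalone sketch of the gain computation, with made-up and already black-level-corrected sums:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        /* Example channel sums; green is fixed at gain 1.0. */
        uint64_t sumR = 500000, sumG = 1000000, sumB = 250000;

        /* Clamp at 4.0 when a channel sum is tiny (also avoids /0). */
        double gainR = sumR <= sumG / 4 ? 4.0
                     : static_cast<double>(sumG) / sumR; /* 2.0 */
        double gainB = sumB <= sumG / 4 ? 4.0
                     : static_cast<double>(sumG) / sumB; /* 4.0 */
        std::cout << "gain R/B " << gainR << "/" << gainB << "\n";
        return 0;
    }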
diff --git a/src/ipa/simple/algorithms/awb.h b/src/ipa/simple/algorithms/awb.h
new file mode 100644
index 00000000..db1496cd
--- /dev/null
+++ b/src/ipa/simple/algorithms/awb.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Auto white balance
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Awb : public Algorithm
+{
+public:
+ Awb() = default;
+ ~Awb() = default;
+
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void process(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/blc.cpp b/src/ipa/simple/algorithms/blc.cpp
new file mode 100644
index 00000000..1d7d370b
--- /dev/null
+++ b/src/ipa/simple/algorithms/blc.cpp
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Black level handling
+ */
+
+#include "blc.h"
+
+#include <numeric>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+LOG_DEFINE_CATEGORY(IPASoftBL)
+
+BlackLevel::BlackLevel()
+{
+}
+
+int BlackLevel::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ auto blackLevel = tuningData["blackLevel"].get<int16_t>();
+ if (blackLevel.has_value()) {
+ /*
+ * Convert 16 bit values from the tuning file to 8 bit black
+ * level for the SoftISP.
+ */
+ definedLevel_ = blackLevel.value() >> 8;
+ }
+ return 0;
+}
+
+int BlackLevel::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ if (definedLevel_.has_value())
+ context.configuration.black.level = definedLevel_;
+ context.activeState.blc.level =
+ context.configuration.black.level.value_or(255);
+ return 0;
+}
+
+void BlackLevel::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ if (context.configuration.black.level.has_value())
+ return;
+
+ if (frameContext.sensor.exposure == exposure_ &&
+ frameContext.sensor.gain == gain_) {
+ return;
+ }
+
+ const SwIspStats::Histogram &histogram = stats->yHistogram;
+
+ /*
+ * The constant is selected to be "good enough", not overly
+ * conservative or aggressive. There is no magic about the given value.
+ */
+ constexpr float ignoredPercentage = 0.02;
+ const unsigned int total =
+ std::accumulate(begin(histogram), end(histogram), 0);
+ const unsigned int pixelThreshold = ignoredPercentage * total;
+ const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize;
+ const unsigned int currentBlackIdx =
+ context.activeState.blc.level / histogramRatio;
+
+ for (unsigned int i = 0, seen = 0;
+ i < currentBlackIdx && i < SwIspStats::kYHistogramSize;
+ i++) {
+ seen += histogram[i];
+ if (seen >= pixelThreshold) {
+ context.activeState.blc.level = i * histogramRatio;
+ exposure_ = frameContext.sensor.exposure;
+ gain_ = frameContext.sensor.gain;
+ LOG(IPASoftBL, Debug)
+ << "Auto-set black level: "
+ << i << "/" << SwIspStats::kYHistogramSize
+ << " (" << 100 * (seen - histogram[i]) / total << "% below, "
+ << 100 * seen / total << "% at or below)";
+ break;
+ }
+ }
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevel, "BlackLevel")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
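The auto black level in BlackLevel::process() above amounts to a low-percentile estimate: walk the luminance histogram up from bin 0, stop at the first bin where the cumulative count reaches 2% of all pixels, and scale that bin index back to the 0-255 level range. A compact restatement under those assumptions, using a toy 16-bin histogram rather than the real SwIspStats one:

    #include <array>
    #include <iostream>

    int main()
    {
        std::array<unsigned int, 16> hist{};
        hist[0] = 5; hist[1] = 20; hist[2] = 400; hist[3] = 575;

        const unsigned int total = 1000;           /* sum of all bins */
        const unsigned int threshold = total / 50; /* 2% of pixels */

        for (unsigned int i = 0, seen = 0; i < hist.size(); i++) {
            seen += hist[i];
            if (seen >= threshold) {
                /* 16 bins spanning 0..255; prints "black level 16". */
                std::cout << "black level "
                          << i * (256 / hist.size()) << "\n";
                break;
            }
        }
        return 0;
    }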
diff --git a/src/ipa/simple/algorithms/blc.h b/src/ipa/simple/algorithms/blc.h
new file mode 100644
index 00000000..52d59cab
--- /dev/null
+++ b/src/ipa/simple/algorithms/blc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Black level handling
+ */
+
+#pragma once
+
+#include <optional>
+#include <stdint.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class BlackLevel : public Algorithm
+{
+public:
+ BlackLevel();
+ ~BlackLevel() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+
+private:
+ int32_t exposure_;
+ double gain_;
+ std::optional<uint8_t> definedLevel_;
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/lut.cpp b/src/ipa/simple/algorithms/lut.cpp
new file mode 100644
index 00000000..0ba2391f
--- /dev/null
+++ b/src/ipa/simple/algorithms/lut.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Color lookup tables construction
+ */
+
+#include "lut.h"
+
+#include <algorithm>
+#include <cmath>
+#include <optional>
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "simple/ipa_context.h"
+
+#include "control_ids.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftLut)
+
+namespace ipa::soft::algorithms {
+
+int Lut::init(IPAContext &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+{
+ context.ctrlMap[&controls::Contrast] = ControlInfo(0.0f, 2.0f, 1.0f);
+ return 0;
+}
+
+int Lut::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ /* Gamma value is fixed */
+ context.configuration.gamma = 0.5;
+ context.activeState.knobs.contrast = std::optional<double>();
+ updateGammaTable(context);
+
+ return 0;
+}
+
+void Lut::queueRequest(typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ const ControlList &controls)
+{
+ const auto &contrast = controls.get(controls::Contrast);
+ if (contrast.has_value()) {
+ context.activeState.knobs.contrast = contrast;
+ LOG(IPASoftLut, Debug) << "Setting contrast to " << contrast.value();
+ }
+}
+
+void Lut::updateGammaTable(IPAContext &context)
+{
+ auto &gammaTable = context.activeState.gamma.gammaTable;
+ const auto blackLevel = context.activeState.blc.level;
+ const unsigned int blackIndex = blackLevel * gammaTable.size() / 256;
+ const auto contrast = context.activeState.knobs.contrast.value_or(1.0);
+
+ std::fill(gammaTable.begin(), gammaTable.begin() + blackIndex, 0);
+ const float divisor = gammaTable.size() - blackIndex - 1.0;
+ for (unsigned int i = blackIndex; i < gammaTable.size(); i++) {
+ double normalized = (i - blackIndex) / divisor;
+ /* Convert 0..2 to 0..infinity; avoid actual infinity at tan(pi/2) */
+ double contrastExp = tan(std::clamp(contrast * M_PI_4, 0.0, M_PI_2 - 0.00001));
+ /* Apply simple S-curve */
+ if (normalized < 0.5)
+ normalized = 0.5 * std::pow(normalized / 0.5, contrastExp);
+ else
+ normalized = 1.0 - 0.5 * std::pow((1.0 - normalized) / 0.5, contrastExp);
+ gammaTable[i] = UINT8_MAX *
+ std::pow(normalized, context.configuration.gamma);
+ }
+
+ context.activeState.gamma.blackLevel = blackLevel;
+ context.activeState.gamma.contrast = contrast;
+}
+
+void Lut::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] DebayerParams *params)
+{
+ /*
+	 * Update the gamma table if needed, i.e. when the black level or the
+	 * contrast changes. Since the black level gets updated only when a
+	 * lower value is observed, the table is not permanently prone to
+	 * minor fluctuations or rounding errors.
+ */
+ if (context.activeState.gamma.blackLevel != context.activeState.blc.level ||
+ context.activeState.gamma.contrast != context.activeState.knobs.contrast)
+ updateGammaTable(context);
+
+ auto &gains = context.activeState.gains;
+ auto &gammaTable = context.activeState.gamma.gammaTable;
+ const unsigned int gammaTableSize = gammaTable.size();
+
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ const double div = static_cast<double>(DebayerParams::kRGBLookupSize) /
+ gammaTableSize;
+ /* Apply gamma after gain! */
+ unsigned int idx;
+ idx = std::min({ static_cast<unsigned int>(i * gains.red / div),
+ gammaTableSize - 1 });
+ params->red[i] = gammaTable[idx];
+ idx = std::min({ static_cast<unsigned int>(i * gains.green / div),
+ gammaTableSize - 1 });
+ params->green[i] = gammaTable[idx];
+ idx = std::min({ static_cast<unsigned int>(i * gains.blue / div),
+ gammaTableSize - 1 });
+ params->blue[i] = gammaTable[idx];
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Lut, "Lut")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
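The contrast handling above is compact enough to verify in isolation: the knob value in [0, 2] is mapped through tan(c * pi/4) to an exponent in [0, inf), so a contrast of 1.0 yields an exponent of 1 and leaves the curve untouched. A minimal standalone sketch of the same mapping (the function name and test values are illustrative, not part of the patch):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

/* Mirrors the S-curve from Lut::updateGammaTable(): contrast 1.0 is the
 * identity, lower values flatten the curve, higher values steepen it. */
static double applyContrast(double normalized, double contrast)
{
	/* Convert 0..2 to 0..infinity; avoid actual infinity at tan(pi/2). */
	const double contrastExp =
		std::tan(std::clamp(contrast * M_PI_4, 0.0, M_PI_2 - 0.00001));

	if (normalized < 0.5)
		return 0.5 * std::pow(normalized / 0.5, contrastExp);
	return 1.0 - 0.5 * std::pow((1.0 - normalized) / 0.5, contrastExp);
}

int main()
{
	const double contrasts[] = { 0.5, 1.0, 1.5 };

	for (double contrast : contrasts)
		printf("contrast %.1f: f(0.25) = %.3f\n",
		       contrast, applyContrast(0.25, contrast));
	return 0;
}
```

For contrast 1.0 the output is 0.250, confirming the identity behaviour the gamma table computation relies on.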
diff --git a/src/ipa/simple/algorithms/lut.h b/src/ipa/simple/algorithms/lut.h
new file mode 100644
index 00000000..889f864b
--- /dev/null
+++ b/src/ipa/simple/algorithms/lut.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Color lookup tables construction
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Lut : public Algorithm
+{
+public:
+ Lut() = default;
+ ~Lut() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void queueRequest(typename Module::Context &context,
+ const uint32_t frame,
+ typename Module::FrameContext &frameContext,
+ const ControlList &controls)
+ override;
+ void prepare(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ DebayerParams *params) override;
+
+private:
+ void updateGammaTable(IPAContext &context);
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/meson.build b/src/ipa/simple/algorithms/meson.build
new file mode 100644
index 00000000..37a2eb53
--- /dev/null
+++ b/src/ipa/simple/algorithms/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+soft_simple_ipa_algorithms = files([
+ 'awb.cpp',
+ 'agc.cpp',
+ 'blc.cpp',
+ 'lut.cpp',
+])
diff --git a/src/ipa/simple/data/meson.build b/src/ipa/simple/data/meson.build
new file mode 100644
index 00000000..92795ee4
--- /dev/null
+++ b/src/ipa/simple/data/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+# The install_dir must match the name from the IPAModuleInfo
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'simple',
+ install_tag : 'runtime')
diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml
new file mode 100644
index 00000000..3f147112
--- /dev/null
+++ b/src/ipa/simple/data/uncalibrated.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - BlackLevel:
+ - Awb:
+ - Lut:
+ - Agc:
+...
diff --git a/src/ipa/simple/ipa_context.cpp b/src/ipa/simple/ipa_context.cpp
new file mode 100644
index 00000000..3f94bbeb
--- /dev/null
+++ b/src/ipa/simple/ipa_context.cpp
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ * Copyright (C) 2024 Red Hat Inc.
+ *
+ * Software ISP IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::soft {
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief The active state of the IPA algorithms
+ *
+ * The IPA is fed with the statistics generated from the latest frame processed.
+ * The statistics are then processed by the IPA algorithms to compute parameters
+ * required for the next frame capture and processing. The current state of the
+ * algorithms is reflected in the IPAActiveState, which stores the values most
+ * recently computed by the IPA algorithms.
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of the IPAFrameContext(s)
+ *
+ * \var IPAContext::activeState
+ * \brief The current state of IPA algorithms
+ */
+
+/**
+ * \var IPASessionConfiguration::gamma
+ * \brief Gamma value to be used in the raw image processing
+ */
+
+/**
+ * \var IPAActiveState::blc
+ * \brief Context for the Black Level algorithm
+ *
+ * \var IPAActiveState::blc.level
+ * \brief Currently determined black level
+ */
+
+/**
+ * \var IPAActiveState::gains
+ * \brief Context for gains in the Colors algorithm
+ *
+ * \var IPAActiveState::gains.red
+ * \brief Gain of red color
+ *
+ * \var IPAActiveState::gains.green
+ * \brief Gain of green color
+ *
+ * \var IPAActiveState::gains.blue
+ * \brief Gain of blue color
+ */
+
+/**
+ * \var IPAActiveState::agc
+ * \brief Context for the AGC algorithm
+ *
+ * \var IPAActiveState::agc.exposure
+ * \brief Current exposure value
+ *
+ * \var IPAActiveState::agc.again
+ * \brief Current analog gain value
+ */
+
+/**
+ * \var IPAActiveState::gamma
+ * \brief Context for gamma in the Colors algorithm
+ *
+ * \var IPAActiveState::gamma.gammaTable
+ * \brief Computed gamma table
+ *
+ * \var IPAActiveState::gamma.blackLevel
+ * \brief Black level used for the gamma table computation
+ */
+
+} /* namespace libcamera::ipa::soft */
diff --git a/src/ipa/simple/ipa_context.h b/src/ipa/simple/ipa_context.h
new file mode 100644
index 00000000..4af51306
--- /dev/null
+++ b/src/ipa/simple/ipa_context.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Simple pipeline IPA Context
+ */
+
+#pragma once
+
+#include <array>
+#include <optional>
+#include <stdint.h>
+
+#include <libcamera/controls.h>
+
+#include <libipa/fc_queue.h>
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+struct IPASessionConfiguration {
+ float gamma;
+ struct {
+ int32_t exposureMin, exposureMax;
+ double againMin, againMax, againMinStep;
+ } agc;
+ struct {
+ std::optional<uint8_t> level;
+ } black;
+};
+
+struct IPAActiveState {
+ struct {
+ uint8_t level;
+ } blc;
+
+ struct {
+ double red;
+ double green;
+ double blue;
+ } gains;
+
+ static constexpr unsigned int kGammaLookupSize = 1024;
+ struct {
+ std::array<double, kGammaLookupSize> gammaTable;
+ uint8_t blackLevel;
+ double contrast;
+ } gamma;
+ struct {
+ /* 0..2 range, 1.0 = normal */
+ std::optional<double> contrast;
+ } knobs;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ int32_t exposure;
+ double gain;
+ } sensor;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+ FCQueue<IPAFrameContext> frameContexts;
+ ControlInfoMap::Map ctrlMap;
+};
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/meson.build b/src/ipa/simple/meson.build
new file mode 100644
index 00000000..2f9f15f4
--- /dev/null
+++ b/src/ipa/simple/meson.build
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_soft_simple'
+
+soft_simple_sources = files([
+ 'ipa_context.cpp',
+ 'soft_simple.cpp',
+])
+
+soft_simple_sources += soft_simple_ipa_algorithms
+
+mod = shared_module(ipa_name, soft_simple_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/simple/module.h b/src/ipa/simple/module.h
new file mode 100644
index 00000000..8d4d53fb
--- /dev/null
+++ b/src/ipa/simple/module.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Software ISP IPA Module
+ */
+
+#pragma once
+
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
+ DebayerParams, SwIspStats>;
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
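The Module typedef above fixes the template parameters that every soft IPA algorithm sees, so BlackLevel, Lut and the others all implement the same hooks. A hypothetical minimal algorithm, sketched against the interface visible in blc.h and lut.h (the class name is illustrative; a real algorithm would also be registered with REGISTER_IPA_ALGORITHM() as lut.cpp does):

```cpp
#include "algorithm.h"

namespace libcamera::ipa::soft::algorithms {

/* Hypothetical no-op algorithm showing the hooks available to a soft
 * IPA algorithm; each hook receives the shared IPAContext. */
class Noop : public Algorithm
{
public:
	int init([[maybe_unused]] IPAContext &context,
		 [[maybe_unused]] const YamlObject &tuningData) override
	{
		/* Parse tuning data, register controls in context.ctrlMap. */
		return 0;
	}

	int configure([[maybe_unused]] IPAContext &context,
		      [[maybe_unused]] const IPAConfigInfo &configInfo) override
	{
		/* Reset per-session state in context.activeState. */
		return 0;
	}

	void process([[maybe_unused]] IPAContext &context,
		     [[maybe_unused]] const uint32_t frame,
		     [[maybe_unused]] IPAFrameContext &frameContext,
		     [[maybe_unused]] const SwIspStats *stats,
		     [[maybe_unused]] ControlList &metadata) override
	{
		/* Consume statistics, update activeState for the next frame. */
	}
};

} /* namespace libcamera::ipa::soft::algorithms */
```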
diff --git a/src/ipa/simple/soft_simple.cpp b/src/ipa/simple/soft_simple.cpp
new file mode 100644
index 00000000..b26e4e15
--- /dev/null
+++ b/src/ipa/simple/soft_simple.cpp
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple Software Image Processing Algorithm module
+ */
+
+#include <stdint.h>
+#include <sys/mman.h>
+
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/camera_sensor_helper.h"
+
+#include "module.h"
+
+namespace libcamera {
+LOG_DEFINE_CATEGORY(IPASoft)
+
+namespace ipa::soft {
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+class IPASoftSimple : public ipa::soft::IPASoftInterface, public Module
+{
+public:
+ IPASoftSimple()
+ : context_(kMaxFrameContexts)
+ {
+ }
+
+ ~IPASoftSimple();
+
+ int init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap,
+ ControlInfoMap *ipaControls) override;
+ int configure(const IPAConfigInfo &configInfo) override;
+
+ int start() override;
+ void stop() override;
+
+ void queueRequest(const uint32_t frame, const ControlList &controls) override;
+ void computeParams(const uint32_t frame) override;
+ void processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateExposure(double exposureMSV);
+
+ DebayerParams *params_;
+ SwIspStats *stats_;
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+ ControlInfoMap sensorInfoMap_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+IPASoftSimple::~IPASoftSimple()
+{
+ if (stats_)
+ munmap(stats_, sizeof(SwIspStats));
+ if (params_)
+ munmap(params_, sizeof(DebayerParams));
+}
+
+int IPASoftSimple::init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap,
+ ControlInfoMap *ipaControls)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPASoft, Warning)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ }
+
+ /* Load the tuning data file */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPASoft, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ /* \todo Use the IPA configuration file for real. */
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ LOG(IPASoft, Debug) << "Tuning file version " << version;
+
+ if (!data->contains("algorithms")) {
+ LOG(IPASoft, Error) << "Tuning file doesn't contain algorithms";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ params_ = nullptr;
+ stats_ = nullptr;
+
+ if (!fdStats.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Statistics handle";
+ return -ENODEV;
+ }
+
+ if (!fdParams.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Parameters handle";
+ return -ENODEV;
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(DebayerParams), PROT_WRITE,
+ MAP_SHARED, fdParams.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Parameters";
+ return -errno;
+ }
+
+ params_ = static_cast<DebayerParams *>(mem);
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(SwIspStats), PROT_READ,
+ MAP_SHARED, fdStats.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Statistics";
+ return -errno;
+ }
+
+ stats_ = static_cast<SwIspStats *>(mem);
+ }
+
+ ControlInfoMap::Map ctrlMap = context_.ctrlMap;
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ /*
+ * Check if the sensor driver supports the controls required by the
+ * Soft IPA.
+ * Don't save the min and max control values yet, as e.g. the limits
+ * for V4L2_CID_EXPOSURE depend on the configured sensor resolution.
+ */
+ if (sensorInfoMap.find(V4L2_CID_EXPOSURE) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have exposure control";
+ return -EINVAL;
+ }
+
+ if (sensorInfoMap.find(V4L2_CID_ANALOGUE_GAIN) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have gain control";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int IPASoftSimple::configure(const IPAConfigInfo &configInfo)
+{
+ sensorInfoMap_ = configInfo.sensorControls;
+
+ const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second;
+ const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second;
+
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
+
+ context_.configuration.agc.exposureMin = exposureInfo.min().get<int32_t>();
+ context_.configuration.agc.exposureMax = exposureInfo.max().get<int32_t>();
+ if (!context_.configuration.agc.exposureMin) {
+ LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear";
+ context_.configuration.agc.exposureMin = 1;
+ }
+
+ int32_t againMin = gainInfo.min().get<int32_t>();
+ int32_t againMax = gainInfo.max().get<int32_t>();
+
+ if (camHelper_) {
+ context_.configuration.agc.againMin = camHelper_->gain(againMin);
+ context_.configuration.agc.againMax = camHelper_->gain(againMax);
+ context_.configuration.agc.againMinStep =
+ (context_.configuration.agc.againMax -
+ context_.configuration.agc.againMin) /
+ 100.0;
+ if (camHelper_->blackLevel().has_value()) {
+ /*
+			 * The black level from camHelper_ is a 16 bit value, while the
+			 * software ISP works with 8 bit pixel values, both regardless of
+			 * the actual sensor pixel width. Hence we obtain the pixel-based
+			 * black level by dividing the value from the helper by 256.
+ */
+ context_.configuration.black.level =
+ camHelper_->blackLevel().value() / 256;
+ }
+ } else {
+ /*
+ * The camera sensor gain (g) is usually not equal to the value written
+		 * into the gain register (x). But the way the AGC algorithm changes
+		 * the gain value to bring the total exposure closer to the optimum
+		 * assumes that g(x) is not too far from a linear function. If the
+		 * minimal gain is 0, g(x) is likely to be far from linear, e.g.
+		 * g(x) = a / (b * x + c). To avoid unexpected changes to the gain by
+ * the AGC algorithm (abrupt near one edge, and very small near the
+ * other) we limit the range of the gain values used.
+ */
+ context_.configuration.agc.againMax = againMax;
+ if (!againMin) {
+ LOG(IPASoft, Warning)
+ << "Minimum gain is zero, that can't be linear";
+ context_.configuration.agc.againMin =
+ std::min(100, againMin / 2 + againMax / 2);
+ }
+ context_.configuration.agc.againMinStep = 1.0;
+ }
+
+ for (auto const &algo : algorithms()) {
+ int ret = algo->configure(context_, configInfo);
+ if (ret)
+ return ret;
+ }
+
+ LOG(IPASoft, Info)
+ << "Exposure " << context_.configuration.agc.exposureMin << "-"
+ << context_.configuration.agc.exposureMax
+ << ", gain " << context_.configuration.agc.againMin << "-"
+ << context_.configuration.agc.againMax
+ << " (" << context_.configuration.agc.againMinStep << ")";
+
+ return 0;
+}
+
+int IPASoftSimple::start()
+{
+ return 0;
+}
+
+void IPASoftSimple::stop()
+{
+ context_.frameContexts.clear();
+}
+
+void IPASoftSimple::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+
+ for (auto const &algo : algorithms())
+ algo->queueRequest(context_, frame, frameContext, controls);
+}
+
+void IPASoftSimple::computeParams(const uint32_t frame)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+ for (auto const &algo : algorithms())
+ algo->prepare(context_, frame, frameContext, params_);
+ setIspParams.emit();
+}
+
+void IPASoftSimple::processStats(const uint32_t frame,
+ [[maybe_unused]] const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ frameContext.sensor.exposure =
+ sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+ frameContext.sensor.gain = camHelper_ ? camHelper_->gain(again) : again;
+
+ /*
+ * Software ISP currently does not produce any metadata. Use an empty
+ * ControlList for now.
+ *
+ * \todo Implement proper metadata handling
+ */
+ ControlList metadata(controls::controls);
+ for (auto const &algo : algorithms())
+ algo->process(context_, frame, frameContext, stats_, metadata);
+
+ /* Sanity check */
+ if (!sensorControls.contains(V4L2_CID_EXPOSURE) ||
+ !sensorControls.contains(V4L2_CID_ANALOGUE_GAIN)) {
+ LOG(IPASoft, Error) << "Control(s) missing";
+ return;
+ }
+
+ ControlList ctrls(sensorInfoMap_);
+
+ auto &againNew = frameContext.sensor.gain;
+ ctrls.set(V4L2_CID_EXPOSURE, frameContext.sensor.exposure);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN,
+ static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(againNew) : againNew));
+
+ setSensorControls.emit(ctrls);
+}
+
+std::string IPASoftSimple::logPrefix() const
+{
+ return "IPASoft";
+}
+
+} /* namespace ipa::soft */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 0,
+ "simple",
+ "simple",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::soft::IPASoftSimple();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
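Two of the conversions in configure() above are easy to sanity-check with concrete numbers. A short sketch with made-up values (the 4096 black level and the 1.0-16.0 gain range are illustrative only):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
	/* CameraSensorHelper::blackLevel() reports a 16-bit value while the
	 * software ISP uses 8-bit pixels, hence the division by 256:
	 * a reported level of 4096 maps to 4096 / 256 = 16. */
	const uint16_t helperBlackLevel = 4096;
	const uint8_t pixelBlackLevel = helperBlackLevel / 256;

	/* With a sensor helper, the minimum AGC gain step is 1% of the
	 * usable gain range: here (16.0 - 1.0) / 100 = 0.15. */
	const double againMin = 1.0, againMax = 16.0;
	const double againMinStep = (againMax - againMin) / 100.0;

	printf("black level: %u, gain step: %.2f\n",
	       pixelBlackLevel, againMinStep);
	return 0;
}
```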
diff --git a/src/ipa/vimc/meson.build b/src/ipa/vimc/meson.build
index 264a2d9a..2cc5f80b 100644
--- a/src/ipa/vimc/meson.build
+++ b/src/ipa/vimc/meson.build
@@ -2,12 +2,10 @@
ipa_name = 'ipa_vimc'
-mod = shared_module(ipa_name,
- ['vimc.cpp', libcamera_generated_ipa_headers],
+mod = shared_module(ipa_name, 'vimc.cpp',
name_prefix : '',
- include_directories : [ipa_includes, libipa_includes],
- dependencies : libcamera_private,
- link_with : libipa,
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
install : true,
install_dir : ipa_install_dir)
diff --git a/src/ipa/vimc/vimc.cpp b/src/ipa/vimc/vimc.cpp
index 2c255778..a1351a0f 100644
--- a/src/ipa/vimc/vimc.cpp
+++ b/src/ipa/vimc/vimc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * vimc.cpp - Vimc Image Processing Algorithm module
+ * Vimc Image Processing Algorithm module
*/
#include <libcamera/ipa/vimc_ipa_interface.h>
@@ -14,6 +14,7 @@
#include <iostream>
#include <libcamera/base/file.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/log.h>
#include <libcamera/ipa/ipa_interface.h>
@@ -47,7 +48,7 @@ public:
void unmapBuffers(const std::vector<unsigned int> &ids) override;
void queueRequest(uint32_t frame, const ControlList &controls) override;
- void fillParamsBuffer(uint32_t frame, uint32_t bufferId) override;
+ void computeParams(uint32_t frame, uint32_t bufferId) override;
private:
void initTrace();
@@ -149,7 +150,7 @@ void IPAVimc::queueRequest([[maybe_unused]] uint32_t frame,
{
}
-void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferId)
+void IPAVimc::computeParams([[maybe_unused]] uint32_t frame, uint32_t bufferId)
{
auto it = buffers_.find(bufferId);
if (it == buffers_.end()) {
@@ -158,7 +159,7 @@ void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferI
}
Flags<ipa::vimc::TestFlag> flags;
- paramsBufferReady.emit(bufferId, flags);
+ paramsComputed.emit(bufferId, flags);
}
void IPAVimc::initTrace()
@@ -200,7 +201,7 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
0,
- "PipelineHandlerVimc",
+ "vimc",
"vimc",
};
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
index be30589d..0b04629c 100644
--- a/src/libcamera/base/backtrace.cpp
+++ b/src/libcamera/base/backtrace.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#include <libcamera/base/backtrace.h>
diff --git a/src/libcamera/base/bound_method.cpp b/src/libcamera/base/bound_method.cpp
index c83d623f..322029a8 100644
--- a/src/libcamera/base/bound_method.cpp
+++ b/src/libcamera/base/bound_method.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.cpp - Method bind and invocation
+ * Method bind and invocation
*/
#include <libcamera/base/bound_method.h>
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
index 9c2d9f21..61998398 100644
--- a/src/libcamera/base/class.cpp
+++ b/src/libcamera/base/class.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.cpp - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#include <libcamera/base/class.h>
diff --git a/src/libcamera/base/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
index 4be89e81..5f4a5cb4 100644
--- a/src/libcamera/base/event_dispatcher.cpp
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.cpp - Event dispatcher
+ * Event dispatcher
*/
#include <libcamera/base/event_dispatcher.h>
diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
index 7238a316..52bfb34e 100644
--- a/src/libcamera/base/event_dispatcher_poll.cpp
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -2,19 +2,18 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#include <libcamera/base/event_dispatcher_poll.h>
-#include <algorithm>
-#include <chrono>
#include <iomanip>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>
+#include <vector>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
diff --git a/src/libcamera/base/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
index a519aec3..495c281d 100644
--- a/src/libcamera/base/event_notifier.cpp
+++ b/src/libcamera/base/event_notifier.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.cpp - File descriptor event notifier
+ * File descriptor event notifier
*/
#include <libcamera/base/event_notifier.h>
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
index d1ab1aa5..2b83a517 100644
--- a/src/libcamera/base/file.cpp
+++ b/src/libcamera/base/file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.cpp - File I/O operations
+ * File I/O operations
*/
#include <libcamera/base/file.h>
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
index 3e4320ac..9981f2ed 100644
--- a/src/libcamera/base/flags.cpp
+++ b/src/libcamera/base/flags.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.cpp - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#include <libcamera/base/flags.h>
diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp
index c8045ef7..8bf3e1da 100644
--- a/src/libcamera/base/log.cpp
+++ b/src/libcamera/base/log.cpp
@@ -2,18 +2,21 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.cpp - Logging infrastructure
+ * Logging infrastructure
*/
#include <libcamera/base/log.h>
#include <array>
+#include <charconv>
+#include <fnmatch.h>
#include <fstream>
#include <iostream>
#include <list>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <string_view>
#include <syslog.h>
#include <time.h>
#include <unordered_set>
@@ -38,8 +41,8 @@
* The levels are configurable through the LIBCAMERA_LOG_LEVELS environment
* variable that contains a comma-separated list of 'category:level' pairs.
*
- * The category names are strings and can include a wildcard ('*') character at
- * the end to match multiple categories.
+ * The category names are strings and can include a wildcard ('*') character to
+ * match multiple categories.
*
 * The levels are either numeric values, or strings containing the log level
* name. The available log levels are DEBUG, INFO, WARN, ERROR and FATAL. Log
@@ -311,15 +314,15 @@ private:
void parseLogFile();
void parseLogLevels();
- static LogSeverity parseLogLevel(const std::string &level);
+ static LogSeverity parseLogLevel(std::string_view level);
friend LogCategory;
- void registerCategory(LogCategory *category);
- LogCategory *findCategory(const char *name) const;
+ LogCategory *findOrCreateCategory(std::string_view name);
static bool destroyed_;
- std::vector<LogCategory *> categories_;
+ Mutex mutex_;
+ std::vector<std::unique_ptr<LogCategory>> categories_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
@@ -436,9 +439,6 @@ void logSetLevel(const char *category, const char *level)
Logger::~Logger()
{
destroyed_ = true;
-
- for (LogCategory *category : categories_)
- delete category;
}
/**
@@ -569,7 +569,9 @@ void Logger::logSetLevel(const char *category, const char *level)
if (severity == LogInvalid)
return;
- for (LogCategory *c : categories_) {
+ MutexLocker locker(mutex_);
+
+ for (const auto &c : categories_) {
if (c->name() == category) {
c->setSeverity(severity);
break;
@@ -640,17 +642,17 @@ void Logger::parseLogLevels()
if (!len)
continue;
- std::string category;
- std::string level;
+ std::string_view category;
+ std::string_view level;
const char *colon = static_cast<const char *>(memchr(pair, ':', len));
if (!colon) {
/* 'x' is a shortcut for '*:x'. */
category = "*";
- level = std::string(pair, len);
+ level = std::string_view(pair, len);
} else {
- category = std::string(pair, colon - pair);
- level = std::string(colon + 1, comma - colon - 1);
+ category = std::string_view(pair, colon - pair);
+ level = std::string_view(colon + 1, comma - colon - 1);
}
/* Both the category and the level must be specified. */
@@ -661,7 +663,7 @@ void Logger::parseLogLevels()
if (severity == LogInvalid)
continue;
- levels_.push_back({ category, severity });
+ levels_.emplace_back(category, severity);
}
}
@@ -675,7 +677,7 @@ void Logger::parseLogLevels()
*
* \return The log severity, or LogInvalid if the string is invalid
*/
-LogSeverity Logger::parseLogLevel(const std::string &level)
+LogSeverity Logger::parseLogLevel(std::string_view level)
{
static const char *const names[] = {
"DEBUG",
@@ -685,15 +687,13 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
"FATAL",
};
- int severity;
+ unsigned int severity = LogInvalid;
if (std::isdigit(level[0])) {
- char *endptr;
- severity = strtoul(level.c_str(), &endptr, 10);
- if (*endptr != '\0' || severity > LogFatal)
+ auto [end, ec] = std::from_chars(level.data(), level.data() + level.size(), severity);
+ if (ec != std::errc() || *end != '\0' || severity > LogFatal)
severity = LogInvalid;
} else {
- severity = LogInvalid;
for (unsigned int i = 0; i < std::size(names); ++i) {
if (names[i] == level) {
severity = i;
@@ -706,52 +706,28 @@ LogSeverity Logger::parseLogLevel(const std::string &level)
}
/**
- * \brief Register a log category with the logger
- * \param[in] category The log category
- *
- * Log categories must have unique names. It is invalid to call this function
- * if a log category with the same name already exists.
+ * \brief Find an existing log category with the given name or create one
+ * \param[in] name Name of the log category
+ * \return The pointer to the log category found or created
*/
-void Logger::registerCategory(LogCategory *category)
+LogCategory *Logger::findOrCreateCategory(std::string_view name)
{
- categories_.push_back(category);
-
- const std::string &name = category->name();
- for (const std::pair<std::string, LogSeverity> &level : levels_) {
- bool match = true;
-
- for (unsigned int i = 0; i < level.first.size(); ++i) {
- if (level.first[i] == '*')
- break;
-
- if (i >= name.size() ||
- name[i] != level.first[i]) {
- match = false;
- break;
- }
- }
+ MutexLocker locker(mutex_);
- if (match) {
- category->setSeverity(level.second);
- break;
- }
+ for (const auto &category : categories_) {
+ if (category->name() == name)
+ return category.get();
}
-}
-/**
- * \brief Find an existing log category with the given name
- * \param[in] name Name of the log category
- * \return The pointer to the found log category or nullptr if not found
- */
-LogCategory *Logger::findCategory(const char *name) const
-{
- if (auto it = std::find_if(categories_.begin(), categories_.end(),
- [name](auto c) { return c->name() == name; });
- it != categories_.end()) {
- return *it;
+ LogCategory *category = categories_.emplace_back(std::unique_ptr<LogCategory>(new LogCategory(name))).get();
+ const char *categoryName = category->name().c_str();
+
+ for (const auto &[pattern, severity] : levels_) {
+ if (fnmatch(pattern.c_str(), categoryName, FNM_NOESCAPE) == 0)
+ category->setSeverity(severity);
}
- return nullptr;
+ return category;
}
/**
@@ -787,25 +763,16 @@ LogCategory *Logger::findCategory(const char *name) const
*
* \return The pointer to the LogCategory
*/
-LogCategory *LogCategory::create(const char *name)
+LogCategory *LogCategory::create(std::string_view name)
{
- static Mutex mutex_;
- MutexLocker locker(mutex_);
- LogCategory *category = Logger::instance()->findCategory(name);
-
- if (!category) {
- category = new LogCategory(name);
- Logger::instance()->registerCategory(category);
- }
-
- return category;
+ return Logger::instance()->findOrCreateCategory(name);
}
/**
* \brief Construct a log category
* \param[in] name The category name
*/
-LogCategory::LogCategory(const char *name)
+LogCategory::LogCategory(std::string_view name)
: name_(name), severity_(LogSeverity::LogInfo)
{
}
@@ -824,15 +791,12 @@ LogCategory::LogCategory(const char *name)
*/
/**
+ * \fn LogCategory::setSeverity(LogSeverity severity)
* \brief Set the severity of the log category
*
* Messages of severity higher than or equal to the severity of the log category
* are printed, other messages are discarded.
*/
-void LogCategory::setSeverity(LogSeverity severity)
-{
- severity_ = severity;
-}
/**
* \brief Retrieve the default log category
@@ -874,39 +838,12 @@ const LogCategory &LogCategory::defaultCategory()
*/
LogMessage::LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity,
- const std::string &prefix)
- : category_(category), severity_(severity), prefix_(prefix)
+ std::string prefix)
+ : category_(category), severity_(severity),
+ timestamp_(utils::clock::now()),
+ fileInfo_(static_cast<std::ostringstream &&>(std::ostringstream() << utils::basename(fileName) << ":" << line).str()),
+ prefix_(std::move(prefix))
{
- init(fileName, line);
-}
-
-/**
- * \brief Move-construct a log message
- * \param[in] other The other message
- *
- * The move constructor is meant to support the _log() functions. Thanks to copy
- * elision it will likely never be called, but C++11 only permits copy elision,
- * it doesn't enforce it unlike C++17. To avoid potential link errors depending
- * on the compiler type and version, and optimization level, the move
- * constructor is defined even if it will likely never be called, and ensures
- * that the destructor of the \a other message will not output anything to the
- * log by setting the severity to LogInvalid.
- */
-LogMessage::LogMessage(LogMessage &&other)
- : msgStream_(std::move(other.msgStream_)), category_(other.category_),
- severity_(other.severity_)
-{
- other.severity_ = LogInvalid;
-}
-
-void LogMessage::init(const char *fileName, unsigned int line)
-{
- /* Log the timestamp, severity and file information. */
- timestamp_ = utils::clock::now();
-
- std::ostringstream ossFileInfo;
- ossFileInfo << utils::basename(fileName) << ":" << line;
- fileInfo_ = ossFileInfo.str();
}
LogMessage::~LogMessage()
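The practical effect of the fnmatch() switch is that a wildcard in a LIBCAMERA_LOG_LEVELS pattern is no longer restricted to the end of the pattern. A small sketch of the matching rule that findOrCreateCategory() now applies to category names (the names here are illustrative):

```cpp
#include <cstdio>
#include <fnmatch.h>

int main()
{
	/* With fnmatch()-based matching, LIBCAMERA_LOG_LEVELS="*Lut:DEBUG"
	 * now matches any category ending in "Lut", something the old
	 * prefix-only matching could not express. */
	const char *pattern = "*Lut";
	const char *names[] = { "IPASoftLut", "IPASoft", "Lut" };

	for (const char *name : names)
		printf("%-12s %s\n", name,
		       fnmatch(pattern, name, FNM_NOESCAPE) == 0 ?
			       "matches" : "no match");
	return 0;
}
```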
diff --git a/src/libcamera/base/memfd.cpp b/src/libcamera/base/memfd.cpp
new file mode 100644
index 00000000..ed0b299b
--- /dev/null
+++ b/src/libcamera/base/memfd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Anonymous file creation
+ */
+
+#include <libcamera/base/memfd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/memfd.h
+ * \brief Anonymous file creation
+ */
+
+#ifndef __DOXYGEN__
+namespace {
+
+/* uClibc doesn't provide the file sealing API. */
+#if not HAVE_FILE_SEALS
+#define F_ADD_SEALS 1033
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#endif
+
+#if not HAVE_MEMFD_CREATE
+int memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(SYS_memfd_create, name, flags);
+}
+#endif
+
+} /* namespace */
+#endif /* __DOXYGEN__ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(File)
+
+/**
+ * \class MemFd
+ * \brief Helper class to create anonymous files
+ *
+ * Anonymous files behave like regular files, and can be modified, truncated,
+ * memory-mapped and so on. Unlike regular files, however, they live in RAM and
+ * don't have permanent backing storage.
+ */
+
+/**
+ * \enum MemFd::Seal
+ * \brief Seals for the MemFd::create() function
+ * \var MemFd::Seal::None
+ * \brief No seals (used as default value)
+ * \var MemFd::Seal::Shrink
+ * \brief Prevent the memfd from shrinking
+ * \var MemFd::Seal::Grow
+ * \brief Prevent the memfd from growing
+ */
+
+/**
+ * \typedef MemFd::Seals
+ * \brief A bitwise combination of MemFd::Seal values
+ */
+
+/**
+ * \brief Create an anonymous file
+ * \param[in] name The file name (displayed in symbolic links in /proc/self/fd/)
+ * \param[in] size The file size
+ * \param[in] seals The file seals
+ *
+ * This function is a helper that wraps anonymous file (memfd) creation and
+ * sets the file size and optional seals.
+ *
+ * \return The descriptor of the anonymous file if creation succeeded, or an
+ * invalid UniqueFD otherwise
+ */
+UniqueFD MemFd::create(const char *name, std::size_t size, Seals seals)
+{
+ int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to allocate memfd storage for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ UniqueFD memfd(ret);
+
+ ret = ftruncate(memfd.get(), size);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to set memfd size for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ if (seals) {
+ int fileSeals = (seals & Seal::Shrink ? F_SEAL_SHRINK : 0)
+ | (seals & Seal::Grow ? F_SEAL_GROW : 0);
+
+ ret = fcntl(memfd.get(), F_ADD_SEALS, fileSeals);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to seal the memfd for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+ }
+
+ return memfd;
+}
+
+} /* namespace libcamera */
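A minimal usage sketch for the helper above, assuming only the MemFd::create() API shown in this file and the bitwise operators implied by the Seals typedef (the file name and size are illustrative):

```cpp
#include <cstddef>
#include <sys/mman.h>

#include <libcamera/base/memfd.h>
#include <libcamera/base/unique_fd.h>

using namespace libcamera;

/* Create a fixed-size, sealed anonymous file and map it; on failure
 * MemFd::create() returns an invalid UniqueFD instead of throwing. */
int allocateSharedArea(std::size_t size)
{
	UniqueFD fd = MemFd::create("shared-area", size,
				    MemFd::Seal::Shrink | MemFd::Seal::Grow);
	if (!fd.isValid())
		return -1;

	void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd.get(), 0);
	if (mem == MAP_FAILED)
		return -1;

	/* ... use mem ...; fd closes itself when going out of scope. */
	munmap(mem, size);
	return 0;
}
```

Sealing matters whenever the file descriptor is handed to another process that must not be able to resize the file underneath the creator's mapping.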
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
index 7a7fd7e4..a742dfdf 100644
--- a/src/libcamera/base/meson.build
+++ b/src/libcamera/base/meson.build
@@ -1,24 +1,28 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_base_sources = files([
- 'backtrace.cpp',
- 'class.cpp',
+libcamera_base_public_sources = files([
'bound_method.cpp',
+ 'class.cpp',
+ 'flags.cpp',
+ 'object.cpp',
+ 'shared_fd.cpp',
+ 'signal.cpp',
+ 'unique_fd.cpp',
+])
+
+libcamera_base_internal_sources = files([
+ 'backtrace.cpp',
'event_dispatcher.cpp',
'event_dispatcher_poll.cpp',
'event_notifier.cpp',
'file.cpp',
- 'flags.cpp',
'log.cpp',
+ 'memfd.cpp',
'message.cpp',
'mutex.cpp',
- 'object.cpp',
'semaphore.cpp',
- 'shared_fd.cpp',
- 'signal.cpp',
'thread.cpp',
'timer.cpp',
- 'unique_fd.cpp',
'utils.cpp',
])
@@ -49,7 +53,11 @@ libcamera_base_deps = [
libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
libcamera_base_lib = shared_library('libcamera-base',
- [libcamera_base_sources, libcamera_base_headers],
+ [
+ libcamera_base_public_sources,
+ libcamera_base_internal_sources,
+ libcamera_base_headers,
+ ],
version : libcamera_version,
soversion : libcamera_soversion,
name_prefix : '',
diff --git a/src/libcamera/base/message.cpp b/src/libcamera/base/message.cpp
index 2da2a7ed..098faac6 100644
--- a/src/libcamera/base/message.cpp
+++ b/src/libcamera/base/message.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Message queue support
+ * Message queue support
*/
#include <libcamera/base/message.h>
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
index e34e8618..2a4542c4 100644
--- a/src/libcamera/base/mutex.cpp
+++ b/src/libcamera/base/mutex.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.cpp - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#include <libcamera/base/mutex.h>
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
index 81054b58..37d133cc 100644
--- a/src/libcamera/base/object.cpp
+++ b/src/libcamera/base/object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.cpp - Base object
+ * Base object
*/
#include <libcamera/base/object.h>
@@ -350,7 +350,7 @@ void Object::connect(SignalBase *signal)
void Object::disconnect(SignalBase *signal)
{
- for (auto iter = signals_.begin(); iter != signals_.end(); ) {
+ for (auto iter = signals_.begin(); iter != signals_.end();) {
if (*iter == signal)
iter = signals_.erase(iter);
else
diff --git a/src/libcamera/base/semaphore.cpp b/src/libcamera/base/semaphore.cpp
index 6217e386..862f3b31 100644
--- a/src/libcamera/base/semaphore.cpp
+++ b/src/libcamera/base/semaphore.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.cpp - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#include <libcamera/base/semaphore.h>
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
index c711cf57..7afc8ca5 100644
--- a/src/libcamera/base/shared_fd.cpp
+++ b/src/libcamera/base/shared_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.cpp - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#include <libcamera/base/shared_fd.h>
diff --git a/src/libcamera/base/signal.cpp b/src/libcamera/base/signal.cpp
index f1018b37..7876a21d 100644
--- a/src/libcamera/base/signal.cpp
+++ b/src/libcamera/base/signal.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal & slot implementation
+ * Signal & slot implementation
*/
#include <libcamera/base/signal.h>
@@ -49,7 +49,7 @@ void SignalBase::disconnect(std::function<bool(SlotList::iterator &)> match)
{
MutexLocker locker(signalsLock);
- for (auto iter = slots_.begin(); iter != slots_.end(); ) {
+ for (auto iter = slots_.begin(); iter != slots_.end();) {
if (match(iter)) {
Object *object = (*iter)->object();
if (object)
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
index 4ac72036..d8fe0d69 100644
--- a/src/libcamera/base/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.cpp - Thread support
+ * Thread support
*/
#include <libcamera/base/thread.h>
#include <atomic>
#include <list>
+#include <optional>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
@@ -64,42 +65,6 @@
* receiver's event loop, running in the receiver's thread. This mechanism can
* be overridden by selecting a different connection type when calling
* Signal::connect().
- *
- * \section thread-reentrancy Reentrancy and Thread-Safety
- *
- * Through the documentation, several terms are used to define how classes and
- * their member functions can be used from multiple threads.
- *
- * - A **reentrant** function may be called simultaneously from multiple
- * threads if and only if each invocation uses a different instance of the
- * class. This is the default for all member functions not explictly marked
- * otherwise.
- *
- * - \anchor thread-safe A **thread-safe** function may be called
- * simultaneously from multiple threads on the same instance of a class. A
- * thread-safe function is thus reentrant. Thread-safe functions may also be
- * called simultaneously with any other reentrant function of the same class
- * on the same instance.
- *
- * - \anchor thread-bound A **thread-bound** function may be called only from
- * the thread that the class instances lives in (see section \ref
- * thread-objects). For instances of classes that do not derive from the
- * Object class, this is the thread in which the instance was created. A
- * thread-bound function is not thread-safe, and may or may not be reentrant.
- *
- * Neither reentrancy nor thread-safety, in this context, mean that a function
- * may be called simultaneously from the same thread, for instance from a
- * callback invoked by the function. This may deadlock and isn't allowed unless
- * separately documented.
- *
- * A class is defined as reentrant, thread-safe or thread-bound if all its
- * member functions are reentrant, thread-safe or thread-bound respectively.
- * Some member functions may additionally be documented as having additional
- * thread-related attributes.
- *
- * Most classes are reentrant but not thread-safe, as making them fully
- * thread-safe would incur locking costs considered prohibitive for the
- * expected use cases.
*/
/**
@@ -164,6 +129,8 @@ private:
int exitCode_;
MessageQueue messages_;
+
+ std::optional<cpu_set_t> cpuset_;
};
/**
@@ -290,6 +257,8 @@ void Thread::start()
data_->exit_.store(false, std::memory_order_relaxed);
thread_ = std::thread(&Thread::startThread, this);
+
+ setThreadAffinityInternal();
}
void Thread::startThread()
@@ -447,6 +416,48 @@ bool Thread::wait(utils::duration duration)
}
/**
+ * \brief Set the CPU affinity mask of the thread
+ * \param[in] cpus The list of CPU indices to which the thread's affinity is set
+ *
+ * The CPU indices should be within [0, std::thread::hardware_concurrency()).
+ * If any index is invalid, this function won't modify the thread affinity and
+ * will return an error.
+ *
+ * \return 0 if all indices are valid, -EINVAL otherwise
+ */
+int Thread::setThreadAffinity(const Span<const unsigned int> &cpus)
+{
+ const unsigned int numCpus = std::thread::hardware_concurrency();
+
+ MutexLocker locker(data_->mutex_);
+ data_->cpuset_ = cpu_set_t();
+ CPU_ZERO(&data_->cpuset_.value());
+
+ for (const unsigned int &cpu : cpus) {
+ if (cpu >= numCpus) {
+ LOG(Thread, Error) << "Invalid CPU " << cpu << "for thread affinity";
+ return -EINVAL;
+ }
+
+ CPU_SET(cpu, &data_->cpuset_.value());
+ }
+
+ if (data_->running_)
+ setThreadAffinityInternal();
+
+ return 0;
+}
+
+void Thread::setThreadAffinityInternal()
+{
+ if (!data_->cpuset_)
+ return;
+
+ const cpu_set_t &cpuset = data_->cpuset_.value();
+ pthread_setaffinity_np(thread_.native_handle(), sizeof(cpuset), &cpuset);
+}
+
+/**
* \brief Check if the thread is running
*
* A Thread instance is considered as running once the underlying thread has
@@ -593,10 +604,12 @@ void Thread::removeMessages(Object *receiver)
/**
* \brief Dispatch posted messages for this thread
* \param[in] type The message type
+ * \param[in] receiver The receiver whose messages to dispatch
*
- * This function immediately dispatches all the messages previously posted for
- * this thread with postMessage() that match the message \a type. If the \a type
- * is Message::Type::None, all messages are dispatched.
+ * This function immediately dispatches all the messages of the given \a type
+ * previously posted to this thread for the \a receiver with postMessage(). If
+ * the \a type is Message::Type::None, all message types are dispatched. If the
+ * \a receiver is null, messages to all receivers are dispatched.
*
* Messages shall only be dispatched from the current thread, typically within
* the thread from the run() function. Calling this function outside of the
@@ -606,7 +619,7 @@ void Thread::removeMessages(Object *receiver)
* same thread from an object's message handler. It guarantees delivery of
* messages in the order they have been posted in all cases.
*/
-void Thread::dispatchMessages(Message::Type type)
+void Thread::dispatchMessages(Message::Type type, Object *receiver)
{
ASSERT(data_ == ThreadData::current());
@@ -623,6 +636,9 @@ void Thread::dispatchMessages(Message::Type type)
if (type != Message::Type::None && msg->type() != type)
continue;
+ if (receiver && receiver != msg->receiver_)
+ continue;
+
/*
* Move the message, setting the entry in the list to null. It
* will cause recursive calls to ignore the entry, and the erase
@@ -630,12 +646,12 @@ void Thread::dispatchMessages(Message::Type type)
*/
std::unique_ptr<Message> message = std::move(msg);
- Object *receiver = message->receiver_;
- ASSERT(data_ == receiver->thread()->data_);
- receiver->pendingMessages_--;
+ Object *messageReceiver = message->receiver_;
+ ASSERT(data_ == messageReceiver->thread()->data_);
+ messageReceiver->pendingMessages_--;
locker.unlock();
- receiver->message(message.get());
+ messageReceiver->message(message.get());
message.reset();
locker.lock();
}
@@ -646,7 +662,7 @@ void Thread::dispatchMessages(Message::Type type)
* the outer calls.
*/
if (!--data_->messages_.recursion_) {
- for (auto iter = messages.begin(); iter != messages.end(); ) {
+ for (auto iter = messages.begin(); iter != messages.end();) {
if (!*iter)
iter = messages.erase(iter);
else
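A usage sketch for the new affinity API, assuming only the Thread::setThreadAffinity() behaviour documented above (the CPU indices are illustrative):

```cpp
#include <libcamera/base/thread.h>

using namespace libcamera;

int main()
{
	Thread thread;

	/*
	 * Pin the thread to CPUs 0 and 2. Indices outside
	 * [0, std::thread::hardware_concurrency()) make the call fail with
	 * -EINVAL without touching the affinity. The mask may be set before
	 * start(); it is then applied once the thread actually runs.
	 */
	const unsigned int cpus[] = { 0, 2 };
	if (thread.setThreadAffinity(cpus) < 0)
		return 1;

	thread.start();
	thread.exit(0);
	thread.wait();

	return 0;
}
```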
diff --git a/src/libcamera/base/timer.cpp b/src/libcamera/base/timer.cpp
index 24dbf1e8..7b0f3725 100644
--- a/src/libcamera/base/timer.cpp
+++ b/src/libcamera/base/timer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Generic timer
+ * Generic timer
*/
#include <libcamera/base/timer.h>
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
index 83d6919c..d0649e4d 100644
--- a/src/libcamera/base/unique_fd.cpp
+++ b/src/libcamera/base/unique_fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.cpp - File descriptor wrapper that owns a file descriptor
+ * File descriptor wrapper that owns a file descriptor
*/
#include <libcamera/base/unique_fd.h>
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
index 2f4c3177..bcfc1941 100644
--- a/src/libcamera/base/utils.cpp
+++ b/src/libcamera/base/utils.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * utils.cpp - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#include <libcamera/base/utils.h>
@@ -276,21 +276,6 @@ std::string details::StringSplitter::iterator::operator*() const
return ss_->str_.substr(pos_, count);
}
-bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const
-{
- return pos_ != other.pos_;
-}
-
-details::StringSplitter::iterator details::StringSplitter::begin() const
-{
- return iterator(this, 0);
-}
-
-details::StringSplitter::iterator details::StringSplitter::end() const
-{
- return iterator(this, std::string::npos);
-}
-
/**
* \fn template<typename Container, typename UnaryOp> \
* std::string utils::join(const Container &items, const std::string &sep, UnaryOp op)
@@ -517,7 +502,7 @@ double strtod(const char *__restrict nptr, char **__restrict endptr)
* If the libc implementation doesn't provide locale object support,
* assume that strtod() is locale-independent.
*/
- return strtod(nptr, endptr);
+ return ::strtod(nptr, endptr);
#endif
}
@@ -531,6 +516,138 @@ double strtod(const char *__restrict nptr, char **__restrict endptr)
* \return The value of e converted to its underlying type
*/
+/**
+ * \class ScopeExitActions
+ * \brief An object that performs actions upon destruction
+ *
+ * The ScopeExitActions class is a simple object that performs user-provided
+ * actions upon destruction. It is meant to simplify cleanup tasks in error
+ * handling paths.
+ *
+ * When the code flow performs multiple sequential actions that each need a
+ * corresponding cleanup action, error handling quickly becomes tedious:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret) {
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * ret = startConsumer();
+ * if (ret) {
+ * stopProducer();
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * return 0;
+ * }
+ * \endcode
+ *
+ * This is prone to programming mistakes, as cleanup actions can easily be
+ * forgotten or ordered incorrectly. One strategy to simplify error handling is
+ * to use goto statements:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret)
+ * goto error_free;
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * goto error_stop;
+ *
+ * return 0;
+ *
+ * error_stop:
+ * stopProducer();
+ * error_free:
+ * freeMemory();
+ * return ret;
+ * }
+ * \endcode
+ *
+ * While this may be considered better, this solution is still quite
+ * error-prone. Besides the risk of picking the wrong error label, the error
+ * handling logic is separated from the normal code flow, which increases the
+ * risk of error when refactoring the code. Additionally, C++ doesn't allow
+ * goto statements to jump over local variable declarations, which can make
+ * usage of this pattern more difficult.
+ *
+ * The ScopeExitActions class solves these issues by allowing code that
+ * requires cleanup actions to be grouped with its corresponding error handling
+ * code:
+ *
+ * \code{.cpp}
+ * {
+ * ScopeExitActions actions;
+ *
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { freeMemory(); };
+ *
+ * ret = startProducer();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { stopProducer(); };
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * return ret;
+ *
+ * actions.release();
+ * return 0;
+ * }
+ * \endcode
+ *
+ * Error handlers are executed when the ScopeExitActions instance is destroyed,
+ * in the reverse order of their addition.
+ */
+
+ScopeExitActions::~ScopeExitActions()
+{
+ for (const auto &action : utils::reverse(actions_))
+ action();
+}
+
+/**
+ * \brief Add an exit action
+ * \param[in] action The action
+ *
+ * Add an exit action to the ScopeExitActions. Actions will be called upon
+ * destruction in the reverse order of their addition.
+ */
+void ScopeExitActions::operator+=(std::function<void()> &&action)
+{
+ actions_.push_back(std::move(action));
+}
+
+/**
+ * \brief Remove all exit actions
+ *
+ * This function should be called in scope exit paths that don't need the
+ * actions to be executed, such as success return paths from a function when
+ * the ScopeExitActions is used for error cleanup.
+ */
+void ScopeExitActions::release()
+{
+ actions_.clear();
+}
+
} /* namespace utils */
#ifndef __DOXYGEN__
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
index 20aedfa6..3dab91fc 100644
--- a/src/libcamera/bayer_format.cpp
+++ b/src/libcamera/bayer_format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.cpp - Class to represent Bayer formats
+ * Class to represent Bayer formats
*/
#include "libcamera/internal/bayer_format.h"
@@ -61,6 +61,10 @@ namespace libcamera {
* \brief Format uses MIPI CSI-2 style packing
* \var BayerFormat::Packing::IPU3
* \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
*/
namespace {
@@ -164,6 +168,14 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
{ { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
{ formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
{ { BayerFormat::MONO, 8, BayerFormat::Packing::None },
{ formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
{ { BayerFormat::MONO, 10, BayerFormat::Packing::None },
@@ -172,8 +184,12 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
{ formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
{ { BayerFormat::MONO, 12, BayerFormat::Packing::None },
{ formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::CSI2 },
+ { formats::R12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y12P) } },
{ { BayerFormat::MONO, 16, BayerFormat::Packing::None },
{ formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
};
const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
@@ -209,6 +225,10 @@ const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
{ MEDIA_BUS_FMT_SGBRG16_1X16, { BayerFormat::GBRG, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_SGRBG16_1X16, { BayerFormat::GRBG, 16, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, { BayerFormat::BGGR, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, { BayerFormat::GBRG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, { BayerFormat::GRBG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, { BayerFormat::RGGB, 20, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
{ MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
@@ -303,6 +323,10 @@ std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
out << "-CSI2P";
else if (f.packing == BayerFormat::Packing::IPU3)
out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
return out;
}
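
For illustration, a minimal sketch of how the new packing suffixes appear when a BayerFormat is streamed (assumes the internal header is available; the exact string follows the operator above):

#include <iostream>

#include "libcamera/internal/bayer_format.h"

using namespace libcamera;

int main()
{
	/* A 16-bit BGGR format using PISP mode 1 compression. */
	BayerFormat fmt(BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1);

	/* Prints the order, bit depth and packing, e.g. "BGGR-16-PISP1". */
	std::cout << fmt << std::endl;

	return 0;
}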
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 881cd371..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
#include "libcamera/internal/byte_stream_buffer.h"
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index a71dc933..56c58519 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,26 +2,30 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
#include <array>
#include <atomic>
-#include <iomanip>
+#include <ios>
+#include <memory>
+#include <optional>
+#include <set>
+#include <sstream>
#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
#include <libcamera/color_space.h>
+#include <libcamera/control_ids.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_controls.h"
-#include "libcamera/internal/formats.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/request.h"
@@ -117,6 +121,12 @@
* of view is affected by the pipeline.
*/
+/**
+ * \internal
+ * \file libcamera/internal/camera.h
+ * \brief Internal camera device handling
+ */
+
namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
@@ -327,7 +337,7 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* \retval CameraConfiguration::Invalid The configuration is invalid and can't
* be adjusted. This may only occur in extreme cases such as when the
* configuration is empty.
- * \retval CameraConfigutation::Adjusted The configuration has been adjusted
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
* and is now valid. Parameters may have changed for any stream, and stream
* configurations may have been removed. The caller shall check the
* configuration carefully.
@@ -559,6 +569,7 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF
* \brief The vector of stream configurations
*/
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class Camera::Private
* \brief Base class for camera private data
@@ -594,6 +605,11 @@ Camera::Private::~Private()
*/
/**
+ * \fn Camera::Private::pipe() const
+ * \copydoc Camera::Private::pipe()
+ */
+
+/**
* \fn Camera::Private::validator()
* \brief Retrieve the control validator related to this camera
* \return The control validator associated with this camera
@@ -719,6 +735,7 @@ void Camera::Private::setState(State state)
{
state_.store(state, std::memory_order_release);
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \class Camera
@@ -813,6 +830,7 @@ void Camera::Private::setState(State state)
*/
/**
+ * \internal
* \brief Create a camera instance
* \param[in] d Camera private data
* \param[in] id The ID of the camera device
@@ -986,7 +1004,8 @@ int Camera::acquire()
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!d->pipe_->acquire()) {
+ if (!d->pipe_->invokeMethod(&PipelineHandler::acquire,
+ ConnectionTypeBlocking, this)) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
@@ -1021,7 +1040,8 @@ int Camera::release()
return ret == -EACCES ? -EBUSY : ret;
if (d->isAcquired())
- d->pipe_->release(this);
+ d->pipe_->invokeMethod(&PipelineHandler::release,
+ ConnectionTypeBlocking, this);
d->setState(Private::CameraAvailable);
@@ -1164,8 +1184,8 @@ int Camera::configure(CameraConfiguration *config)
if (ret < 0)
return ret;
- for (auto it : *config)
- it.setStream(nullptr);
+ for (auto &cfg : *config)
+ cfg.setStream(nullptr);
if (config->validate() != CameraConfiguration::Valid) {
LOG(Camera, Error)
@@ -1306,6 +1326,25 @@ int Camera::queueRequest(Request *request)
}
}
+ /* Pre-process AeEnable. */
+ ControlList &controls = request->controls();
+ const auto &aeEnable = controls.get(controls::AeEnable);
+ if (aeEnable) {
+ if (_d()->controlInfo_.count(controls::AnalogueGainMode.id()) &&
+ !controls.contains(controls::AnalogueGainMode.id())) {
+ controls.set(controls::AnalogueGainMode,
+ *aeEnable ? controls::AnalogueGainModeAuto
+ : controls::AnalogueGainModeManual);
+ }
+
+ if (_d()->controlInfo_.count(controls::ExposureTimeMode.id()) &&
+ !controls.contains(controls::ExposureTimeMode.id())) {
+ controls.set(controls::ExposureTimeMode,
+ *aeEnable ? controls::ExposureTimeModeAuto
+ : controls::ExposureTimeModeManual);
+ }
+ }
+
d->pipe_->invokeMethod(&PipelineHandler::queueRequest,
ConnectionTypeQueued, request);
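
As a usage sketch of the AeEnable pre-processing above (request plumbing assumed): an application that only sets AeEnable has the equivalent mode controls filled in by the core, unless it set those modes itself in the same request.

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void disableAe(Request *request)
{
	/*
	 * Camera::queueRequest() translates this into
	 * AnalogueGainModeManual and ExposureTimeModeManual, provided the
	 * camera exposes those controls and the request does not already
	 * contain them.
	 */
	request->controls().set(controls::AeEnable, false);
}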
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index cabdcf75..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
#include "libcamera/internal/camera_controls.h"
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
index b3d48199..ccc2a6a6 100644
--- a/src/libcamera/camera_lens.cpp
+++ b/src/libcamera/camera_lens.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.cpp - A camera lens
+ * A camera lens
*/
#include "libcamera/internal/camera_lens.h"
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index 355f3ada..87e6717e 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
#include "libcamera/internal/camera_manager.h"
@@ -15,6 +15,7 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/pipeline_handler.h"
/**
@@ -23,6 +24,7 @@
*/
/**
+ * \internal
* \file libcamera/internal/camera_manager.h
* \brief Internal camera manager support
*/
@@ -34,9 +36,11 @@ namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
+#ifndef __DOXYGEN_PUBLIC__
CameraManager::Private::Private()
: initialized_(false)
{
+ ipaManager_ = std::make_unique<IPAManager>();
}
int CameraManager::Private::start()
@@ -76,8 +80,10 @@ void CameraManager::Private::run()
mutex_.unlock();
cv_.notify_one();
- if (ret < 0)
+ if (ret < 0) {
+ cleanup();
return;
+ }
/* Now start processing events and messages. */
exec();
@@ -99,16 +105,37 @@ int CameraManager::Private::init()
void CameraManager::Private::createPipelineHandlers()
{
- CameraManager *const o = LIBCAMERA_O_PTR();
-
/*
* \todo Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+ * file, and only fall back on the environment variable or all
+ * handlers if there is no configuration file.
*/
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
+ /*
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
+ */
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
+
+ LOG(Camera, Debug)
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
+ }
+
+ return;
+ }
+
const std::vector<PipelineHandlerFactoryBase *> &factories =
PipelineHandlerFactoryBase::factories();
+ /* Match all the registered pipeline handlers. */
for (const PipelineHandlerFactoryBase *factory : factories) {
LOG(Camera, Debug)
<< "Found registered pipeline handler '"
@@ -117,15 +144,23 @@ void CameraManager::Private::createPipelineHandlers()
* Try each pipeline handler until it exhausts
* all the pipelines it can provide.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(o);
- if (!pipe->match(enumerator_.get()))
- break;
+ pipelineFactoryMatch(factory);
+ }
+}
- LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- }
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
}
}
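
A hypothetical invocation of the new match list (the handler names below are illustrative; valid names are the registered pipeline handler factory names):

#include <cstdlib>

#include <libcamera/camera_manager.h>

int main()
{
	/*
	 * Illustrative only: match the named handlers, in this order,
	 * instead of iterating over every registered factory.
	 */
	setenv("LIBCAMERA_PIPELINES_MATCH_LIST", "rkisp1,uvcvideo", 1);

	libcamera::CameraManager manager;
	manager.start();
	/* ... enumerate and use cameras ... */
	manager.stop();

	return 0;
}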
@@ -221,6 +256,14 @@ void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
}
/**
+ * \fn CameraManager::Private::ipaManager() const
+ * \brief Retrieve the IPAManager
+ * \context This function is \threadsafe.
+ * \return The IPAManager for this CameraManager
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
* \class CameraManager
* \brief Provide access to and manage all cameras in the system
*
@@ -345,7 +388,7 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id)
MutexLocker locker(d->mutex_);
- for (std::shared_ptr<Camera> camera : d->cameras_) {
+ for (const std::shared_ptr<Camera> &camera : d->cameras_) {
if (camera->id() == id)
return camera;
}
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
index 7356bf7d..3d1c456c 100644
--- a/src/libcamera/color_space.cpp
+++ b/src/libcamera/color_space.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.cpp - color spaces.
+ * Color spaces
*/
#include <libcamera/color_space.h>
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index d283c1c1..65668d48 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -2,51 +2,122 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.cpp : Control ID list
+ * {{mode}} ID list
*
* This file is auto-generated. Do not edit.
*/
-#include <libcamera/control_ids.h>
+#include <libcamera/{{filename}}.h>
#include <libcamera/controls.h>
/**
- * \file control_ids.h
- * \brief Camera control identifiers
+ * \file {{filename}}.h
+ * \brief Camera {{mode}} identifiers
*/
namespace libcamera {
/**
- * \brief Namespace for libcamera controls
+ * \brief Namespace for libcamera {{mode}}
*/
-namespace controls {
+namespace {{mode}} {
-${controls_doc}
+{%- for vendor, ctrls in controls -%}
-${vendor_controls_doc}
+{%- if vendor != 'libcamera' %}
+/**
+ * \brief Namespace for {{vendor}} {{mode}}
+ */
+namespace {{vendor}} {
+{%- endif -%}
+
+{% for ctrl in ctrls %}
+
+{% if ctrl.is_enum -%}
+/**
+ * \enum {{ctrl.name}}Enum
+ * \brief Supported {{ctrl.name}} values
+{%- for enum in ctrl.enum_values %}
+ *
+ * \var {{enum.name}}
+ * \brief {{enum.description|format_description}}
+{%- endfor %}
+ */
+
+/**
+ * \var {{ctrl.name}}Values
+ * \brief List of all {{ctrl.name}} supported values
+ */
+
+/**
+ * \var {{ctrl.name}}NameValueMap
+ * \brief Map of all {{ctrl.name}} supported value names (in std::string format) to value
+ */
+
+{% endif -%}
+/**
+ * \var {{ctrl.name}}
+ * \brief {{ctrl.description|format_description}}
+ */
+{%- endfor %}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
#ifndef __DOXYGEN__
/*
- * Keep the controls definitions hidden from doxygen as it incorrectly parses
+ * Keep the {{mode}} definitions hidden from doxygen as it incorrectly parses
* them as functions.
*/
-${controls_def}
+{% for vendor, ctrls in controls -%}
+
+{% if vendor != 'libcamera' %}
+namespace {{vendor}} {
+{% endif %}
+
+{%- for ctrl in ctrls %}
+{% if ctrl.is_enum -%}
+extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values = {
+{%- for enum in ctrl.enum_values %}
+ static_cast<{{ctrl.type}}>({{enum.name}}),
+{%- endfor %}
+};
+extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap = {
+{%- for enum in ctrl.enum_values %}
+ { "{{enum.name}}", {{enum.name}} },
+{%- endfor %}
+};
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}, {{ctrl.name}}NameValueMap);
+{% else -%}
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}});
+{% endif -%}
+{%- endfor %}
-${vendor_controls_def}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
-#endif
+{%- endfor %}
+#endif /* __DOXYGEN__ */
/**
- * \brief List of all supported libcamera controls
+ * \brief List of all supported libcamera {{mode}}
+{%- if mode == 'controls' %}
*
* Unless otherwise stated, all controls are bi-directional, i.e. they can be
* set through Request::controls() and returned out through Request::metadata().
+{%- endif %}
*/
-extern const ControlIdMap controls {
-${controls_map}
+extern const ControlIdMap {{mode}} {
+{%- for vendor, ctrls in controls -%}
+{%- for ctrl in ctrls %}
+ { {{ctrl.namespace}}{{ctrl.name|snake_case|upper}}, &{{ctrl.namespace}}{{ctrl.name}} },
+{%- endfor -%}
+{%- endfor %}
};
-} /* namespace controls */
+} /* namespace {{mode}} */
} /* namespace libcamera */
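
To make the template concrete, this is roughly what it expands to for an enumerated control such as AeState, defined in the YAML below (the exact spelling of the direction token is generator-defined):

extern const std::array<const ControlValue, 3> AeStateValues = {
	static_cast<int32_t>(AeStateIdle),
	static_cast<int32_t>(AeStateSearching),
	static_cast<int32_t>(AeStateConverged),
};
extern const std::map<std::string, int32_t> AeStateNameValueMap = {
	{ "AeStateIdle", AeStateIdle },
	{ "AeStateSearching", AeStateSearching },
	{ "AeStateConverged", AeStateConverged },
};
extern const Control<int32_t> AeState(AE_STATE, "AeState", "libcamera",
				      Direction::Out, AeStateNameValueMap);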
diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml
index bf1f1a83..aa744864 100644
--- a/src/libcamera/control_ids_core.yaml
+++ b/src/libcamera/control_ids_core.yaml
@@ -10,32 +10,95 @@ vendor: libcamera
controls:
- AeEnable:
type: bool
+ direction: in
description: |
- Enable or disable the AE.
+ Enable or disable the AEGC algorithm. When this control is set to true,
+ both ExposureTimeMode and AnalogueGainMode are set to auto, and if this
+ control is set to false then both are set to manual.
- \sa ExposureTime AnalogueGain
+ If ExposureTimeMode or AnalogueGainMode are also set in the same
+ request as AeEnable, then the modes supplied by ExposureTimeMode or
+ AnalogueGainMode will take precedence.
- - AeLocked:
- type: bool
+ \sa ExposureTimeMode AnalogueGainMode
+
+ - AeState:
+ type: int32_t
+ direction: out
description: |
- Report the lock status of a running AE algorithm.
+ Report the AEGC algorithm state.
- If the AE algorithm is locked the value shall be set to true, if it's
- converging it shall be set to false. If the AE algorithm is not
- running the control shall not be present in the metadata control list.
+ The AEGC algorithm computes the exposure time and the analogue gain
+ to be applied to the image sensor.
+
+ The AEGC algorithm behaviour is controlled by the ExposureTimeMode and
+ AnalogueGainMode controls, which allow applications to decide how
+ the exposure time and gain are computed, in Auto or Manual mode,
+ independently from one another.
+
+ The AeState control reports the AEGC algorithm state through a single
+ value and describes it as a single computation block which computes
+ both the exposure time and the analogue gain values.
+
+ When both the exposure time and analogue gain values are configured to
+ be in Manual mode, the AEGC algorithm is quiescent and does not actively
+ compute any value, and the AeState control will report AeStateIdle.
+
+ When at least the exposure time or analogue gain are configured to be
+ computed by the AEGC algorithm, the AeState control will report if the
+ algorithm has converged to stable values for all of the controls set
+ to be computed in Auto mode.
+
+ \sa AnalogueGainMode
+ \sa ExposureTimeMode
+
+ enum:
+ - name: AeStateIdle
+ value: 0
+ description: |
+ The AEGC algorithm is inactive.
+
+ This state is returned when both AnalogueGainMode and
+ ExposureTimeMode are set to Manual and the algorithm is not
+ actively computing any value.
+ - name: AeStateSearching
+ value: 1
+ description: |
+ The AEGC algorithm is actively computing new values, for either the
+ exposure time or the analogue gain, but has not converged to a
+ stable result yet.
+
+ This state is returned if at least one of AnalogueGainMode or
+ ExposureTimeMode is Auto and the algorithm has not converged yet.
+
+ The AEGC algorithm converges once stable values are computed for
+ all of the controls set to be computed in Auto mode. Once the
+ algorithm converges the state is moved to AeStateConverged.
+ - name: AeStateConverged
+ value: 2
+ description: |
+ The AEGC algorithm has converged.
- \sa AeEnable
+ This state is returned if at least one of AnalogueGainMode or
+ ExposureTimeMode is Auto, and the AEGC algorithm has converged to a
+ stable value.
+
+ If the measurements move too far away from the convergence point
+ then the AEGC algorithm might start adjusting again, in which case
+ the state is moved to AeStateSearching.
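
As a sketch of how an application might consume the new state control (request completion handling assumed):

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

bool aegcConverged(Request *request)
{
	/* AeState is reported through the request metadata. */
	const auto state = request->metadata().get(controls::AeState);

	return state && *state == controls::AeStateConverged;
}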
# AeMeteringMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AeMeteringMode:
type: int32_t
+ direction: inout
description: |
- Specify a metering mode for the AE algorithm to use. The metering
- modes determine which parts of the image are used to determine the
- scene brightness. Metering modes may be platform specific and not
- all metering modes may be supported.
+ Specify a metering mode for the AE algorithm to use.
+
+ The metering modes determine which parts of the image are used to
+ determine the scene brightness. Metering modes may be platform specific
+ and not all metering modes may be supported.
enum:
- name: MeteringCentreWeighted
value: 0
@@ -55,45 +118,63 @@ controls:
# - Better handling of custom types.
- AeConstraintMode:
type: int32_t
+ direction: inout
description: |
- Specify a constraint mode for the AE algorithm to use. These determine
- how the measured scene brightness is adjusted to reach the desired
- target exposure. Constraint modes may be platform specific, and not
- all constraint modes may be supported.
+ Specify a constraint mode for the AE algorithm to use.
+
+ The constraint modes determine how the measured scene brightness is
+ adjusted to reach the desired target exposure. Constraint modes may be
+ platform specific, and not all constraint modes may be supported.
enum:
- name: ConstraintNormal
value: 0
- description: Default constraint mode.
+ description: |
+ Default constraint mode.
+
This mode aims to balance the exposure of different parts of the
image so as to reach a reasonable average level. However, highlights
in the image may appear over-exposed and lowlights may appear
under-exposed.
- name: ConstraintHighlight
value: 1
- description: Highlight constraint mode.
+ description: |
+ Highlight constraint mode.
+
This mode adjusts the exposure levels in order to try and avoid
over-exposing the brightest parts (highlights) of an image.
Other non-highlight parts of the image may appear under-exposed.
- name: ConstraintShadows
value: 2
- description: Shadows constraint mode.
+ description: |
+ Shadows constraint mode.
+
This mode adjusts the exposure levels in order to try and avoid
under-exposing the dark parts (shadows) of an image. Other normally
exposed parts of the image may appear over-exposed.
- name: ConstraintCustom
value: 3
- description: Custom constraint mode.
+ description: |
+ Custom constraint mode.
# AeExposureMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AeExposureMode:
type: int32_t
+ direction: inout
description: |
- Specify an exposure mode for the AE algorithm to use. These specify
- how the desired total exposure is divided between the shutter time
- and the sensor's analogue gain. The exposure modes are platform
- specific, and not all exposure modes may be supported.
+ Specify an exposure mode for the AE algorithm to use.
+
+ The exposure modes specify how the desired total exposure is divided
+ between the exposure time and the sensor's analogue gain. They are
+ platform specific, and not all exposure modes may be supported.
+
+ When one of AnalogueGainMode or ExposureTimeMode is set to Manual,
+ the fixed values will override any choices made by AeExposureMode.
+
+ \sa AnalogueGainMode
+ \sa ExposureTimeMode
+
enum:
- name: ExposureNormal
value: 0
@@ -110,59 +191,222 @@ controls:
- ExposureValue:
type: float
+ direction: inout
description: |
- Specify an Exposure Value (EV) parameter. The EV parameter will only be
- applied if the AE algorithm is currently enabled.
+ Specify an Exposure Value (EV) parameter.
+
+ The EV parameter will only be applied if the AE algorithm is currently
+ enabled, that is, at least one of AnalogueGainMode and ExposureTimeMode
+ is in Auto mode.
By convention EV adjusts the exposure as log2. For example
- EV = [-2, -1, 0.5, 0, 0.5, 1, 2] results in an exposure adjustment
+ EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment
of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
- \sa AeEnable
+ \sa AnalogueGainMode
+ \sa ExposureTimeMode
- ExposureTime:
type: int32_t
+ direction: inout
description: |
- Exposure time (shutter speed) for the frame applied in the sensor
- device. This value is specified in micro-seconds.
+ Exposure time for the frame applied in the sensor device.
- Setting this value means that it is now fixed and the AE algorithm may
- not change it. Setting it back to zero returns it to the control of the
- AE algorithm.
+ This value is specified in micro-seconds.
- \sa AnalogueGain AeEnable
+ This control will only take effect if ExposureTimeMode is Manual. If
+ this control is set when ExposureTimeMode is Auto, the value will be
+ ignored and will not be retained.
- \todo Document the interactions between AeEnable and setting a fixed
- value for this control. Consider interactions with other AE features,
- such as aperture and aperture/shutter priority mode, and decide if
- control of which features should be automatically adjusted shouldn't
- better be handled through a separate AE mode control.
+ When reported in metadata, this control indicates what exposure time
+ was used for the current frame, regardless of ExposureTimeMode.
+ ExposureTimeMode will indicate the source of the exposure time value,
+ whether it came from the AE algorithm or not.
+
+ \sa AnalogueGain
+ \sa ExposureTimeMode
+
+ - ExposureTimeMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Controls the source of the exposure time that is applied to the image
+ sensor.
+
+ When set to Auto, the AE algorithm computes the exposure time and
+ configures the image sensor accordingly. When set to Manual, the value
+ of the ExposureTime control is used.
+
+ When transitioning from Auto to Manual mode and no ExposureTime control
+ is provided by the application, the last value computed by the AE
+ algorithm when the mode was Auto will be used. If the ExposureTimeMode
+ was never set to Auto (either because the camera started in Manual mode,
+ or Auto is not supported by the camera), the camera should use a
+ best-effort default value.
+
+ If ExposureTimeModeManual is supported, the ExposureTime control must
+ also be supported.
+
+ Cameras that support manual control of the sensor shall support manual
+ mode for both ExposureTimeMode and AnalogueGainMode, and shall expose
+ the ExposureTime and AnalogueGain controls. If the camera also has an
+ AEGC implementation, both ExposureTimeMode and AnalogueGainMode shall
+ support both manual and auto mode. If auto mode is available, it shall
+ be the default mode. These rules do not apply to black box cameras
+ such as UVC cameras, where the available gain and exposure modes are
+ completely dependent on what the device exposes.
+
+ \par Flickerless exposure mode transitions
+
+ Applications that wish to transition from ExposureTimeModeAuto to direct
+ control of the exposure time without causing extra flicker can do so by
+ selecting an ExposureTime value as close as possible to the last value
+ computed by the auto exposure algorithm in order to avoid any visible
+ flickering.
+
+ To select the correct value to use as ExposureTime value, applications
+ should accommodate the natural delay in applying controls caused by the
+ capture pipeline frame depth.
+
+ When switching to manual exposure mode, applications should not
+ immediately specify an ExposureTime value in the same request where
+ ExposureTimeMode is set to Manual. They should instead wait for the
+ first Request where ExposureTimeMode is reported as
+ ExposureTimeModeManual in the Request metadata, and use the reported
+ ExposureTime to populate the control value in the next Request to be
+ queued to the Camera.
+
+ The implementation of the auto-exposure algorithm should equally try to
+ minimize flickering and, when transitioning from manual exposure mode
+ to auto exposure, use the last value provided by the application as the
+ starting point.
+
+ 1. Start with ExposureTimeMode set to Auto
+
+ 2. Set ExposureTimeMode to Manual
+
+ 3. Wait for the first completed request that has ExposureTimeMode
+ set to Manual
+
+ 4. Copy the value reported in ExposureTime into a new request, and
+ submit it
+
+ 5. Proceed to run with the manual exposure time as desired (a code
+ sketch of this procedure follows this control's definition below)
+
+ \sa ExposureTime
+ enum:
+ - name: ExposureTimeModeAuto
+ value: 0
+ description: |
+ The exposure time will be calculated automatically and set by the
+ AE algorithm.
+
+ If ExposureTime is set while this mode is active, it will be
+ ignored, and its value will not be retained.
+
+ When transitioning from Manual to Auto mode, the AEGC should start
+ its adjustments based on the last set manual ExposureTime value.
+ - name: ExposureTimeModeManual
+ value: 1
+ description: |
+ The exposure time will not be updated by the AE algorithm.
+
+ When transitioning from Auto to Manual mode, the last computed
+ exposure value is used until a new value is specified through the
+ ExposureTime control. If an ExposureTime value is specified in the
+ same request where the ExposureTimeMode is changed from Auto to
+ Manual, the provided ExposureTime is applied immediately.
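
A minimal sketch of the flickerless Auto-to-Manual transition procedure above, assuming a request completion handler and a recycled request ready to be queued:

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

/*
 * Steps 3 and 4: once a completed request reports Manual mode, feed the
 * reported exposure time into the next request to avoid visible flicker.
 */
void onRequestComplete(Request *completed, Request *next)
{
	const auto mode = completed->metadata().get(controls::ExposureTimeMode);
	const auto exposure = completed->metadata().get(controls::ExposureTime);

	if (!mode || *mode != controls::ExposureTimeModeManual || !exposure)
		return;

	next->controls().set(controls::ExposureTime, *exposure);
	/* Step 5: queue 'next' and keep setting ExposureTime manually. */
}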
- AnalogueGain:
type: float
+ direction: inout
description: |
Analogue gain value applied in the sensor device.
+
The value of the control specifies the gain multiplier applied to all
colour channels. This value cannot be lower than 1.0.
- Setting this value means that it is now fixed and the AE algorithm may
- not change it. Setting it back to zero returns it to the control of the
- AE algorithm.
+ This control will only take effect if AnalogueGainMode is Manual. If
+ this control is set when AnalogueGainMode is Auto, the value will be
+ ignored and will not be retained.
- \sa ExposureTime AeEnable
+ When reported in metadata, this control indicates what analogue gain
+ was used for the current request, regardless of AnalogueGainMode.
+ AnalogueGainMode will indicate the source of the analogue gain value,
+ whether it came from the AEGC algorithm or not.
- \todo Document the interactions between AeEnable and setting a fixed
- value for this control. Consider interactions with other AE features,
- such as aperture and aperture/shutter priority mode, and decide if
- control of which features should be automatically adjusted shouldn't
- better be handled through a separate AE mode control.
+ \sa ExposureTime
+ \sa AnalogueGainMode
+
+ - AnalogueGainMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Controls the source of the analogue gain that is applied to the image
+ sensor.
+
+ When set to Auto, the AEGC algorithm computes the analogue gain and
+ configures the image sensor accordingly. When set to Manual, the value
+ of the AnalogueGain control is used.
+
+ When transitioning from Auto to Manual mode and no AnalogueGain control
+ is provided by the application, the last value computed by the AEGC
+ algorithm when the mode was Auto will be used. If the AnalogueGainMode
+ was never set to Auto (either because the camera started in Manual mode,
+ or Auto is not supported by the camera), the camera should use a
+ best-effort default value.
+
+ If AnalogueGainModeManual is supported, the AnalogueGain control must
+ also be supported.
+
+ For cameras where we have control over the ISP, both ExposureTimeMode
+ and AnalogueGainMode are expected to support manual mode, and both
+ controls (as well as ExposureTime and AnalogueGain) are expected to
+ be present. If the camera also has an AEGC implementation, both
+ ExposureTimeMode and AnalogueGainMode shall support both manual and
+ auto mode. If auto mode is available, it shall be the default mode.
+ These rules do not apply to black box cameras such as UVC cameras,
+ where the available gain and exposure modes are completely dependent on
+ what the hardware exposes.
+
+ The same procedure described for performing flickerless transitions in
+ the ExposureTimeMode control documentation can be applied to analogue
+ gain.
+
+ \sa ExposureTimeMode
+ \sa AnalogueGain
+ enum:
+ - name: AnalogueGainModeAuto
+ value: 0
+ description: |
+ The analogue gain will be calculated automatically and set by the
+ AEGC algorithm.
+
+ If AnalogueGain is set while this mode is active, it will be
+ ignored, and it will also not be retained.
+
+ When transitioning from Manual to Auto mode, the AEGC should start
+ its adjustments based on the last set manual AnalogueGain value.
+ - name: AnalogueGainModeManual
+ value: 1
+ description: |
+ The analogue gain will not be updated by the AEGC algorithm.
+
+ When transitioning from Auto to Manual mode, the last computed
+ gain value is used until a new value is specified through the
+ AnalogueGain control. If an AnalogueGain value is specified in the
+ same request where the AnalogueGainMode is changed from Auto to
+ Manual, the provided AnalogueGain is applied immediately.
- AeFlickerMode:
type: int32_t
+ direction: inout
description: |
- Set the flicker mode, which determines whether, and how, the AGC/AEC
- algorithm attempts to hide flicker effects caused by the duty cycle of
- artificial lighting.
+ Set the flicker avoidance mode for AGC/AEC.
+
+ The flicker mode determines whether, and how, the AGC/AEC algorithm
+ attempts to hide flicker effects caused by the duty cycle of artificial
+ lighting.
Although implementation dependent, many algorithms for "flicker
avoidance" work by restricting this exposure time to integer multiples
@@ -176,16 +420,21 @@ controls:
enum:
- name: FlickerOff
value: 0
- description: No flicker avoidance is performed.
+ description: |
+ No flicker avoidance is performed.
- name: FlickerManual
value: 1
- description: Manual flicker avoidance.
+ description: |
+ Manual flicker avoidance.
+
Suppress flicker effects caused by lighting running with a period
specified by the AeFlickerPeriod control.
\sa AeFlickerPeriod
- name: FlickerAuto
value: 2
- description: Automatic flicker period detection and avoidance.
+ description: |
+ Automatic flicker period detection and avoidance.
+
The system will automatically determine the most likely value of
flicker period, and avoid flicker of this frequency. Once flicker
is being corrected, it is implementation dependent whether the
@@ -194,7 +443,10 @@ controls:
- AeFlickerPeriod:
type: int32_t
- description: Manual flicker period in microseconds.
+ direction: inout
+ description: |
+ Manual flicker period in microseconds.
+
This value sets the current flicker period to avoid. It is used when
AeFlickerMode is set to FlickerManual.
@@ -212,7 +464,10 @@ controls:
- AeFlickerDetected:
type: int32_t
- description: Flicker period detected in microseconds.
+ direction: out
+ description: |
+ Flicker period detected in microseconds.
+
The value reported here indicates the currently detected flicker
period, or zero if no flicker at all is detected.
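
For example, an application suppressing the 100 Hz flicker produced by 50 Hz mains lighting might set (sketch; values illustrative):

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void avoidMainsFlicker(Request *request)
{
	ControlList &ctrls = request->controls();

	ctrls.set(controls::AeFlickerMode, controls::FlickerManual);
	/* 50 Hz mains lighting flickers at 100 Hz, i.e. a 10000 us period. */
	ctrls.set(controls::AeFlickerPeriod, 10000);
}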
@@ -232,38 +487,61 @@ controls:
- Brightness:
type: float
+ direction: inout
description: |
- Specify a fixed brightness parameter. Positive values (up to 1.0)
- produce brighter images; negative values (up to -1.0) produce darker
- images and 0.0 leaves pixels unchanged.
+ Specify a fixed brightness parameter.
+
+ Positive values (up to 1.0) produce brighter images; negative values
+ (up to -1.0) produce darker images and 0.0 leaves pixels unchanged.
- Contrast:
type: float
+ direction: inout
description: |
- Specify a fixed contrast parameter. Normal contrast is given by the
- value 1.0; larger values produce images with more contrast.
+ Specify a fixed contrast parameter.
+
+ Normal contrast is given by the value 1.0; larger values produce images
+ with more contrast.
- Lux:
type: float
+ direction: out
description: |
- Report an estimate of the current illuminance level in lux. The Lux
- control can only be returned in metadata.
+ Report an estimate of the current illuminance level in lux.
+
+ The Lux control can only be returned in metadata.
- AwbEnable:
type: bool
+ direction: inout
description: |
Enable or disable the AWB.
+ When AWB is enabled, the algorithm estimates the colour temperature of
+ the scene and computes colour gains and the colour correction matrix
+ automatically. The computed colour temperature, gains and correction
+ matrix are reported in metadata. The corresponding controls are ignored
+ if set in a request.
+
+ When AWB is disabled, the colour temperature, gains and correction
+ matrix are not updated automatically and can be set manually in
+ requests.
+
+ \sa ColourCorrectionMatrix
\sa ColourGains
+ \sa ColourTemperature
# AwbMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AwbMode:
type: int32_t
+ direction: inout
description: |
- Specify the range of illuminants to use for the AWB algorithm. The modes
- supported are platform specific, and not all modes may be supported.
+ Specify the range of illuminants to use for the AWB algorithm.
+
+ The modes supported are platform specific, and not all modes may be
+ supported.
enum:
- name: AwbAuto
value: 0
@@ -292,6 +570,7 @@ controls:
- AwbLocked:
type: bool
+ direction: out
description: |
Report the lock status of a running AWB algorithm.
@@ -303,39 +582,67 @@ controls:
- ColourGains:
type: float
+ direction: inout
description: |
Pair of gain values for the Red and Blue colour channels, in that
- order. ColourGains can only be applied in a Request when the AWB is
- disabled.
+ order.
+
+ ColourGains can only be applied in a Request when the AWB is disabled.
+ If ColourGains is set in a request but ColourTemperature is not, the
+ implementation shall calculate and set the ColourTemperature based on
+ the ColourGains.
\sa AwbEnable
+ \sa ColourTemperature
size: [2]
- ColourTemperature:
type: int32_t
- description: Report the current estimate of the colour temperature, in
- kelvin, for this frame. The ColourTemperature control can only be
- returned in metadata.
+ direction: inout
+ description: |
+ ColourTemperature of the frame, in kelvin.
+
+ ColourTemperature can only be applied in a Request when the AWB is
+ disabled.
+
+ If ColourTemperature is set in a request but ColourGains is not, the
+ implementation shall calculate and set the ColourGains based on the
+ given ColourTemperature. If ColourTemperature is set (either directly,
+ or indirectly by setting ColourGains) but ColourCorrectionMatrix is not,
+ the ColourCorrectionMatrix is updated based on the ColourTemperature.
+
+ The ColourTemperature used to process the frame is reported in metadata.
+
+ \sa AwbEnable
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
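
A sketch of manual white balance using the relationships documented above (the gain values are illustrative):

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void manualWhiteBalance(Request *request)
{
	ControlList &ctrls = request->controls();

	ctrls.set(controls::AwbEnable, false);
	/*
	 * Red and blue gains, in that order. The matching ColourTemperature
	 * and ColourCorrectionMatrix are derived by the implementation when
	 * they are not set explicitly.
	 */
	ctrls.set(controls::ColourGains, { 1.8f, 2.1f });
}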
- Saturation:
type: float
+ direction: inout
description: |
- Specify a fixed saturation parameter. Normal saturation is given by
- the value 1.0; larger values produce more saturated colours; 0.0
- produces a greyscale image.
+ Specify a fixed saturation parameter.
+
+ Normal saturation is given by the value 1.0; larger values produce more
+ saturated colours; 0.0 produces a greyscale image.
- SensorBlackLevels:
type: int32_t
+ direction: out
description: |
- Reports the sensor black levels used for processing a frame, in the
- order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit
- pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels
- control can only be returned in metadata.
+ Reports the sensor black levels used for processing a frame.
+
+ The values are in the order R, Gr, Gb, B. They are returned as numbers
+ out of a 16-bit pixel range (as if pixels ranged from 0 to 65535). The
+ SensorBlackLevels control can only be returned in metadata.
size: [4]
- Sharpness:
type: float
+ direction: inout
description: |
+ Intensity of the sharpening applied to the image.
+
A value of 0.0 means no sharpening. The minimum value means
minimal sharpening, and shall be 0.0 unless the camera can't
disable sharpening completely. The default value shall give a
@@ -347,8 +654,10 @@ controls:
- FocusFoM:
type: int32_t
+ direction: out
description: |
Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
+
A larger FocusFoM value indicates a more in-focus frame. This singular
value may be based on a combination of statistics gathered from
multiple focus regions within an image. The number of focus regions and
@@ -358,23 +667,34 @@ controls:
- ColourCorrectionMatrix:
type: float
+ direction: inout
description: |
- The 3x3 matrix that converts camera RGB to sRGB within the
- imaging pipeline. This should describe the matrix that is used
- after pixels have been white-balanced, but before any gamma
- transformation. The 3x3 matrix is stored in conventional reading
- order in an array of 9 floating point values.
+ The 3x3 matrix that converts camera RGB to sRGB within the imaging
+ pipeline.
+
+ This should describe the matrix that is used after pixels have been
+ white-balanced, but before any gamma transformation. The 3x3 matrix is
+ stored in conventional reading order in an array of 9 floating point
+ values.
+
+ ColourCorrectionMatrix can only be applied in a Request when the AWB is
+ disabled.
+
+ \sa AwbEnable
+ \sa ColourTemperature
size: [3,3]
- ScalerCrop:
type: Rectangle
+ direction: inout
description: |
Sets the image portion that will be scaled to form the whole of
- the final output image. The (x,y) location of this rectangle is
- relative to the PixelArrayActiveAreas that is being used. The units
- remain native sensor pixels, even if the sensor is being used in
- a binning or skipping mode.
+ the final output image.
+
+ The (x,y) location of this rectangle is relative to the
+ PixelArrayActiveAreas that is being used. The units remain native
+ sensor pixels, even if the sensor is being used in a binning or
+ skipping mode.
This control is only present when the pipeline supports scaling. Its
maximum valid value is given by the properties::ScalerCropMaximum
@@ -382,6 +702,7 @@ controls:
- DigitalGain:
type: float
+ direction: inout
description: |
Digital gain value applied during the processing steps applied
to the image as captured from the sensor.
@@ -399,13 +720,16 @@ controls:
- FrameDuration:
type: int64_t
+ direction: out
description: |
The instantaneous frame duration from start of frame exposure to start
- of next exposure, expressed in microseconds. This control is meant to
- be returned in metadata.
+ of next exposure, expressed in microseconds.
+
+ This control is meant to be returned in metadata.
- FrameDurationLimits:
type: int64_t
+ direction: inout
description: |
The minimum and maximum (in that order) frame duration, expressed in
microseconds.
@@ -419,14 +743,13 @@ controls:
values to be the same. Setting both values to 0 reverts to using the
camera defaults.
- The maximum frame duration provides the absolute limit to the shutter
- speed computed by the AE algorithm and it overrides any exposure mode
+ The maximum frame duration provides the absolute limit to the exposure
+ time computed by the AE algorithm and it overrides any exposure mode
setting specified with controls::AeExposureMode. Similarly, when a
manual exposure time is set through controls::ExposureTime, it also
gets clipped to the limits set by this control. When reported in
- metadata, the control expresses the minimum and maximum frame
- durations used after being clipped to the sensor provided frame
- duration limits.
+ metadata, the control expresses the minimum and maximum frame durations
+ used after being clipped to the sensor provided frame duration limits.
\sa AeExposureMode
\sa ExposureTime
@@ -443,16 +766,20 @@ controls:
- SensorTemperature:
type: float
+ direction: out
description: |
- Temperature measure from the camera sensor in Celsius. This is typically
- obtained by a thermal sensor present on-die or in the camera module. The
- range of reported temperatures is device dependent.
+ Temperature measure from the camera sensor in Celsius.
+
+ This value is typically obtained by a thermal sensor present on-die or
+ in the camera module. The range of reported temperatures is device
+ dependent.
The SensorTemperature control will only be returned in metadata if a
thermal sensor is present.
- SensorTimestamp:
type: int64_t
+ direction: out
description: |
The time when the first row of the image sensor active array is exposed.
@@ -467,8 +794,9 @@ controls:
- AfMode:
type: int32_t
+ direction: inout
description: |
- Control to set the mode of the AF (autofocus) algorithm.
+ The mode of the AF (autofocus) algorithm.
An implementation may choose not to implement all the modes.
@@ -476,12 +804,12 @@ controls:
- name: AfModeManual
value: 0
description: |
- The AF algorithm is in manual mode. In this mode it will never
- perform any action nor move the lens of its own accord, but an
- application can specify the desired lens position using the
- LensPosition control.
+ The AF algorithm is in manual mode.
- In this mode the AfState will always report AfStateIdle.
+ In this mode it will never perform any action nor move the lens of
+ its own accord, but an application can specify the desired lens
+ position using the LensPosition control. The AfState will always
+ report AfStateIdle.
If the camera is started in AfModeManual, it will move the focus
lens to the position specified by the LensPosition control.
@@ -492,31 +820,34 @@ controls:
- name: AfModeAuto
value: 1
description: |
- The AF algorithm is in auto mode. This means that the algorithm
- will never move the lens or change state unless the AfTrigger
- control is used. The AfTrigger control can be used to initiate a
- focus scan, the results of which will be reported by AfState.
-
- If the autofocus algorithm is moved from AfModeAuto to another
- mode while a scan is in progress, the scan is cancelled
- immediately, without waiting for the scan to finish.
-
- When first entering this mode the AfState will report
- AfStateIdle. When a trigger control is sent, AfState will
- report AfStateScanning for a period before spontaneously
- changing to AfStateFocused or AfStateFailed, depending on
- the outcome of the scan. It will remain in this state until
- another scan is initiated by the AfTrigger control. If a scan is
- cancelled (without changing to another mode), AfState will return
- to AfStateIdle.
+ The AF algorithm is in auto mode.
+
+ In this mode the algorithm will never move the lens or change state
+ unless the AfTrigger control is used. The AfTrigger control can be
+ used to initiate a focus scan, the results of which will be
+ reported by AfState.
+
+ If the autofocus algorithm is moved from AfModeAuto to another mode
+ while a scan is in progress, the scan is cancelled immediately,
+ without waiting for the scan to finish.
+
+ When first entering this mode the AfState will report AfStateIdle.
+ When a trigger control is sent, AfState will report AfStateScanning
+ for a period before spontaneously changing to AfStateFocused or
+ AfStateFailed, depending on the outcome of the scan. It will remain
+ in this state until another scan is initiated by the AfTrigger
+ control. If a scan is cancelled (without changing to another mode),
+ AfState will return to AfStateIdle.
- name: AfModeContinuous
value: 2
description: |
- The AF algorithm is in continuous mode. This means that the lens can
- re-start a scan spontaneously at any moment, without any user
- intervention. The AfState still reports whether the algorithm is
- currently scanning or not, though the application has no ability to
- initiate or cancel scans, nor to move the lens for itself.
+ The AF algorithm is in continuous mode.
+
+ In this mode the lens can re-start a scan spontaneously at any
+ moment, without any user intervention. The AfState still reports
+ whether the algorithm is currently scanning or not, though the
+ application has no ability to initiate or cancel scans, nor to move
+ the lens for itself.
However, applications can pause the AF algorithm from continuously
scanning by using the AfPause control. This allows video or still
@@ -528,35 +859,43 @@ controls:
- AfRange:
type: int32_t
+ direction: inout
description: |
- Control to set the range of focus distances that is scanned. An
- implementation may choose not to implement all the options here.
+ The range of focus distances that is scanned.
+
+ An implementation may choose not to implement all the options here.
enum:
- name: AfRangeNormal
value: 0
description: |
- A wide range of focus distances is scanned, all the way from
- infinity down to close distances, though depending on the
- implementation, possibly not including the very closest macro
- positions.
+ A wide range of focus distances is scanned.
+
+ Scanned distances cover all the way from infinity down to close
+ distances, though depending on the implementation, possibly not
+ including the very closest macro positions.
- name: AfRangeMacro
value: 1
- description: Only close distances are scanned.
+ description: |
+ Only close distances are scanned.
- name: AfRangeFull
value: 2
description: |
- The full range of focus distances is scanned just as with
- AfRangeNormal but this time including the very closest macro
- positions.
+ The full range of focus distances is scanned.
+
+ This range is similar to AfRangeNormal but includes the very
+ closest macro positions.
- AfSpeed:
type: int32_t
+ direction: inout
description: |
- Control that determines whether the AF algorithm is to move the lens
- as quickly as possible or more steadily. For example, during video
- recording it may be desirable not to move the lens too abruptly, but
- when in a preview mode (waiting for a still capture) it may be
- helpful to move the lens as quickly as is reasonably possible.
+ Determine whether the AF is to move the lens as quickly as possible or
+ more steadily.
+
+ For example, during video recording it may be desirable not to move the
+ lens too abruptly, but when in a preview mode (waiting for a still
+ capture) it may be helpful to move the lens as quickly as is reasonably
+ possible.
enum:
- name: AfSpeedNormal
value: 0
@@ -567,26 +906,30 @@ controls:
- AfMetering:
type: int32_t
+ direction: inout
description: |
- Instruct the AF algorithm how it should decide which parts of the image
- should be used to measure focus.
+ The parts of the image used by the AF algorithm to measure focus.
enum:
- name: AfMeteringAuto
value: 0
- description: The AF algorithm should decide for itself where it will
- measure focus.
+ description: |
+ Let the AF algorithm decide for itself where it will measure focus.
- name: AfMeteringWindows
value: 1
- description: The AF algorithm should use the rectangles defined by
- the AfWindows control to measure focus. If no windows are specified
- the behaviour is platform dependent.
+ description: |
+ Use the rectangles defined by the AfWindows control to measure focus.
+
+ If no windows are specified the behaviour is platform dependent.
- AfWindows:
type: Rectangle
+ direction: inout
description: |
- Sets the focus windows used by the AF algorithm when AfMetering is set
- to AfMeteringWindows. The units used are pixels within the rectangle
- returned by the ScalerCropMaximum property.
+ The focus windows used by the AF algorithm when AfMetering is set to
+ AfMeteringWindows.
+
+ The units used are pixels within the rectangle returned by the
+ ScalerCropMaximum property.
In order to be activated, a rectangle must be programmed with non-zero
width and height. Internally, these rectangles are intersected with the
@@ -610,24 +953,36 @@ controls:
- AfTrigger:
type: int32_t
+ direction: in
description: |
- This control starts an autofocus scan when AfMode is set to AfModeAuto,
- and can also be used to terminate a scan early.
+ Start an autofocus scan.
- It is ignored if AfMode is set to AfModeManual or AfModeContinuous.
+ This control starts an autofocus scan when AfMode is set to AfModeAuto,
+ and is ignored if AfMode is set to AfModeManual or AfModeContinuous. It
+ can also be used to terminate a scan early.
enum:
- name: AfTriggerStart
value: 0
- description: Start an AF scan. Ignored if a scan is in progress.
+ description: |
+ Start an AF scan.
+
+ Setting the control to AfTriggerStart is ignored if a scan is in
+ progress.
- name: AfTriggerCancel
value: 1
- description: Cancel an AF scan. This does not cause the lens to move
- anywhere else. Ignored if no scan is in progress.
+ description: |
+ Cancel an AF scan.
+
+ This does not cause the lens to move anywhere else. Ignored if no
+ scan is in progress.
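
A sketch of starting a one-shot scan in auto mode (camera and request plumbing assumed):

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void startAfScan(Request *request)
{
	ControlList &ctrls = request->controls();

	ctrls.set(controls::AfMode, controls::AfModeAuto);
	/*
	 * AfState will report AfStateScanning, then AfStateFocused or
	 * AfStateFailed, in the metadata of subsequent requests.
	 */
	ctrls.set(controls::AfTrigger, controls::AfTriggerStart);
}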
- AfPause:
type: int32_t
+ direction: in
description: |
+ Pause lens movements when in continuous autofocus mode.
+
This control has no effect except when in continuous autofocus mode
(AfModeContinuous). It can be used to pause any lens movements while
(for example) images are captured. The algorithm remains inactive
@@ -637,17 +992,21 @@ controls:
- name: AfPauseImmediate
value: 0
description: |
- Pause the continuous autofocus algorithm immediately, whether or not
- any kind of scan is underway. AfPauseState will subsequently report
+ Pause the continuous autofocus algorithm immediately.
+
+ The autofocus algorithm is paused whether or not any kind of scan
+ is underway. AfPauseState will subsequently report
AfPauseStatePaused. AfState may report any of AfStateScanning,
AfStateFocused or AfStateFailed, depending on the algorithm's state
when it received this control.
- name: AfPauseDeferred
value: 1
description: |
- This is similar to AfPauseImmediate, and if the AfState is currently
- reporting AfStateFocused or AfStateFailed it will remain in that
- state and AfPauseState will report AfPauseStatePaused.
+ Pause the continuous autofocus algorithm at the end of the scan.
+
+ This is similar to AfPauseImmediate, and if the AfState is
+ currently reporting AfStateFocused or AfStateFailed it will remain
+ in that state and AfPauseState will report AfPauseStatePaused.
However, if the algorithm is scanning (AfStateScanning),
AfPauseState will report AfPauseStatePausing until the scan is
@@ -658,15 +1017,19 @@ controls:
- name: AfPauseResume
value: 2
description: |
- Resume continuous autofocus operation. The algorithm starts again
- from exactly where it left off, and AfPauseState will report
- AfPauseStateRunning.
+ Resume continuous autofocus operation.
+
+ The algorithm starts again from exactly where it left off, and
+ AfPauseState will report AfPauseStateRunning.
- LensPosition:
type: float
+ direction: inout
description: |
- Acts as a control to instruct the lens to move to a particular position
- and also reports back the position of the lens for each frame.
+ Set and report the focus lens position.
+
+ This control instructs the lens to move to a particular position and
+ also reports back the position of the lens for each frame.
The LensPosition control is ignored unless the AfMode is set to
AfModeManual, though the value is reported back unconditionally in all
@@ -680,39 +1043,43 @@ controls:
For example:
- 0 moves the lens to infinity.
- 0.5 moves the lens to focus on objects 2m away.
- 2 moves the lens to focus on objects 50cm away.
- And larger values will focus the lens closer.
+ - 0 moves the lens to infinity.
+ - 0.5 moves the lens to focus on objects 2m away.
+ - 2 moves the lens to focus on objects 50cm away.
+ - And larger values will focus the lens closer.
- The default value of the control should indicate a good general position
- for the lens, often corresponding to the hyperfocal distance (the
- closest position for which objects at infinity are still acceptably
- sharp). The minimum will often be zero (meaning infinity), and the
- maximum value defines the closest focus position.
+ The default value of the control should indicate a good general
+ position for the lens, often corresponding to the hyperfocal distance
+ (the closest position for which objects at infinity are still
+ acceptably sharp). The minimum will often be zero (meaning infinity),
+ and the maximum value defines the closest focus position.
\todo Define a property to report the Hyperfocal distance of calibrated
lenses.
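
Given the dioptre convention above, a small helper can convert a focus distance into a control value (sketch; manual mode is required for the value to take effect):

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void focusAt(Request *request, float metres)
{
	/*
	 * LensPosition is expressed in dioptres: 0 is infinity, 0.5 focuses
	 * at 2 m and 2 focuses at 50 cm.
	 */
	request->controls().set(controls::AfMode, controls::AfModeManual);
	request->controls().set(controls::LensPosition, 1.0f / metres);
}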
- AfState:
type: int32_t
+ direction: out
description: |
- Reports the current state of the AF algorithm in conjunction with the
- reported AfMode value and (in continuous AF mode) the AfPauseState
- value. The possible state changes are described below, though we note
- the following state transitions that occur when the AfMode is changed.
+ The current state of the AF algorithm.
+
+ This control reports the current state of the AF algorithm in
+ conjunction with the reported AfMode value and (in continuous AF mode)
+ the AfPauseState value. The possible state changes are described below,
+ though we note the following state transitions that occur when the
+ AfMode is changed.
If the AfMode is set to AfModeManual, then the AfState will always
- report AfStateIdle (even if the lens is subsequently moved). Changing to
- the AfModeManual state does not initiate any lens movement.
+ report AfStateIdle (even if the lens is subsequently moved). Changing
+ to the AfModeManual state does not initiate any lens movement.
If the AfMode is set to AfModeAuto then the AfState will report
- AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent together
- then AfState will omit AfStateIdle and move straight to AfStateScanning
- (and start a scan).
+ AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent
+ together then AfState will omit AfStateIdle and move straight to
+ AfStateScanning (and start a scan).
- If the AfMode is set to AfModeContinuous then the AfState will initially
- report AfStateScanning.
+ If the AfMode is set to AfModeContinuous then the AfState will
+ initially report AfStateScanning.
enum:
- name: AfStateIdle
@@ -725,11 +1092,12 @@ controls:
value: 1
description: |
The AF algorithm is in auto mode (AfModeAuto), and a scan has been
- started using the AfTrigger control. The scan can be cancelled by
- sending AfTriggerCancel at which point the algorithm will either
- move back to AfStateIdle or, if the scan actually completes before
- the cancel request is processed, to one of AfStateFocused or
- AfStateFailed.
+ started using the AfTrigger control.
+
+ The scan can be cancelled by sending AfTriggerCancel at which point
+ the algorithm will either move back to AfStateIdle or, if the scan
+ actually completes before the cancel request is processed, to one
+ of AfStateFocused or AfStateFailed.
Alternatively the AF algorithm could be in continuous mode
(AfModeContinuous) at which point it may enter this state
@@ -749,10 +1117,14 @@ controls:
- AfPauseState:
type: int32_t
+ direction: out
description: |
- Only applicable in continuous (AfModeContinuous) mode, this reports
- whether the algorithm is currently running, paused or pausing (that is,
- will pause as soon as any in-progress scan completes).
+ Report whether the autofocus is currently running, paused or pausing.
+
+ This control is only applicable in continuous (AfModeContinuous) mode,
+ and reports whether the algorithm is currently running, paused or
+ pausing (that is, will pause as soon as any in-progress scan
+ completes).
Any change to AfMode will cause AfPauseStateRunning to be reported.
@@ -766,22 +1138,28 @@ controls:
value: 1
description: |
Continuous AF has been sent an AfPauseDeferred control, and will
- pause as soon as any in-progress scan completes (and then report
- AfPauseStatePaused). No new scans will be start spontaneously until
+ pause as soon as any in-progress scan completes.
+
+ When the scan completes, the AfPauseState control will report
+ AfPauseStatePaused. No new scans will start spontaneously until
the AfPauseResume control is sent.
- name: AfPauseStatePaused
value: 2
description: |
- Continuous AF is paused. No further state changes or lens movements
- will occur until the AfPauseResume control is sent.
+ Continuous AF is paused.
+
+ No further state changes or lens movements will occur until the
+ AfPauseResume control is sent.
- HdrMode:
type: int32_t
+ direction: inout
description: |
- Control to set the mode to be used for High Dynamic Range (HDR)
- imaging. HDR techniques typically include multiple exposure, image
- fusion and tone mapping techniques to improve the dynamic range of the
- resulting images.
+ Set the mode to be used for High Dynamic Range (HDR) imaging.
+
+ HDR techniques typically include multiple exposure, image fusion and
+ tone mapping techniques to improve the dynamic range of the resulting
+ images.
When using an HDR mode, images are captured with different sets of AGC
settings called HDR channels. Channels indicate in particular the type
@@ -795,16 +1173,18 @@ controls:
- name: HdrModeOff
value: 0
description: |
- HDR is disabled. Metadata for this frame will not include the
- HdrChannel control.
+ HDR is disabled.
+
+ Metadata for this frame will not include the HdrChannel control.
- name: HdrModeMultiExposureUnmerged
value: 1
description: |
Multiple exposures will be generated in an alternating fashion.
- However, they will not be merged together and will be returned to
- the application as they are. Each image will be tagged with the
- correct HDR channel, indicating what kind of exposure it is. The
- tag should be the same as in the HdrModeMultiExposure case.
+
+ The multiple exposures will not be merged together and will be
+ returned to the application as they are. Each image will be tagged
+ with the correct HDR channel, indicating what kind of exposure it
+ is. The tag should be the same as in the HdrModeMultiExposure case.
The expectation is that an application using this mode would merge
the frames to create HDR images for itself if it requires them.
@@ -812,8 +1192,10 @@ controls:
value: 2
description: |
Multiple exposures will be generated and merged to create HDR
- images. Each image will be tagged with the HDR channel (long, medium
- or short) that arrived and which caused this image to be output.
+ images.
+
+ Each image will be tagged with the HDR channel (long, medium or
+ short) that arrived and which caused this image to be output.
Systems that use two channels for HDR will return images tagged
alternately as the short and long channel. Systems that use three
@@ -823,24 +1205,30 @@ controls:
value: 3
description: |
Multiple frames all at a single exposure will be used to create HDR
- images. These images should be reported as all corresponding to the
- HDR short channel.
+ images.
+
+ These images should be reported as all corresponding to the HDR
+ short channel.
- name: HdrModeNight
value: 4
description: |
- Multiple frames will be combined to produce "night mode" images. It
- is up to the implementation exactly which HDR channels it uses, and
- the images will all be tagged accordingly with the correct HDR
+ Multiple frames will be combined to produce "night mode" images.
+
+ It is up to the implementation exactly which HDR channels it uses,
+ and the images will all be tagged accordingly with the correct HDR
channel information.
- HdrChannel:
type: int32_t
+ direction: out
description: |
+ The HDR channel used to capture the frame.
+
This value is reported back to the application so that it can discover
- whether this capture corresponds to the short or long exposure image (or
- any other image used by the HDR procedure). An application can monitor
- the HDR channel to discover when the differently exposed images have
- arrived.
+ whether this capture corresponds to the short or long exposure image
+ (or any other image used by the HDR procedure). An application can
+ monitor the HDR channel to discover when the differently exposed images
+ have arrived.
This metadata is only available when an HDR mode has been enabled.
@@ -865,4 +1253,19 @@ controls:
description: |
This is a long exposure image.
+ - Gamma:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed gamma value.
+
+ The default gamma value must be 2.2, which closely mimics sRGB gamma.
+ Note that this is camera gamma, so it is applied as 1.0/gamma.
+
+ - DebugMetadataEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the debug metadata.
+
...
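As an illustrative sketch (not part of this patch), an application could set the two controls added above on a request before queueing it. The snippet assumes the generated controls::Gamma and controls::DebugMetadataEnable IDs and omits error handling:

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/request.h>

/* Sketch: apply the new controls on a request before queueing it. */
void applyControls(libcamera::Request *request)
{
	libcamera::ControlList &ctrls = request->controls();

	/* Camera gamma is applied as 1.0/gamma; 2.2 approximates sRGB. */
	ctrls.set(libcamera::controls::Gamma, 2.2f);

	/* Ask the pipeline to record debug metadata for this request. */
	ctrls.set(libcamera::controls::DebugMetadataEnable, true);
}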
diff --git a/src/libcamera/control_ids_debug.yaml b/src/libcamera/control_ids_debug.yaml
new file mode 100644
index 00000000..79753271
--- /dev/null
+++ b/src/libcamera/control_ids_debug.yaml
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+%YAML 1.1
+---
+vendor: debug
+controls: []
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
index 9bef5bf1..03309eea 100644
--- a/src/libcamera/control_ids_draft.yaml
+++ b/src/libcamera/control_ids_draft.yaml
@@ -10,6 +10,7 @@ vendor: draft
controls:
- AePrecaptureTrigger:
type: int32_t
+ direction: inout
description: |
Control for AE metering trigger. Currently identical to
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
@@ -31,6 +32,7 @@ controls:
- NoiseReductionMode:
type: int32_t
+ direction: inout
description: |
Control to select the noise reduction algorithm mode. Currently
identical to ANDROID_NOISE_REDUCTION_MODE.
@@ -59,6 +61,7 @@ controls:
- ColorCorrectionAberrationMode:
type: int32_t
+ direction: inout
description: |
Control to select the color correction aberration mode. Currently
identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
@@ -77,37 +80,9 @@ controls:
High quality aberration correction which might reduce the frame
rate.
- - AeState:
- type: int32_t
- description: |
- Control to report the current AE algorithm state. Currently identical to
- ANDROID_CONTROL_AE_STATE.
-
- Current state of the AE algorithm.
- enum:
- - name: AeStateInactive
- value: 0
- description: The AE algorithm is inactive.
- - name: AeStateSearching
- value: 1
- description: The AE algorithm has not converged yet.
- - name: AeStateConverged
- value: 2
- description: The AE algorithm has converged.
- - name: AeStateLocked
- value: 3
- description: The AE algorithm is locked.
- - name: AeStateFlashRequired
- value: 4
- description: The AE algorithm would need a flash for good results
- - name: AeStatePrecapture
- value: 5
- description: |
- The AE algorithm has started a pre-capture metering session.
- \sa AePrecaptureTrigger
-
- AwbState:
type: int32_t
+ direction: out
description: |
Control to report the current AWB algorithm state. Currently identical
to ANDROID_CONTROL_AWB_STATE.
@@ -129,6 +104,7 @@ controls:
- SensorRollingShutterSkew:
type: int64_t
+ direction: out
description: |
Control to report the time between the start of exposure of the first
row and the start of exposure of the last row. Currently identical to
@@ -136,6 +112,7 @@ controls:
- LensShadingMapMode:
type: int32_t
+ direction: inout
description: |
Control to report if the lens shading map is available. Currently
identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
@@ -149,6 +126,7 @@ controls:
- PipelineDepth:
type: int32_t
+ direction: out
description: |
Specifies the number of pipeline stages the frame went through from when
it was exposed to when the final completed result was available to the
@@ -163,6 +141,7 @@ controls:
- MaxLatency:
type: int32_t
+ direction: out
description: |
The maximum number of frames that can occur after a request (different
than the previous) has been submitted, and before the result's state
@@ -172,6 +151,7 @@ controls:
- TestPatternMode:
type: int32_t
+ direction: inout
description: |
Control to select the test pattern mode. Currently identical to
ANDROID_SENSOR_TEST_PATTERN_MODE.
@@ -227,4 +207,91 @@ controls:
value. All of the custom test patterns will be static (that is the
raw image must not vary from frame to frame).
+ - FaceDetectMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the face detection mode used by the pipeline.
+
+ Currently identical to ANDROID_STATISTICS_FACE_DETECT_MODE.
+
+ \sa FaceDetectFaceRectangles
+ \sa FaceDetectFaceScores
+ \sa FaceDetectFaceLandmarks
+ \sa FaceDetectFaceIds
+
+ enum:
+ - name: FaceDetectModeOff
+ value: 0
+ description: |
+ Pipeline doesn't perform face detection and doesn't report any
+ control related to face detection.
+ - name: FaceDetectModeSimple
+ value: 1
+ description: |
+ Pipeline performs face detection and reports the
+ FaceDetectFaceRectangles and FaceDetectFaceScores controls for each
+ detected face. FaceDetectFaceLandmarks and FaceDetectFaceIds are
+ optional.
+ - name: FaceDetectModeFull
+ value: 2
+ description: |
+ Pipeline performs face detection and reports all the controls
+ related to face detection including FaceDetectFaceRectangles,
+ FaceDetectFaceScores, FaceDetectFaceLandmarks, and
+ FaceDetectFaceIds for each detected face.
+
+ - FaceDetectFaceRectangles:
+ type: Rectangle
+ direction: out
+ description: |
+ Boundary rectangles of the detected faces. The number of values is
+ the number of detected faces.
+
+ The FaceDetectFaceRectangles control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_RECTANGLES.
+ size: [n]
+
+ - FaceDetectFaceScores:
+ type: uint8_t
+ direction: out
+ description: |
+ Confidence score of each of the detected faces. The range of score is
+ [0, 100]. The number of values should be the number of faces reported
+ in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceScores control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_SCORES.
+ size: [n]
+
+ - FaceDetectFaceLandmarks:
+ type: Point
+ direction: out
+ description: |
+ Array of human face landmark coordinates in format [..., left_eye_i,
+ right_eye_i, mouth_i, left_eye_i+1, ...], with i = index of face. The
+ number of values should be 3 * the number of faces reported in
+ FaceDetectFaceRectangles.
+
+ The FaceDetectFaceLandmarks control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_LANDMARKS.
+ size: [n]
+
+ - FaceDetectFaceIds:
+ type: int32_t
+ direction: out
+ description: |
+ Each detected face is given a unique ID that is valid for as long as the
+ face is visible to the camera device. A face that leaves the field of
+ view and later returns may be assigned a new ID. The number of values
+ should be the number of faces reported in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceIds control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_IDS.
+ size: [n]
+
...
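A hedged sketch of how an application might consume the draft face detection metadata defined above; it assumes the generated controls::draft IDs and a pipeline running in FaceDetectModeSimple or FaceDetectModeFull:

#include <cstddef>
#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/geometry.h>
#include <libcamera/request.h>

/* Sketch: print the rectangle and score of each detected face. */
void printFaces(const libcamera::Request *request)
{
	const libcamera::ControlList &meta = request->metadata();

	const auto rects = meta.get(libcamera::controls::draft::FaceDetectFaceRectangles);
	const auto scores = meta.get(libcamera::controls::draft::FaceDetectFaceScores);
	if (!rects || !scores)
		return;

	for (std::size_t i = 0; i < rects->size(); i++)
		std::cout << "face " << i << ": " << (*rects)[i].toString()
			  << " score " << unsigned((*scores)[i]) << std::endl;
}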
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
index cb097f88..7524c5d2 100644
--- a/src/libcamera/control_ids_rpi.yaml
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -9,21 +9,53 @@ vendor: rpi
controls:
- StatsOutputEnable:
type: bool
+ direction: inout
description: |
- Toggles the Raspberry Pi IPA to output a binary dump of the hardware
- generated statistics through the Request metadata in the Bcm2835StatsOutput
- control.
+ Toggles the Raspberry Pi IPA to output the hardware generated statistics.
+
+ When this control is set to true, the IPA outputs a binary dump of the
+ hardware generated statistics through the Request metadata in the
+ Bcm2835StatsOutput control.
\sa Bcm2835StatsOutput
- Bcm2835StatsOutput:
type: uint8_t
size: [n]
+ direction: out
description: |
- Span of the BCM2835 ISP generated statistics for the current frame. This
- is sent in the Request metadata if the StatsOutputEnable is set to true.
- The statistics struct definition can be found in include/linux/bcm2835-isp.h.
+ Span of the BCM2835 ISP generated statistics for the current frame.
+
+ This is sent in the Request metadata if the StatsOutputEnable is set to
+ true. The statistics struct definition can be found in
+ include/linux/bcm2835-isp.h.
\sa StatsOutputEnable
+ - ScalerCrops:
+ type: Rectangle
+ size: [n]
+ direction: out
+ description: |
+ An array of rectangles, where each element has the same functionality
+ as the ScalerCrop control. This control allows the
+ Raspberry Pi pipeline handler to control individual scaler crops per
+ output stream.
+
+ The order of rectangles passed into the control must match the order of
+ streams configured by the application. The pipeline handler will only
+ configure crop rectangles up to the number of output streams configured.
+ All subsequent rectangles passed into this control are ignored by the
+ pipeline handler.
+
+ If both rpi::ScalerCrops and ScalerCrop controls are present in a
+ ControlList, the latter is discarded, and crops are obtained from this
+ control.
+
+ Note that using different crop rectangles for each output stream with
+ this control is only applicable on the Pi5/PiSP platform. This control
+ should also be considered temporary/draft and will be replaced with
+ official libcamera API support for per-stream controls in the future.
+
+ \sa ScalerCrop
...
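As an illustrative sketch (not part of this patch), per-stream crops could be requested as below; the order of the rectangles must match the order of the configured streams:

#include <vector>

#include <libcamera/control_ids.h>
#include <libcamera/geometry.h>
#include <libcamera/request.h>

/* Sketch: one crop per configured output stream, in configuration order. */
void setPerStreamCrops(libcamera::Request *request)
{
	const std::vector<libcamera::Rectangle> crops = {
		libcamera::Rectangle(0, 0, 1920, 1080),   /* stream 0 */
		libcamera::Rectangle(480, 270, 960, 540), /* stream 1 */
	};

	request->controls().set(libcamera::controls::rpi::ScalerCrops,
				libcamera::Span<const libcamera::Rectangle>(crops.data(),
									    crops.size()));
}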
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
index d42447d0..6752eb98 100644
--- a/src/libcamera/control_ranges.yaml
+++ b/src/libcamera/control_ranges.yaml
@@ -13,6 +13,8 @@ ranges:
draft: 10000
# Raspberry Pi vendor controls
rpi: 20000
- # Next range starts at 30000
+ # Controls for debug metadata
+ debug: 30000
+ # Next range starts at 40000
...
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index 0cf719bd..17834648 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
#include "libcamera/internal/control_serializer.h"
@@ -281,6 +281,7 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
entry.id = id->id();
entry.type = id->type();
entry.offset = values.offset();
+ entry.direction = static_cast<ControlId::DirectionFlags::Type>(id->direction());
entries.write(&entry);
store(info, values);
@@ -493,12 +494,17 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
/* If we're using a local id map, populate it. */
if (localIdMap) {
+ ControlId::DirectionFlags flags{
+ static_cast<ControlId::Direction>(entry->direction)
+ };
+
/**
* \todo Find a way to preserve the control name for
* debugging purposes.
*/
controlIds_.emplace_back(std::make_unique<ControlId>(entry->id,
- "", type));
+ "", "local", type,
+ flags));
(*localIdMap)[entry->id] = controlIds_.back().get();
}
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index cf08b34a..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
#include "libcamera/internal/control_validator.h"
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 16d3547c..70f6f609 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
-#include <iomanip>
#include <sstream>
-#include <string>
#include <string.h>
+#include <string>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -55,12 +54,15 @@ static constexpr size_t ControlValueSize[] = {
[ControlTypeNone] = 0,
[ControlTypeBool] = sizeof(bool),
[ControlTypeByte] = sizeof(uint8_t),
+ [ControlTypeUnsigned16] = sizeof(uint16_t),
+ [ControlTypeUnsigned32] = sizeof(uint32_t),
[ControlTypeInteger32] = sizeof(int32_t),
[ControlTypeInteger64] = sizeof(int64_t),
[ControlTypeFloat] = sizeof(float),
[ControlTypeString] = sizeof(char),
[ControlTypeRectangle] = sizeof(Rectangle),
[ControlTypeSize] = sizeof(Size),
+ [ControlTypePoint] = sizeof(Point),
};
} /* namespace */
@@ -74,10 +76,14 @@ static constexpr size_t ControlValueSize[] = {
* The control stores a boolean value
* \var ControlTypeByte
* The control stores a byte value as an unsigned 8-bit integer
+ * \var ControlTypeUnsigned16
+ * The control stores an unsigned 16-bit integer value
+ * \var ControlTypeUnsigned32
+ * The control stores an unsigned 32-bit integer value
* \var ControlTypeInteger32
- * The control stores a 32-bit integer value
+ * The control stores a signed 32-bit integer value
* \var ControlTypeInteger64
- * The control stores a 64-bit integer value
+ * The control stores a signed 64-bit integer value
* \var ControlTypeFloat
* The control stores a 32-bit floating point value
* \var ControlTypeString
@@ -230,6 +236,16 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeUnsigned16: {
+ const uint16_t *value = reinterpret_cast<const uint16_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
+ case ControlTypeUnsigned32: {
+ const uint32_t *value = reinterpret_cast<const uint32_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
case ControlTypeInteger32: {
const int32_t *value = reinterpret_cast<const int32_t *>(data);
str += std::to_string(*value);
@@ -255,6 +271,11 @@ std::string ControlValue::toString() const
str += value->toString();
break;
}
+ case ControlTypePoint: {
+ const Point *value = reinterpret_cast<const Point *>(data);
+ str += value->toString();
+ break;
+ }
case ControlTypeNone:
case ControlTypeString:
break;
@@ -389,8 +410,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
* \brief Construct a ControlId instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
* \param[in] type The control data type
- */
+ * \param[in] direction The direction of the control, i.e. whether it can be used in Controls or Metadata
+ * \param[in] size The size of the array control, or 0 if scalar control
+ * \param[in] enumStrMap The map from enum names to values (optional)
+ */
+ControlId::ControlId(unsigned int id, const std::string &name,
+ const std::string &vendor, ControlType type,
+ DirectionFlags direction, std::size_t size,
+ const std::map<std::string, int32_t> &enumStrMap)
+ : id_(id), name_(name), vendor_(vendor), type_(type),
+ direction_(direction), size_(size), enumStrMap_(enumStrMap)
+{
+ for (const auto &pair : enumStrMap_)
+ reverseMap_[pair.second] = pair.first;
+}
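For illustration only, a vendor control ID with both directions and an enum name map could be constructed with the extended constructor as below; the ID value and names are hypothetical, and the flag operators are assumed to be declared for ControlId::Direction in controls.h:

#include <cstdint>
#include <map>
#include <string>

#include <libcamera/controls.h>

using namespace libcamera;

/* Sketch: a hypothetical vendor control with direction flags and enums. */
static const std::map<std::string, std::int32_t> exampleEnums = {
	{ "ExampleModeOff", 0 },
	{ "ExampleModeOn", 1 },
};

static const ControlId exampleMode(0x10001, "ExampleMode", "acme",
				   ControlTypeInteger32,
				   ControlId::Direction::In | ControlId::Direction::Out,
				   0, exampleEnums);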
/**
* \fn unsigned int ControlId::id() const
@@ -405,12 +440,68 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \fn const std::string &ControlId::vendor() const
+ * \brief Retrieve the vendor name
+ * \return The vendor name, as a string
+ */
+
+/**
* \fn ControlType ControlId::type() const
* \brief Retrieve the control data type
* \return The control data type
*/
/**
+ * \fn DirectionFlags ControlId::direction() const
+ * \brief Return the direction that the control can be used in
+ *
+ * This is similar to \sa isInput() and \sa isOutput(), but returns the
+ * direction flags instead of a boolean for each direction.
+ *
+ * \return The direction flags corresponding to if the control can be used as
+ * an input control or as output metadata
+ */
+
+/**
+ * \fn bool ControlId::isInput() const
+ * \brief Determine if the control is available to be used as an input control
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the former.
+ *
+ * \return True if the control can be used as an input control, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isOutput() const
+ * \brief Determine if the control is available to be used in output metadata
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the latter.
+ *
+ * \return True if the control can be returned in output metadata, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isArray() const
+ * \brief Determine if the control is an array control
+ * \return True if the control is an array control, false otherwise
+ */
+
+/**
+ * \fn std::size_t ControlId::size() const
+ * \brief Retrieve the size of the control if it is an array control
+ * \return The size of the array control, size_t::max for dynamic extent, or 0
+ * for non-array
+ */
+
+/**
+ * \fn const std::map<int32_t, std::string> &ControlId::enumerators() const
+ * \brief Retrieve the map of enum values to enum names
+ * \return The map of enum values to enum names
+ */
+
+/**
* \fn bool operator==(unsigned int lhs, const ControlId &rhs)
* \brief Compare a ControlId with a control numerical ID
* \param[in] lhs Left-hand side numerical ID
@@ -429,6 +520,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \enum ControlId::Direction
+ * \brief The direction the control is capable of being passed from/to
+ *
+ * \var ControlId::Direction::In
+ * \brief The control can be passed as input in controls
+ *
+ * \var ControlId::Direction::Out
+ * \brief The control can be returned as output in metadata
+ */
+
+/**
+ * \typedef ControlId::DirectionFlags
+ * \brief A wrapper for ControlId::Direction so that it can be used as flags
+ */
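A small sketch (illustrative only) of how the new direction accessors might be used to classify a camera's supported controls:

#include <iostream>
#include <memory>

#include <libcamera/camera.h>
#include <libcamera/controls.h>

/* Sketch: report which controls can be set and which appear in metadata. */
void listDirections(const std::shared_ptr<libcamera::Camera> &camera)
{
	for (const auto &[id, info] : camera->controls()) {
		std::cout << id->name()
			  << (id->isInput() ? " [in]" : "")
			  << (id->isOutput() ? " [out]" : "")
			  << '\n';
	}
}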
+
+/**
* \class Control
* \brief Describe a control and its intrinsic properties
*
@@ -456,10 +563,14 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
- * \fn Control::Control(unsigned int id, const char *name)
+ * \fn Control::Control(unsigned int id, const char *name, const char *vendor)
* \brief Construct a Control instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
+ * \param[in] direction The direction of the control, i.e. whether it can be
+ * used in Controls or Metadata
+ * \param[in] enumStrMap The map from enum names to values (optional)
*
* The control data type is automatically deduced from the template type T.
*/
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
index 9f64eb51..d551b908 100644
--- a/src/libcamera/converter.cpp
+++ b/src/libcamera/converter.cpp
@@ -2,7 +2,7 @@
/*
* Copyright 2022 NXP
*
- * converter.cpp - Generic format converter interface
+ * Generic format converter interface
*/
#include "libcamera/internal/converter.h"
@@ -11,10 +11,12 @@
#include <libcamera/base/log.h>
+#include <libcamera/stream.h>
+
#include "libcamera/internal/media_device.h"
/**
- * \file internal/converter.h
+ * \file converter.h
* \brief Abstract converter
*/
@@ -35,13 +37,38 @@ LOG_DEFINE_CATEGORY(Converter)
*/
/**
+ * \enum Converter::Feature
+ * \brief Specify the features supported by the converter
+ * \var Converter::Feature::None
+ * \brief No extra features supported by the converter
+ * \var Converter::Feature::InputCrop
+ * \brief Cropping capability at input is supported by the converter
+ */
+
+/**
+ * \typedef Converter::Features
+ * \brief A bitwise combination of features supported by the converter
+ */
+
+/**
+ * \enum Converter::Alignment
+ * \brief The alignment mode specified when adjusting the converter input or
+ * output sizes
+ * \var Converter::Alignment::Down
+ * \brief Adjust the Converter sizes to a smaller valid size
+ * \var Converter::Alignment::Up
+ * \brief Adjust the Converter sizes to a larger valid size
+ */
+
+/**
* \brief Construct a Converter instance
* \param[in] media The media device implementing the converter
+ * \param[in] features Features flags representing supported features
*
* This searches for the entity implementing the data streaming function in the
* media graph entities and use its device node as the converter device node.
*/
-Converter::Converter(MediaDevice *media)
+Converter::Converter(MediaDevice *media, Features features)
{
const std::vector<MediaEntity *> &entities = media->entities();
auto it = std::find_if(entities.begin(), entities.end(),
@@ -56,6 +83,7 @@ Converter::Converter(MediaDevice *media)
}
deviceNode_ = (*it)->deviceNode();
+ features_ = features;
}
Converter::~Converter()
@@ -93,6 +121,26 @@ Converter::~Converter()
*/
/**
+ * \fn Converter::adjustInputSize()
+ * \brief Adjust the converter input \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter input stream
+ * \param[in] size The converter input size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter input size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::adjustOutputSize()
+ * \brief Adjust the converter output \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter output stream
+ * \param[in] size The converter output size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter output size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
* \fn Converter::strideAndFrameSize()
* \brief Retrieve the output stride and frame size for an input configuration
* \param[in] pixelFormat Input stream pixel format
@@ -101,6 +149,16 @@ Converter::~Converter()
*/
/**
+ * \fn Converter::validateOutput()
+ * \brief Validate and possibly adjust \a cfg to a valid converter output
+ * \param[inout] cfg The StreamConfiguration to validate and adjust
+ * \param[out] adjusted Set to true if \a cfg has been adjusted
+ * \param[in] align The desired alignment
+ * \return 0 if \a cfg is valid or has been adjusted, a negative error code
+ * otherwise if \a cfg cannot be adjusted
+ */
+
+/**
* \fn Converter::configure()
* \brief Configure a set of output stream conversion from an input stream
* \param[in] inputCfg Input stream configuration
@@ -109,14 +167,21 @@ Converter::~Converter()
*/
/**
+ * \fn Converter::isConfigured()
+ * \brief Check if a given stream is configured
+ * \param[in] stream The output stream
+ * \return True if the \a stream is configured or false otherwise
+ */
+
+/**
* \fn Converter::exportBuffers()
* \brief Export buffers from the converter device
- * \param[in] output Output stream index exporting the buffers
+ * \param[in] stream Output stream pointer exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store allocated buffers
*
* This function operates similarly to V4L2VideoDevice::exportBuffers() on the
- * output stream indicated by the \a output index.
+ * output stream indicated by \a stream.
*
* \return The number of allocated buffers on success or a negative error code
* otherwise
@@ -137,7 +202,7 @@ Converter::~Converter()
* \fn Converter::queueBuffers()
* \brief Queue buffers to converter device
* \param[in] input The frame buffer to apply the conversion
- * \param[out] outputs The container holding the output stream indexes and
+ * \param[out] outputs The container holding the output stream pointers and
* their respective frame buffer outputs.
*
* This function queues the \a input frame buffer on the output streams of the
@@ -148,6 +213,52 @@ Converter::~Converter()
*/
/**
+ * \fn Converter::setInputCrop()
+ * \brief Set the crop rectangle \a rect for \a stream
+ * \param[in] stream The output stream
+ * \param[inout] rect The crop rectangle to apply and return the rectangle
+ * that is actually applied
+ *
+ * Set the crop rectangle \a rect for \a stream provided the converter supports
+ * cropping. The converter has the Feature::InputCrop flag in this case.
+ *
+ * The underlying hardware can adjust the rectangle supplied by the user
+ * due to hardware constraints. The caller can inspect \a rect to determine the
+ * actual rectangle that has been applied by the converter, after this function
+ * returns.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::inputCropBounds()
+ * \brief Retrieve the crop bounds of the converter
+ *
+ * Retrieve the minimum and maximum crop bounds of the converter. This can be
+ * used to query the crop bounds before configuring a stream.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \fn Converter::inputCropBounds(const Stream *stream)
+ * \brief Retrieve the crop bounds for \a stream
+ * \param[in] stream The output stream
+ *
+ * Retrieve the minimum and maximum crop bounds for \a stream. The converter
+ * should support cropping (Feature::InputCrop).
+ *
+ * The crop bounds depend on the configuration of the output stream and hence
+ * this function should be called after the \a stream has been configured using
+ * configure().
+ *
+ * When called with an unconfigured \a stream, this function returns a pair of
+ * null rectangles.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
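An illustrative pipeline-handler sketch using the cropping API documented above; it assumes \a stream has already been configured:

#include <libcamera/geometry.h>
#include <libcamera/stream.h>

#include "libcamera/internal/converter.h"

/* Sketch: apply a centred half-size crop when the converter supports it. */
void applyCenterCrop(libcamera::Converter *converter,
		     const libcamera::Stream *stream)
{
	using Feature = libcamera::Converter::Feature;

	if (!(converter->features() & Feature::InputCrop))
		return;

	const auto bounds = converter->inputCropBounds(stream);

	/* Centre a half-size crop inside the maximum bound. */
	libcamera::Rectangle crop = bounds.second;
	crop.width /= 2;
	crop.height /= 2;
	crop.x += static_cast<int>(crop.width) / 2;
	crop.y += static_cast<int>(crop.height) / 2;

	/* On return, crop holds the rectangle the hardware actually applied. */
	converter->setInputCrop(stream, &crop);
}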
+
+/**
* \var Converter::inputBufferReady
* \brief A signal emitted when the input frame buffer completes
*/
@@ -158,12 +269,23 @@ Converter::~Converter()
*/
/**
+ * \var Converter::features_
+ * \brief Stores the features supported by the converter
+ */
+
+/**
* \fn Converter::deviceNode()
* \brief The converter device node attribute accessor
* \return The converter device node string
*/
/**
+ * \fn Converter::features()
+ * \brief Retrieve the features supported by the converter
+ * \return The converter Features flags
+ */
+
+/**
* \class ConverterFactoryBase
* \brief Base class for converter factories
*
@@ -263,8 +385,9 @@ std::vector<std::string> ConverterFactoryBase::names()
for (ConverterFactoryBase *factory : factories) {
list.push_back(factory->name_);
- for (auto alias : factory->compatibles())
- list.push_back(alias);
+
+ const auto &compatibles = factory->compatibles();
+ list.insert(list.end(), compatibles.begin(), compatibles.end());
}
return list;
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
index a5fc979b..ee05b798 100644
--- a/src/libcamera/converter/converter_v4l2_m2m.cpp
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
- * converter_v4l2_m2m.cpp - V4L2 M2M Format converter
+ * V4L2 M2M Format converter
*/
#include "libcamera/internal/converter/converter_v4l2_m2m.h"
@@ -23,7 +23,7 @@
#include "libcamera/internal/v4l2_videodevice.h"
/**
- * \file internal/converter/converter_v4l2_m2m.h
+ * \file converter/converter_v4l2_m2m.h
* \brief V4L2 M2M based converter
*/
@@ -31,25 +31,71 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Converter)
+namespace {
+
+int getCropBounds(V4L2VideoDevice *device, Rectangle &minCrop,
+ Rectangle &maxCrop)
+{
+ Rectangle minC;
+ Rectangle maxC;
+
+ /* Find crop bounds */
+ minC.width = 1;
+ minC.height = 1;
+ maxC.width = UINT_MAX;
+ maxC.height = UINT_MAX;
+
+ int ret = device->setSelection(V4L2_SEL_TGT_CROP, &minC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query minimum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ ret = device->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query maximum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ /* Reset the crop to its maximum */
+ ret = device->setSelection(V4L2_SEL_TGT_CROP, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not reset selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ minCrop = minC;
+ maxCrop = maxC;
+ return 0;
+}
+
+} /* namespace */
+
/* -----------------------------------------------------------------------------
- * V4L2M2MConverter::Stream
+ * V4L2M2MConverter::V4L2M2MStream
*/
-V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
- : converter_(converter), index_(index)
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
{
m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
- m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
- m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
int ret = m2m_->open();
if (ret < 0)
m2m_.reset();
}
-int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
@@ -98,16 +144,23 @@ int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
inputBufferCount_ = inputCfg.bufferCount;
outputBufferCount_ = outputCfg.bufferCount;
+ if (converter_->features() & Feature::InputCrop) {
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
-int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
-int V4L2M2MConverter::Stream::start()
+int V4L2M2MConverter::V4L2M2MStream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
@@ -134,7 +187,7 @@ int V4L2M2MConverter::Stream::start()
return 0;
}
-void V4L2M2MConverter::Stream::stop()
+void V4L2M2MConverter::V4L2M2MStream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
@@ -142,7 +195,7 @@ void V4L2M2MConverter::Stream::stop()
m2m_->output()->releaseBuffers();
}
-int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
@@ -155,12 +208,27 @@ int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *outp
return 0;
}
-std::string V4L2M2MConverter::Stream::logPrefix() const
+int V4L2M2MConverter::V4L2M2MStream::getInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->getSelection(target, rect);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::setInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->setSelection(target, rect);
+}
+
+std::pair<Rectangle, Rectangle> V4L2M2MConverter::V4L2M2MStream::inputCropBounds()
{
- return "stream" + std::to_string(index_);
+ return inputCropBounds_;
}
-void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
+{
+ return stream_->configuration().toString();
+}
+
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
@@ -172,7 +240,7 @@ void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
}
}
-void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
@@ -205,6 +273,15 @@ V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
m2m_.reset();
return;
}
+
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (!ret && inputCropBounds_.first != inputCropBounds_.second) {
+ features_ |= Feature::InputCrop;
+
+ LOG(Converter, Info)
+ << "Converter supports cropping on its input";
+ }
}
/**
@@ -325,6 +402,127 @@ V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
}
/**
+ * \copydoc libcamera::Converter::adjustInputSize
+ */
+Size V4L2M2MConverter::adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->output()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->output()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustOutputSize
+ */
+Size V4L2M2MConverter::adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->capture()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->capture()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+Size V4L2M2MConverter::adjustSizes(const Size &cfgSize,
+ const std::vector<SizeRange> &ranges,
+ Alignment align)
+{
+ Size size = cfgSize;
+
+ if (ranges.size() == 1) {
+ /*
+ * The device supports either V4L2_FRMSIZE_TYPE_CONTINUOUS or
+ * V4L2_FRMSIZE_TYPE_STEPWISE.
+ */
+ const SizeRange &range = *ranges.begin();
+
+ size.width = std::clamp(size.width, range.min.width,
+ range.max.width);
+ size.height = std::clamp(size.height, range.min.height,
+ range.max.height);
+
+ /*
+ * Check if any alignment is needed. If the sizes are already
+ * aligned, or the device supports V4L2_FRMSIZE_TYPE_CONTINUOUS
+ * with hStep and vStep equal to 1, we're done here.
+ */
+ int widthR = size.width % range.hStep;
+ int heightR = size.height % range.vStep;
+
+ /* Align up or down according to the caller request. */
+
+ if (widthR != 0)
+ size.width = size.width - widthR
+ + ((align == Alignment::Up) ? range.hStep : 0);
+
+ if (heightR != 0)
+ size.height = size.height - heightR
+ + ((align == Alignment::Up) ? range.vStep : 0);
+ } else {
+ /*
+ * The device supports V4L2_FRMSIZE_TYPE_DISCRETE, find the
+ * size closer to the requested output configuration.
+ *
+ * The size ranges vector is not ordered, so we sort it first.
+ * If we align up, start from the larger element.
+ */
+ std::vector<Size> sizes;
+ sizes.reserve(ranges.size());
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+ std::sort(sizes.begin(), sizes.end());
+
+ if (align == Alignment::Up)
+ std::reverse(sizes.begin(), sizes.end());
+
+ /*
+ * Return true if s2 is valid according to the desired
+ * alignment: smaller than s1 if we align down, larger than s1
+ * if we align up.
+ */
+ auto nextSizeValid = [](const Size &s1, const Size &s2, Alignment a) {
+ return a == Alignment::Down
+ ? (s1.width > s2.width && s1.height > s2.height)
+ : (s1.width < s2.width && s1.height < s2.height);
+ };
+
+ Size newSize;
+ for (const Size &sz : sizes) {
+ if (!nextSizeValid(size, sz, align))
+ break;
+
+ newSize = sz;
+ }
+
+ if (newSize.isNull()) {
+ LOG(Converter, Error)
+ << "Cannot adjust " << cfgSize
+ << " to a supported converter size";
+ return {};
+ }
+
+ size = newSize;
+ }
+
+ return size;
+}
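For illustration, a caller could rely on this adjustment logic as follows; the pixel format and sizes are arbitrary:

#include <libcamera/formats.h>
#include <libcamera/geometry.h>

#include "libcamera/internal/converter.h"

/* Sketch: snap a requested size down to the nearest supported one. */
libcamera::Size clampOutputSize(libcamera::Converter *converter)
{
	const libcamera::Size requested(1923, 1081);

	/* A null Size is returned when no supported size qualifies. */
	return converter->adjustOutputSize(libcamera::formats::YUV420,
					   requested,
					   libcamera::Converter::Alignment::Down);
}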
+
+/**
* \copydoc libcamera::Converter::configure
*/
int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
@@ -333,21 +531,24 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
int ret = 0;
streams_.clear();
- streams_.reserve(outputCfgs.size());
for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
- Stream &stream = streams_.emplace_back(this, i);
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
- if (!stream.isValid()) {
+ if (!stream->isValid()) {
LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
}
- ret = stream.configure(inputCfg, outputCfgs[i]);
+ ret = stream->configure(inputCfg, cfg);
if (ret < 0)
break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
}
if (ret < 0) {
@@ -359,15 +560,61 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
}
/**
+ * \copydoc libcamera::Converter::isConfigured
+ */
+bool V4L2M2MConverter::isConfigured(const Stream *stream) const
+{
+ return streams_.find(stream) != streams_.end();
+}
+
+/**
* \copydoc libcamera::Converter::exportBuffers
*/
-int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- if (output >= streams_.size())
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
+ return -EINVAL;
+
+ return iter->second->exportBuffers(count, buffers);
+}
+
+/**
+ * \copydoc libcamera::Converter::setInputCrop
+ */
+int V4L2M2MConverter::setInputCrop(const Stream *stream, Rectangle *rect)
+{
+ if (!(features_ & Feature::InputCrop))
+ return -ENOTSUP;
+
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
return -EINVAL;
+ }
+
+ return iter->second->setInputSelection(V4L2_SEL_TGT_CROP, rect);
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::inputCropBounds()
+ * \copydoc libcamera::Converter::inputCropBounds()
+ */
+
+/**
+ * \copydoc libcamera::Converter::inputCropBounds(const Stream *stream)
+ */
+std::pair<Rectangle, Rectangle>
+V4L2M2MConverter::inputCropBounds(const Stream *stream)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return {};
+ }
- return streams_[output].exportBuffers(count, buffers);
+ return iter->second->inputCropBounds();
}
/**
@@ -377,8 +624,8 @@ int V4L2M2MConverter::start()
{
int ret;
- for (Stream &stream : streams_) {
- ret = stream.start();
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
if (ret < 0) {
stop();
return ret;
@@ -393,41 +640,87 @@ int V4L2M2MConverter::start()
*/
void V4L2M2MConverter::stop()
{
- for (Stream &stream : utils::reverse(streams_))
- stream.stop();
+ for (auto &iter : streams_)
+ iter.second->stop();
+}
+
+/**
+ * \copydoc libcamera::Converter::validateOutput
+ */
+int V4L2M2MConverter::validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align)
+{
+ V4L2VideoDevice *capture = m2m_->capture();
+ V4L2VideoDevice::Formats fmts = capture->formats();
+
+ if (adjusted)
+ *adjusted = false;
+
+ PixelFormat fmt = cfg->pixelFormat;
+ V4L2PixelFormat v4l2PixFmt = capture->toV4L2PixelFormat(fmt);
+
+ auto it = fmts.find(v4l2PixFmt);
+ if (it == fmts.end()) {
+ it = fmts.begin();
+ v4l2PixFmt = it->first;
+ cfg->pixelFormat = v4l2PixFmt.toPixelFormat();
+
+ if (adjusted)
+ *adjusted = true;
+
+ LOG(Converter, Info)
+ << "Converter output pixel format adjusted to "
+ << cfg->pixelFormat;
+ }
+
+ const Size cfgSize = cfg->size;
+ cfg->size = adjustSizes(cfgSize, it->second, align);
+
+ if (cfg->size.isNull())
+ return -EINVAL;
+
+ if (cfg->size.width != cfgSize.width ||
+ cfg->size.height != cfgSize.height) {
+ LOG(Converter, Info)
+ << "Converter size adjusted to "
+ << cfg->size;
+ if (adjusted)
+ *adjusted = true;
+ }
+
+ return 0;
}
/**
* \copydoc libcamera::Converter::queueBuffers
*/
int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+ const std::map<const Stream *, FrameBuffer *> &outputs)
{
- unsigned int mask = 0;
+ std::set<FrameBuffer *> outputBufs;
int ret;
/*
* Validate the outputs as a sanity check: at least one output is
* required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
+ * streams can reference the same output framebuffer.
*/
if (outputs.empty())
return -EINVAL;
- for (auto [index, buffer] : outputs) {
+ for (auto [stream, buffer] : outputs) {
if (!buffer)
return -EINVAL;
- if (index >= streams_.size())
- return -EINVAL;
- if (mask & (1 << index))
- return -EINVAL;
- mask |= 1 << index;
+ outputBufs.insert(buffer);
}
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
/* Queue the input and output buffers to all the streams. */
- for (auto [index, buffer] : outputs) {
- ret = streams_[index].queueBuffers(input, buffer);
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
if (ret < 0)
return ret;
}
@@ -444,6 +737,10 @@ int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
return 0;
}
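An illustrative sketch of the new Stream-keyed queueing interface; the stream and buffer pointers come from the caller's configuration:

#include <map>

#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>

#include "libcamera/internal/converter.h"

/* Sketch: convert one input buffer into two output streams. */
int convertFrame(libcamera::Converter *converter,
		 libcamera::FrameBuffer *input,
		 const libcamera::Stream *stream0, libcamera::FrameBuffer *out0,
		 const libcamera::Stream *stream1, libcamera::FrameBuffer *out1)
{
	const std::map<const libcamera::Stream *, libcamera::FrameBuffer *> outputs = {
		{ stream0, out0 },
		{ stream1, out1 },
	};

	/* Completion is signalled via inputBufferReady and outputBufferReady. */
	return converter->queueBuffers(input, outputs);
}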
+/*
+ * \todo This should be extended to include Feature::Flag to denote
+ * what each converter supports feature-wise.
+ */
static std::initializer_list<std::string> compatibles = {
"mtk-mdp",
"pxp",
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
index 2aa72fe4..af1a80fe 100644
--- a/src/libcamera/converter/meson.build
+++ b/src/libcamera/converter/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'converter_v4l2_m2m.cpp'
])
diff --git a/src/libcamera/debug_controls.cpp b/src/libcamera/debug_controls.cpp
new file mode 100644
index 00000000..33960231
--- /dev/null
+++ b/src/libcamera/debug_controls.cpp
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Helper to easily record debug metadata inside libcamera.
+ */
+
+#include "libcamera/internal/debug_controls.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DebugControls)
+
+/**
+ * \file debug_controls.h
+ * \brief Helper to easily record debug metadata inside libcamera
+ */
+
+/**
+ * \class DebugMetadata
+ * \brief Helper to record metadata for later use
+ *
+ * Metadata is a useful tool for debugging the internal state of libcamera. It
+ * has the benefit that it is easy to use and related tooling is readily
+ * available. The difficulty is that the metadata control list is often not
+ * directly available (either because the variable to debug lives inside
+ * process() of an IPA or inside a closed algorithm class with no direct access
+ * to the IPA and therefore the metadata list).
+ *
+ * This class helps in both cases. It allows forwarding the data to a parent,
+ * or alternatively recording the data and copying it to the metadata list at
+ * a later point in time, when that list becomes available. Both mechanisms
+ * allow easy reuse and loose coupling.
+ *
+ * Typical usage is to instantiate a DebugMetadata object in every
+ * class/algorithm where debug metadata shall be recorded (the inner object). If
+ * the IPA doesn't support debug metadata, the object is still usable, but the
+ * debug data gets dropped. If the IPA supports debug metadata it will either
+ * register a parent DebugMetadata object on the inner object or manually
+ * retrieve the data using enable()/moveToList().
+ *
+ * The concepts of forwarding to a parent and recording for later retrieval are
+ * mutually exclusive and the parent takes precedence. E.g. it is not allowed to
+ * enable a DebugMetadata object, log entries to it and later set the parent.
+ *
+ * This is done to keep the path open for using other means of data transport
+ * (like tracing). For every tracing event a corresponding context needs to be
+ * available at set() time. The parent can be treated as such a context, and
+ * the top level object (the one on which enable() gets called) also lives in
+ * a place where that information is available.
+ */
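A hedged sketch of the top-level pattern described above; the list names are illustrative:

#include <libcamera/controls.h>

#include "libcamera/internal/debug_controls.h"

/*
 * Sketch: enable recording from the request controls, let inner code log
 * values, then move the cached entries into the frame metadata once it is
 * available.
 */
void processRequest(libcamera::DebugMetadata &debug,
		    const libcamera::ControlList &requestControls,
		    libcamera::ControlList &metadata)
{
	/* Honour controls::DebugMetadataEnable from the application. */
	debug.enableByControl(requestControls);

	/* ... algorithms call debug.set(...) while processing ... */

	/* Copy whatever was recorded into the completed frame's metadata. */
	debug.moveEntries(metadata);
}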
+
+/**
+ * \fn DebugMetadata::enableByControl()
+ * \brief Enable based on controls::DebugMetadataEnable in the supplied
+ * ControlList
+ * \param[in] controls The supplied ControlList
+ *
+ * This function looks for controls::DebugMetadataEnable and enables or disables
+ * debug metadata handling accordingly.
+ */
+void DebugMetadata::enableByControl(const ControlList &controls)
+{
+ const auto &ctrl = controls.get(controls::DebugMetadataEnable);
+ if (ctrl)
+ enable(*ctrl);
+}
+
+/**
+ * \fn DebugMetadata::enable()
+ * \brief Enable or disable metadata handling
+ * \param[in] enable The enable state
+ *
+ * When \a enable is true, all calls to set() get cached and can later be
+ * retrieved using moveEntries(). When \a enable is false, the cache gets
+ * cleared and no further metadata is recorded.
+ *
+ * Forwarding to a parent is independent of the enabled state.
+ */
+void DebugMetadata::enable(bool enable)
+{
+ enabled_ = enable;
+ if (!enabled_)
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::setParent()
+ * \brief Set the parent metadata handler to \a parent
+ * \param[in] parent The parent handler
+ *
+ * When a \a parent is set, all further calls to set() are unconditionally
+ * forwarded to that instance.
+ *
+ * The parent can be reset by passing a nullptr.
+ */
+void DebugMetadata::setParent(DebugMetadata *parent)
+{
+ parent_ = parent;
+
+ if (!parent_)
+ return;
+
+ if (!cache_.empty())
+ LOG(DebugControls, Error)
+ << "Controls were recorded before setting a parent."
+ << " These are dropped.";
+
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::moveEntries()
+ * \brief Move all cached entries into control list \a list
+ * \param[in] list The control list
+ *
+ * This function moves all entries into the list specified by \a list. Duplicate
+ * entries in \a list get overwritten.
+ */
+void DebugMetadata::moveEntries(ControlList &list)
+{
+ list.merge(std::move(cache_), ControlList::MergePolicy::OverwriteExisting);
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::set(const Control<T> &ctrl, const V &value)
+ * \brief Set the value of \a ctrl to \a value
+ * \param[in] ctrl The control to set
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+
+/**
+ * \fn DebugMetadata::set(unsigned int id, const ControlValue &value)
+ * \brief Set the value of control \a id to \a value
+ * \param[in] id The id of the control
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+void DebugMetadata::set(unsigned int id, const ControlValue &value)
+{
+ if (parent_) {
+ parent_->set(id, value);
+ return;
+ }
+
+ if (!enabled_)
+ return;
+
+ cache_.set(id, value);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
index 777441e8..94d0a575 100644
--- a/src/libcamera/delayed_controls.cpp
+++ b/src/libcamera/delayed_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#include "libcamera/internal/delayed_controls.h"
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index 42b5ba6c..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
#include "libcamera/internal/device_enumerator.h"
@@ -56,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 686bb809..7866885c 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#include "libcamera/internal/device_enumerator_sysfs.h"
@@ -33,7 +33,7 @@ int DeviceEnumeratorSysfs::init()
int DeviceEnumeratorSysfs::enumerate()
{
struct dirent *ent;
- DIR *dir;
+ DIR *dir = nullptr;
static const char * const sysfs_dirs[] = {
"/sys/subsystem/media/devices",
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 0abc1248..4e20a3cc 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
#include "libcamera/internal/device_enumerator_udev.h"
@@ -332,6 +332,14 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
+ if (!dev) {
+ int err = errno;
+ LOG(DeviceEnumerator, Warning)
+ << "Ignoring notfication received without a device: "
+ << strerror(err);
+ return;
+ }
+
std::string_view action(udev_device_get_action(dev));
std::string_view deviceNode(udev_device_get_devnode(dev));
diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp
new file mode 100644
index 00000000..d8c62dd6
--- /dev/null
+++ b/src/libcamera/dma_buf_allocator.cpp
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-buf allocations.
+ */
+
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/udmabuf.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/memfd.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/framebuffer.h>
+
+/**
+ * \file dma_buf_allocator.cpp
+ * \brief dma-buf allocator
+ */
+
+namespace libcamera {
+
+#ifndef __DOXYGEN__
+struct DmaBufAllocatorInfo {
+ DmaBufAllocator::DmaBufAllocatorFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaBufAllocatorInfo, 4> providerInfos = { {
+ /*
+ * /dev/dma_heap/linux,cma is the CMA dma-heap. When the cma heap size is
+ * specified on the kernel command line, this gets renamed to "reserved".
+ */
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/linux,cma" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/reserved" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap, "/dev/dma_heap/system" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf, "/dev/udmabuf" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaBufAllocator)
+
+/**
+ * \class DmaBufAllocator
+ * \brief Helper class for dma-buf allocations
+ *
+ * This class wraps a userspace dma-buf provider selected at construction time,
+ * and exposes functions to allocate dma-buffers from this provider.
+ *
+ * Different providers may provide dma-buffers with different properties for
+ * the underlying memory. Which providers are acceptable is specified through
+ * the type argument passed to the DmaBufAllocator() constructor.
+ */
+
+/**
+ * \enum DmaBufAllocator::DmaBufAllocatorFlag
+ * \brief Type of the dma-buf provider
+ * \var DmaBufAllocator::CmaHeap
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaBufAllocator::SystemHeap
+ * \brief Allocate from the system dma-heap, using the page allocator
+ * \var DmaBufAllocator::UDmaBuf
+ * \brief Allocate using a memfd + /dev/udmabuf
+ */
+
+/**
+ * \typedef DmaBufAllocator::DmaBufAllocatorFlags
+ * \brief A bitwise combination of DmaBufAllocator::DmaBufAllocatorFlag values
+ */
+
+/**
+ * \brief Construct a DmaBufAllocator of a given type
+ * \param[in] type The type(s) of the dma-buf providers to allocate from
+ *
+ * The dma-buf provider type is selected with the \a type parameter, which
+ * defaults to the CMA heap. If no provider of the given type can be accessed,
+ * the constructed DmaBufAllocator instance is invalid as indicated by
+ * the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case
+ * the constructed DmaBufAllocator will match one of the types. If multiple
+ * requested types can work on the system, which provider is used is undefined.
+ */
+DmaBufAllocator::DmaBufAllocator(DmaBufAllocatorFlags type)
+{
+ for (const auto &info : providerInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaBufAllocator, Debug) << "Using " << info.deviceNodeName;
+ providerHandle_ = UniqueFD(ret);
+ type_ = info.type;
+ break;
+ }
+
+ if (!providerHandle_.isValid())
+ LOG(DmaBufAllocator, Error) << "Could not open any dma-buf provider";
+}
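For illustration, a caller might accept several provider types and fall back gracefully; the buffer name and helper are hypothetical:

#include <cstddef>

#include <libcamera/base/unique_fd.h>

#include "libcamera/internal/dma_buf_allocator.h"

/* Sketch: prefer CMA memory but accept udmabuf when no heap is available. */
libcamera::UniqueFD allocateScratchBuffer(std::size_t size)
{
	using Flag = libcamera::DmaBufAllocator::DmaBufAllocatorFlag;

	libcamera::DmaBufAllocator allocator(Flag::CmaHeap | Flag::UDmaBuf);
	if (!allocator.isValid())
		return {};

	return allocator.alloc("scratch-buffer", size);
}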
+
+/**
+ * \brief Destroy the DmaBufAllocator instance
+ */
+DmaBufAllocator::~DmaBufAllocator() = default;
+
+/**
+ * \fn DmaBufAllocator::isValid()
+ * \brief Check if the DmaBufAllocator instance is valid
+ * \return True if the DmaBufAllocator is valid, false otherwise
+ */
+UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size)
+{
+ /* Size must be a multiple of the page size. Round it up. */
+ std::size_t pageMask = sysconf(_SC_PAGESIZE) - 1;
+ size = (size + pageMask) & ~pageMask;
+
+ /* udmabuf dma-buffers *must* have the F_SEAL_SHRINK seal. */
+ UniqueFD memfd = MemFd::create(name, size, MemFd::Seal::Shrink);
+ if (!memfd.isValid())
+ return {};
+
+ struct udmabuf_create create;
+
+ create.memfd = memfd.get();
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = size;
+
+ int ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Failed to create dma buf for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ /* The underlying memfd is kept as a reference in the kernel. */
+ return UniqueFD(ret);
+}
+
+UniqueFD DmaBufAllocator::allocFromHeap(const char *name, std::size_t size)
+{
+ struct dma_heap_allocation_data alloc = {};
+ int ret;
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(providerHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+/**
+ * \brief Allocate a dma-buf from the DmaBufAllocator
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, an invalid UniqueFD is returned.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaBufAllocator::alloc(const char *name, std::size_t size)
+{
+ if (!name)
+ return {};
+
+ if (type_ == DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+ return allocFromUDmaBuf(name, size);
+ else
+ return allocFromHeap(name, size);
+}
+
+/**
+ * \brief Allocate and export buffers from the DmaBufAllocator
+ * \param[in] count The number of requested FrameBuffers
+ * \param[in] planeSizes The sizes of planes in each FrameBuffer
+ * \param[out] buffers Array of buffers successfully allocated
+ *
+ * Planes in a FrameBuffer are allocated with a single dma buf.
+ * \todo Add the option to allocate each plane from a separate dma buf.
+ *
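+ * A minimal sketch on a valid allocator, allocating four two-plane buffers
+ * (the plane sizes are illustrative, e.g. NV12 1080p):
+ *
+ * \code{.cpp}
+ * std::vector<std::unique_ptr<FrameBuffer>> buffers;
+ * std::vector<unsigned int> planeSizes = { 1920 * 1080, 1920 * 1080 / 2 };
+ * int ret = allocator.exportBuffers(4, planeSizes, &buffers);
+ * \endcode
+ *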
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int DmaBufAllocator::exportBuffers(unsigned int count,
+ const std::vector<unsigned int> &planeSizes,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (unsigned int i = 0; i < count; ++i) {
+ std::unique_ptr<FrameBuffer> buffer =
+ createBuffer("frame-" + std::to_string(i), planeSizes);
+ if (!buffer) {
+ LOG(DmaBufAllocator, Error) << "Unable to create buffer";
+
+ buffers->clear();
+ return -EINVAL;
+ }
+
+ buffers->push_back(std::move(buffer));
+ }
+
+ return count;
+}
+
+std::unique_ptr<FrameBuffer>
+DmaBufAllocator::createBuffer(std::string name,
+ const std::vector<unsigned int> &planeSizes)
+{
+ std::vector<FrameBuffer::Plane> planes;
+
+ unsigned int frameSize = 0, offset = 0;
+ for (auto planeSize : planeSizes)
+ frameSize += planeSize;
+
+ SharedFD fd(alloc(name.c_str(), frameSize));
+ if (!fd.isValid())
+ return nullptr;
+
+ for (auto planeSize : planeSizes) {
+ planes.emplace_back(FrameBuffer::Plane{ fd, offset, planeSize });
+ offset += planeSize;
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
+}
+
+/**
+ * \class DmaSyncer
+ * \brief Helper class for dma-buf synchronization
+ *
+ * This class ties a userspace dma-buf synchronization sequence to an
+ * object's lifetime.
+ *
+ * It is used when the user needs to access a dma-buf with the CPU, typically
+ * mapped with MappedFrameBuffer, so that the buffer contents are kept
+ * synchronized between the CPU and the ISP.
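+ *
+ * A minimal sketch of the intended RAII usage, assuming \a fd is the
+ * dma-buf's SharedFD and processFrame() stands for any CPU-side access to
+ * the mapped buffer (hypothetical helper):
+ *
+ * \code{.cpp}
+ * {
+ *         DmaSyncer syncer(fd, DmaSyncer::SyncType::Read);
+ *         processFrame();
+ * }
+ * \endcode
+ *
+ * Synchronization ends automatically when the DmaSyncer goes out of scope.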
+ */
+
+/**
+ * \enum DmaSyncer::SyncType
+ * \brief Read and/or write access via the CPU map
+ * \var DmaSyncer::Read
+ * \brief Indicates that the mapped dma-buf will be read by the client via the
+ * CPU map
+ * \var DmaSyncer::Write
+ * \brief Indicates that the mapped dma-buf will be written by the client via the
+ * CPU map
+ * \var DmaSyncer::ReadWrite
+ * \brief Indicates that the mapped dma-buf will be read and written by the
+ * client via the CPU map
+ */
+
+/**
+ * \brief Construct a DmaSyncer with a dma-buf's fd and the access type
+ * \param[in] fd The dma-buf's file descriptor to synchronize
+ * \param[in] type Read and/or write access via the CPU map
+ */
+DmaSyncer::DmaSyncer(SharedFD fd, SyncType type)
+ : fd_(fd)
+{
+ switch (type) {
+ case SyncType::Read:
+ flags_ = DMA_BUF_SYNC_READ;
+ break;
+ case SyncType::Write:
+ flags_ = DMA_BUF_SYNC_WRITE;
+ break;
+ case SyncType::ReadWrite:
+ flags_ = DMA_BUF_SYNC_RW;
+ break;
+ }
+
+ sync(DMA_BUF_SYNC_START);
+}
+
+/**
+ * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other);
+ * \brief Move construct a DmaSyncer from \a other
+ * \param[in] other The other instance
+ */
+
+/**
+ * \fn DmaSyncer::operator=(DmaSyncer &&other);
+ * \brief Move assign a DmaSyncer from \a other
+ * \param[in] other The other instance
+ */
+
+DmaSyncer::~DmaSyncer()
+{
+ /*
+ * DmaSyncer might be moved and left with an empty SharedFD.
+ * Avoid syncing with an invalid file descriptor in this case.
+ */
+ if (fd_.isValid())
+ sync(DMA_BUF_SYNC_END);
+}
+
+void DmaSyncer::sync(uint64_t step)
+{
+ struct dma_buf_sync sync = {
+ .flags = flags_ | step
+ };
+
+ int ret;
+ do {
+ ret = ioctl(fd_.get(), DMA_BUF_IOCTL_SYNC, &sync);
+ } while (ret && (errno == EINTR || errno == EAGAIN));
+
+ if (ret) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Unable to sync dma fd: " << fd_.get()
+ << ", err: " << strerror(ret)
+ << ", flags: " << sync.flags;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
index 7b784778..73299b40 100644
--- a/src/libcamera/fence.cpp
+++ b/src/libcamera/fence.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * fence.cpp - Synchronization fence
+ * Synchronization fence
*/
#include "libcamera/fence.h"
@@ -11,7 +11,7 @@ namespace libcamera {
/**
*
- * \file libcamera/fence.h
+ * \file fence.h
* \brief Definition of the Fence class
*/
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index 955c3fba..bfcdfc08 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
#include "libcamera/internal/formats.h"
-#include <algorithm>
-#include <errno.h>
+#include <map>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -16,7 +15,7 @@
#include <libcamera/formats.h>
/**
- * \file internal/formats.h
+ * \file libcamera/internal/formats.h
* \brief Types and helper functions to handle libcamera image formats
*/
@@ -158,7 +157,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
- .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
{ formats::RGB565_BE, {
.name = "RGB565_BE",
@@ -168,7 +167,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
.packed = false,
.pixelsPerGroup = 1,
- .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
{ formats::BGR888, {
.name = "BGR888",
@@ -270,6 +269,26 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.pixelsPerGroup = 1,
.planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* YUV packed formats. */
{ formats::YUYV, {
@@ -507,6 +526,16 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.pixelsPerGroup = 4,
.planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::R12_CSI2P, {
+ .name = "R12_CSI2P",
+ .format = formats::R12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
{ formats::R12, {
.name = "R12",
.format = formats::R12,
@@ -527,6 +556,16 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.pixelsPerGroup = 1,
.planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
} },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* Bayer formats. */
{ formats::SBGGR8, {
@@ -890,7 +929,46 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
.pixelsPerGroup = 25,
.planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
} },
-
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
/* Compressed formats. */
{ formats::MJPEG, {
.name = "MJPEG",
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
index d8a37992..2d54d391 100644
--- a/src/libcamera/formats.yaml
+++ b/src/libcamera/formats.yaml
@@ -43,6 +43,11 @@ formats:
- BGRA8888:
fourcc: DRM_FORMAT_BGRA8888
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
- YUYV:
fourcc: DRM_FORMAT_YUYV
- YVYU:
@@ -133,6 +138,9 @@ formats:
- R10_CSI2P:
fourcc: DRM_FORMAT_R10
mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - R12_CSI2P:
+ fourcc: DRM_FORMAT_R12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
- SRGGB10_CSI2P:
fourcc: DRM_FORMAT_SRGGB10
@@ -185,4 +193,20 @@ formats:
- SBGGR10_IPU3:
fourcc: DRM_FORMAT_SBGGR10
mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
...
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
index 5a7f3c0b..826848f7 100644
--- a/src/libcamera/framebuffer.cpp
+++ b/src/libcamera/framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.cpp - Frame buffer handling
+ * Frame buffer handling
*/
#include <libcamera/framebuffer.h>
@@ -16,7 +16,10 @@
/**
* \file libcamera/framebuffer.h
* \brief Frame buffer handling
- *
+ */
+
+/**
+ * \internal
* \file libcamera/internal/framebuffer.h
* \brief Internal frame buffer handling support
*/
@@ -104,6 +107,7 @@ LOG_DEFINE_CATEGORY(Buffer)
* \return The array of per-plane metadata
*/
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class FrameBuffer::Private
* \brief Base class for FrameBuffer private data
@@ -206,6 +210,7 @@ FrameBuffer::Private::~Private()
* \brief Retrieve the dynamic metadata
* \return Dynamic metadata for the frame contained in the buffer
*/
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \class FrameBuffer
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index dbd0db19..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index 8d85b758..81cc8cd5 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -594,6 +594,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
*
* Rectangles are used to identify an area of an image. They are specified by
* the coordinates of top-left corner and their horizontal and vertical size.
+ * By convention, the top-left corner is defined as the corner with the lowest
+ * x and y coordinates, regardless of the origin and direction of the axes.
*
* The measure unit of the rectangle coordinates and size, as well as the
* reference point from which the Rectangle::x and Rectangle::y displacements
@@ -611,6 +613,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
* \param[in] x The horizontal coordinate of the top-left corner
* \param[in] y The vertical coordinate of the top-left corner
* \param[in] size The size
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -620,6 +624,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
* \param[in] y The vertical coordinate of the top-left corner
* \param[in] width The width
* \param[in] height The height
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -630,13 +636,24 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
*/
/**
+ * \fn Rectangle::Rectangle(const Point &point1, const Point &point2)
+ * \brief Construct a Rectangle from two opposite corners
+ * \param[in] point1 One of the corners of the rectangle
+ * \param[in] point2 The opposite corner of \a point1
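+ *
+ * The corners may be given in any order. For example, both
+ * Rectangle(Point(50, 200), Point(100, 300)) and
+ * Rectangle(Point(100, 300), Point(50, 200)) yield the rectangle
+ * (50, 200)/50x100.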
+ */
+
+/**
* \var Rectangle::x
* \brief The horizontal coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
* \var Rectangle::y
* \brief The vertical coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
*/
/**
@@ -685,6 +702,9 @@ Point Rectangle::center() const
/**
* \fn Point Rectangle::topLeft() const
* \brief Retrieve the coordinates of the top left corner of this Rectangle
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ *
* \return The Rectangle's top left corner
*/
@@ -818,6 +838,55 @@ Rectangle Rectangle::translatedBy(const Point &point) const
}
/**
+ * \brief Transform a Rectangle from one reference rectangle to another
+ * \param[in] source The \a source reference rectangle
+ * \param[in] destination The \a destination reference rectangle
+ *
+ * The \a source and \a destination parameters describe two rectangles defined
+ * in different reference systems. The Rectangle is translated from the source
+ * reference system into the destination reference system.
+ *
+ * The typical use case for this function is to translate a selection rectangle
+ * specified in one reference system, for example the sensor's pixel array,
+ * into the same rectangle re-scaled and translated into a different reference
+ * system, for example the output frame to which the selection rectangle is
+ * applied.
+ *
+ * For example, consider a sensor with a resolution of 4040x2360 pixels, and
+ * assume a rectangle of (100, 100)/3840x2160 (sensorReference) in sensor
+ * coordinates is mapped to a rectangle (0, 0)/1920x1080 (displayReference) in
+ * display coordinates. This function can be used to transform an arbitrary
+ * rectangle from display coordinates to sensor coordinates or vice versa:
+ *
+ * \code{.cpp}
+ * Rectangle sensorReference(100, 100, 3840, 2160);
+ * Rectangle displayReference(0, 0, 1920, 1080);
+ *
+ * // Bottom right quarter in sensor coordinates
+ * Rectangle sensorRect(2020, 100, 1920, 1080);
+ * Rectangle displayRect =
+ *         sensorRect.transformedBetween(sensorReference, displayReference);
+ * // displayRect is now (960, 540)/960x540
+ *
+ * // Transformation back to sensor coordinates
+ * sensorRect = displayRect.transformedBetween(displayReference, sensorReference);
+ * \endcode
+ */
+Rectangle Rectangle::transformedBetween(const Rectangle &source,
+ const Rectangle &destination) const
+{
+ Rectangle r;
+ double sx = static_cast<double>(destination.width) / source.width;
+ double sy = static_cast<double>(destination.height) / source.height;
+
+ r.x = static_cast<int>((x - source.x) * sx) + destination.x;
+ r.y = static_cast<int>((y - source.y) * sy) + destination.y;
+ r.width = static_cast<int>(width * sx);
+ r.height = static_cast<int>(height * sy);
+
+ return r;
+}
+
+/**
* \brief Compare rectangles for equality
* \return True if the two rectangles are equal, false otherwise
*/
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index 870a443b..12d92ebe 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
#include <libcamera/ipa/ipa_controls.h>
@@ -220,6 +220,10 @@ static_assert(sizeof(ipa_control_value_entry) == 16,
* \var ipa_control_info_entry::offset
* The offset in bytes from the beginning of the data section to the control
* info data (shall be a multiple of 8 bytes)
+ * \var ipa_control_info_entry::direction
+ * The directions in which the control is allowed to be sent. This is a flags
+ * value, where 0x1 signifies input (as controls), and 0x2 signifies output (as
+ * metadata). \sa ControlId::Direction
* \var ipa_control_info_entry::padding
* Padding bytes (shall be set to 0)
*/
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
index 0a259305..2189a246 100644
--- a/src/libcamera/ipa_data_serializer.cpp
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.cpp - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#include "libcamera/internal/ipa_data_serializer.h"
@@ -11,6 +11,8 @@
#include <libcamera/base/log.h>
+#include "libcamera/internal/byte_stream_buffer.h"
+
/**
* \file ipa_data_serializer.h
* \brief IPA Data Serializer
@@ -537,7 +539,6 @@ IPADataSerializer<SharedFD>::serialize(const SharedFD &data,
if (data.isValid())
fdVec.push_back(data);
-
return { dataVec, fdVec };
}
@@ -604,7 +605,7 @@ IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_i
FrameBuffer::Plane ret;
ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4,
- fdsBegin, fdsBegin + 1);
+ fdsBegin, fdsBegin + 1);
ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd);
ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd);
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 8ea6cbee..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#include <libcamera/ipa/ipa_interface.h>
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index 7a4515d9..cfc24d38 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#include "libcamera/internal/ipa_manager.h"
@@ -95,8 +95,6 @@ LOG_DEFINE_CATEGORY(IPAManager)
* IPC.
*/
-IPAManager *IPAManager::self_ = nullptr;
-
/**
* \brief Construct an IPAManager instance
*
@@ -105,10 +103,6 @@ IPAManager *IPAManager::self_ = nullptr;
*/
IPAManager::IPAManager()
{
- if (self_)
- LOG(IPAManager, Fatal)
- << "Multiple IPAManager objects are not allowed";
-
#if HAVE_IPA_PUBKEY
if (!pubKey_.isValid())
LOG(IPAManager, Warning) << "Public key not valid";
@@ -153,16 +147,12 @@ IPAManager::IPAManager()
if (!ipaCount)
LOG(IPAManager, Warning)
<< "No IPA found in '" IPA_MODULE_DIR "'";
-
- self_ = this;
}
IPAManager::~IPAManager()
{
for (IPAModule *module : modules_)
delete module;
-
- self_ = nullptr;
}
/**
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index f2dd87e5..9ca74be6 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#include "libcamera/internal/ipa_module.h"
#include <algorithm>
-#include <array>
#include <ctype.h>
#include <dlfcn.h>
#include <elf.h>
@@ -51,8 +50,8 @@ typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
if (size > elf.size() || size < objSize)
return nullptr;
- return reinterpret_cast<typename std::remove_extent_t<T> *>
- (reinterpret_cast<const char *>(elf.data()) + offset);
+ return reinterpret_cast<typename std::remove_extent_t<T> *>(
+ reinterpret_cast<const char *>(elf.data()) + offset);
}
template<typename T>
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 3f2cc6b8..9907b961 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,12 +2,11 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#include "libcamera/internal/ipa_proxy.h"
-#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
@@ -72,6 +71,7 @@ IPAProxy::~IPAProxy()
/**
* \brief Retrieve the absolute path to an IPA configuration file
* \param[in] name The configuration file name
+ * \param[in] fallbackName The name of a fallback configuration file
*
* This function locates the configuration file for an IPA and returns its
* absolute path. It searches the following directories, in order:
@@ -89,21 +89,42 @@ IPAProxy::~IPAProxy()
* named after the IPA module name, as reported in IPAModuleInfo::name, and for
* a file named \a name within that directory. The \a name is IPA-specific.
*
+ * If the file named \a name is not found and \a fallbackName is non-empty,
+ * the whole search is repeated for \a fallbackName.
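+ *
+ * A typical use is to request a sensor-specific tuning file while falling
+ * back to a generic one (a sketch; the file names are illustrative):
+ *
+ * \code{.cpp}
+ * std::string file = ipa->configurationFile(sensorModel + ".yaml",
+ *                                           "uncalibrated.yaml");
+ * \endcode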
+ *
* \return The full path to the IPA configuration file, or an empty string if
* no configuration file can be found
*/
-std::string IPAProxy::configurationFile(const std::string &name) const
+std::string IPAProxy::configurationFile(const std::string &name,
+ const std::string &fallbackName) const
{
- struct stat statbuf;
- int ret;
-
/*
* The IPA module name can be used as-is to build directory names as it
* has been validated when loading the module.
*/
- std::string ipaName = ipam_->info().name;
+ const std::string ipaName = ipam_->info().name;
+
+ /*
+ * Start with any user override through the module-specific environment
+ * variable. Use the name of the IPA module up to the first '/' to
+ * construct the variable name.
+ */
+ std::string ipaEnvName = ipaName.substr(0, ipaName.find('/'));
+ std::transform(ipaEnvName.begin(), ipaEnvName.end(), ipaEnvName.begin(),
+ [](unsigned char c) { return std::toupper(c); });
+ ipaEnvName = "LIBCAMERA_" + ipaEnvName + "_TUNING_FILE";
+
+ char const *configFromEnv = utils::secure_getenv(ipaEnvName.c_str());
+ if (configFromEnv && *configFromEnv != '\0')
+ return { configFromEnv };
+
+ struct stat statbuf;
+ int ret;
- /* Check the environment variable first. */
+ /*
+ * Check the directory pointed to by the IPA config path environment
+ * variable next.
+ */
const char *confPaths = utils::secure_getenv("LIBCAMERA_IPA_CONFIG_PATH");
if (confPaths) {
for (const auto &dir : utils::split(confPaths, ":")) {
@@ -146,11 +167,18 @@ std::string IPAProxy::configurationFile(const std::string &name) const
}
}
- LOG(IPAProxy, Error)
- << "Configuration file '" << name
- << "' not found for IPA module '" << ipaName << "'";
+ if (fallbackName.empty()) {
+ LOG(IPAProxy, Error)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName << "'";
+ return std::string();
+ }
- return std::string();
+ LOG(IPAProxy, Warning)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName
+ << "', falling back to '" << fallbackName << "'";
+ return configurationFile(fallbackName);
}
/**
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
index 01e5333b..5d8c92c2 100644
--- a/src/libcamera/ipa_pub_key.cpp.in
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
- * ipa_pub_key.cpp - IPA module signing public key
+ * IPA module signing public key
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
index 31a0ca09..548299d0 100644
--- a/src/libcamera/ipc_pipe.cpp
+++ b/src/libcamera/ipc_pipe.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.cpp - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#include "libcamera/internal/ipc_pipe.h"
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
index da2cffc3..668ec73b 100644
--- a/src/libcamera/ipc_pipe_unixsocket.cpp
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.cpp - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#include "libcamera/internal/ipc_pipe_unixsocket.h"
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 1980d374..002053e3 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#include "libcamera/internal/ipc_unixsocket.h"
@@ -12,6 +12,7 @@
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
+#include <vector>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
@@ -247,10 +248,9 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
iov[0].iov_base = const_cast<void *>(buffer);
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -283,10 +283,9 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
iov[0].iov_base = buffer;
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
index 6860069b..f54bbf21 100644
--- a/src/libcamera/mapped_framebuffer.cpp
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.cpp - Mapped Framebuffer support
+ * Mapped Framebuffer support
*/
#include "libcamera/internal/mapped_framebuffer.h"
@@ -16,7 +16,7 @@
#include <libcamera/base/log.h>
/**
- * \file libcamera/internal/mapped_framebuffer.h
+ * \file mapped_framebuffer.h
* \brief Frame buffer memory mapping support
*/
@@ -72,7 +72,7 @@ MappedBuffer::MappedBuffer(MappedBuffer &&other)
/**
* \brief Move assignment operator, replace the mappings with those of \a other
-* \param[in] other The other MappedBuffer
+ * \param[in] other The other MappedBuffer
*
* Moving a MappedBuffer moves the mappings contained in the \a other to the new
* MappedBuffer and invalidates the \a other.
diff --git a/src/libcamera/matrix.cpp b/src/libcamera/matrix.cpp
new file mode 100644
index 00000000..e7e02722
--- /dev/null
+++ b/src/libcamera/matrix.cpp
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Matrix and related operations
+ */
+
+#include "libcamera/internal/matrix.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file matrix.h
+ * \brief Matrix class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Matrix)
+
+/**
+ * \class Matrix
+ * \brief Matrix class
+ * \tparam T Type of numerical values to be stored in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
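+ *
+ * A minimal usage sketch:
+ *
+ * \code{.cpp}
+ * Matrix<double, 3, 3> m = Matrix<double, 3, 3>::identity();
+ * m[0][1] = 2.0;
+ * Matrix<double, 3, 3> doubled = 2.0 * m;
+ * \endcode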
+ */
+
+/**
+ * \fn Matrix::Matrix()
+ * \brief Construct a zero matrix
+ */
+
+/**
+ * \fn Matrix::Matrix(const std::array<T, Rows * Cols> &data)
+ * \brief Construct a matrix from supplied data
+ * \param[in] data Data from which to construct a matrix
+ *
+ * \a data is a one-dimensional array and will be turned into a matrix in
+ * row-major order. The size of \a data must be equal to the product of the
+ * number of rows and columns of the matrix (Rows x Cols).
+ */
+
+/**
+ * \fn Matrix::identity()
+ * \brief Construct an identity matrix
+ */
+
+/**
+ * \fn Matrix::toString()
+ * \brief Assemble and return a string describing the matrix
+ * \return A string describing the matrix
+ */
+
+/**
+ * \fn Matrix::data()
+ * \brief Access the matrix data as a linear array
+ *
+ * Access the contents of the matrix as a one-dimensional linear array of
+ * values in row-major order. The size of the array is equal to the product of
+ * the number of rows and columns of the matrix (Rows x Cols).
+ *
+ * \return A span referencing the matrix data as a linear array
+ */
+
+/**
+ * \fn Span<const T, Cols> Matrix::operator[](size_t i) const
+ * \brief Index to a row in the matrix
+ * \param[in] i Index of row to retrieve
+ *
+ * This operator[] returns a Span, which can itself be indexed with another
+ * operator[], allowing elements of the matrix to be conveniently accessed as
+ * m[i][j]. Note that the lifetime of the Span returned by this first-level
+ * operator[] is bound to that of the Matrix itself, so it is not recommended
+ * to save the Span that is the result of this operator[].
+ *
+ * \return Row \a i from the matrix, as a Span
+ */
+
+/**
+ * \fn Matrix::operator[](size_t i)
+ * \copydoc Matrix::operator[](size_t i) const
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> &Matrix::operator*=(U d)
+ * \brief Multiply the matrix by a scalar in-place
+ * \tparam U Type of the numerical scalar value
+ * \param d The scalar multiplier
+ * \return Product of this matrix and scalar \a d
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
+ * \brief Multiply the matrix by a scalar
+ * \tparam T Type of the numerical scalar value
+ * \tparam U Type of numerical values in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ * \param d The scalar multiplier
+ * \param m The matrix
+ * \return Product of scalar \a d and matrix \a m
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
+ * \copydoc operator*(T d, const Matrix<U, Rows, Cols> &m)
+ */
+
+/**
+ * \fn Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
+ * \brief Matrix multiplication
+ * \tparam T Type of numerical values in the matrices
+ * \tparam R1 Number of rows in the first matrix
+ * \tparam C1 Number of columns in the first matrix
+ * \tparam R2 Number of rows in the second matrix
+ * \tparam C2 Number of columns in the second matrix
+ * \param m1 Multiplicand matrix
+ * \param m2 Multiplier matrix
+ * \return Matrix product of matrices \a m1 and \a m2
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
+ * \brief Matrix addition
+ * \tparam T Type of numerical values in the matrices
+ * \tparam Rows Number of rows in the matrices
+ * \tparam Cols Number of columns in the matrices
+ * \param m1 Summand matrix
+ * \param m2 Summand matrix
+ * \return Matrix sum of matrices \a m1 and \a m2
+ */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values. Its size shall be equal
+ * to the product of the number of rows and columns of the matrix (Rows x
+ * Cols). The values shall be stored in row-major order.
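+ *
+ * For example, a 3x3 identity matrix would be written as:
+ *
+ *   [ 1.0, 0.0, 0.0,
+ *     0.0, 1.0, 0.0,
+ *     0.0, 0.0, 1.0 ]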
+ */
+bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Matrix, Error)
+ << "Wrong number of values in matrix: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 2949816b..d71dad74 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
#include "libcamera/internal/media_device.h"
@@ -818,20 +818,12 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags)
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
- << "Failed to setup link "
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: "
+ << "Failed to setup link " << *link << ": "
<< strerror(-ret);
return ret;
}
- LOG(MediaDevice, Debug)
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: " << flags;
+ LOG(MediaDevice, Debug) << *link << ": " << flags;
return 0;
}
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index c78f4758..3e3772a6 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
#include "libcamera/internal/media_object.h"
@@ -147,6 +147,31 @@ MediaLink::MediaLink(const struct media_v2_link *link, MediaPad *source,
}
/**
+ * \brief Generate a string representation of the MediaLink
+ * \return A string representing the MediaLink
+ */
+std::string MediaLink::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a Link into an output stream
+ * \param[in] out The output stream
+ * \param[in] link The MediaLink
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaLink &link)
+{
+ out << *link.source() << " -> " << *link.sink();
+
+ return out;
+}
+
+/**
* \fn MediaLink::source()
* \brief Retrieve the link's source pad
* \return The source pad at the origin of the link
@@ -236,6 +261,31 @@ void MediaPad::addLink(MediaLink *link)
}
/**
+ * \brief Generate a string representation of the MediaPad
+ * \return A string representing the MediaPad
+ */
+std::string MediaPad::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a MediaPad into an output stream
+ * \param[in] out The output stream
+ * \param[in] pad The MediaPad
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaPad &pad)
+{
+ out << "'" << pad.entity()->name() << "'[" << pad.index() << "]";
+
+ return out;
+}
+
+/**
* \class MediaEntity
* \brief The MediaEntity represents an entity in the media graph
*
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index 2e7b0c77..de22b8e6 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -1,25 +1,35 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources = files([
- 'bayer_format.cpp',
- 'byte_stream_buffer.cpp',
+libcamera_public_sources = files([
'camera.cpp',
- 'camera_controls.cpp',
- 'camera_lens.cpp',
'camera_manager.cpp',
'color_space.cpp',
'controls.cpp',
+ 'fence.cpp',
+ 'framebuffer.cpp',
+ 'framebuffer_allocator.cpp',
+ 'geometry.cpp',
+ 'orientation.cpp',
+ 'pixel_format.cpp',
+ 'request.cpp',
+ 'stream.cpp',
+ 'transform.cpp',
+])
+
+libcamera_internal_sources = files([
+ 'bayer_format.cpp',
+ 'byte_stream_buffer.cpp',
+ 'camera_controls.cpp',
+ 'camera_lens.cpp',
'control_serializer.cpp',
'control_validator.cpp',
'converter.cpp',
+ 'debug_controls.cpp',
'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
- 'fence.cpp',
+ 'dma_buf_allocator.cpp',
'formats.cpp',
- 'framebuffer.cpp',
- 'framebuffer_allocator.cpp',
- 'geometry.cpp',
'ipa_controls.cpp',
'ipa_data_serializer.cpp',
'ipa_interface.cpp',
@@ -30,29 +40,23 @@ libcamera_sources = files([
'ipc_pipe_unixsocket.cpp',
'ipc_unixsocket.cpp',
'mapped_framebuffer.cpp',
+ 'matrix.cpp',
'media_device.cpp',
'media_object.cpp',
- 'orientation.cpp',
'pipeline_handler.cpp',
- 'pixel_format.cpp',
'process.cpp',
'pub_key.cpp',
- 'request.cpp',
+ 'shared_mem_object.cpp',
'source_paths.cpp',
- 'stream.cpp',
'sysfs.cpp',
- 'transform.cpp',
'v4l2_device.cpp',
'v4l2_pixelformat.cpp',
'v4l2_subdevice.cpp',
'v4l2_videodevice.cpp',
+ 'vector.cpp',
'yaml_parser.cpp',
])
-libcamera_sources += libcamera_public_headers
-libcamera_sources += libcamera_generated_ipa_headers
-libcamera_sources += libcamera_tracepoint_header
-
includes = [
libcamera_includes,
]
@@ -68,6 +72,7 @@ subdir('ipa')
subdir('pipeline')
subdir('proxy')
subdir('sensor')
+subdir('software_isp')
null_dep = dependency('', required : false)
@@ -101,14 +106,14 @@ endif
if liblttng.found()
tracing_enabled = true
config_h.set('HAVE_TRACING', 1)
- libcamera_sources += files(['tracepoints.cpp'])
+ libcamera_internal_sources += files(['tracepoints.cpp'])
else
tracing_enabled = false
endif
if libudev.found()
config_h.set('HAVE_LIBUDEV', 1)
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'device_enumerator_udev.cpp',
])
endif
@@ -127,29 +132,33 @@ endif
control_sources = []
controls_mode_files = {
- 'controls' : controls_files,
- 'properties' : properties_files,
+ 'controls': [
+ controls_files,
+ 'control_ids.cpp',
+ ],
+ 'properties': [
+ properties_files,
+ 'property_ids.cpp',
+ ],
}
-foreach mode, input_files : controls_mode_files
- input_files = files(input_files)
-
- if mode == 'controls'
- template_file = files('control_ids.cpp.in')
- else
- template_file = files('property_ids.cpp.in')
- endif
+foreach mode, inout_files : controls_mode_files
+ input_files = inout_files[0]
+ output_file = inout_files[1]
+ template_file = files('control_ids.cpp.in')
ranges_file = files('control_ranges.yaml')
- control_sources += custom_target(mode + '_cpp',
+
+ control_sources += custom_target(mode + '_ids_cpp',
input : input_files,
- output : mode + '_ids.cpp',
+ output : output_file,
command : [gen_controls, '-o', '@OUTPUT@',
'--mode', mode, '-t', template_file,
- '-r', ranges_file, '@INPUT@'])
+ '-r', ranges_file, '@INPUT@'],
+ env : py_build_env)
endforeach
-libcamera_sources += control_sources
+libcamera_public_sources += control_sources
gen_version = meson.project_source_root() / 'utils' / 'gen-version.sh'
@@ -160,7 +169,7 @@ version_cpp = vcs_tag(command : [gen_version, meson.project_build_root(), meson.
output : 'version.cpp',
fallback : meson.project_version())
-libcamera_sources += version_cpp
+libcamera_public_sources += version_cpp
if ipa_sign_module
ipa_pub_key_cpp = custom_target('ipa_pub_key_cpp',
@@ -168,7 +177,7 @@ if ipa_sign_module
output : 'ipa_pub_key.cpp',
command : [gen_ipa_pub_key, '@INPUT@', '@OUTPUT@'])
- libcamera_sources += ipa_pub_key_cpp
+ libcamera_internal_sources += ipa_pub_key_cpp
endif
libcamera_deps += [
@@ -188,7 +197,13 @@ libcamera_deps += [
# for the presence or abscence of the dynamic tag.
libcamera = shared_library('libcamera',
- libcamera_sources,
+ [
+ libcamera_public_headers,
+ libcamera_public_sources,
+ libcamera_ipa_headers,
+ libcamera_internal_headers,
+ libcamera_internal_sources,
+ ],
version : libcamera_version,
soversion : libcamera_soversion,
name_prefix : '',
@@ -198,7 +213,6 @@ libcamera = shared_library('libcamera',
dependencies : libcamera_deps)
libcamera_public = declare_dependency(sources : [
- libcamera_ipa_headers,
libcamera_public_headers,
],
include_directories : libcamera_includes,
@@ -207,7 +221,7 @@ libcamera_public = declare_dependency(sources : [
# Internal dependency for components and plugins which can use private APIs
libcamera_private = declare_dependency(sources : [
- libcamera_generated_ipa_headers,
+ libcamera_ipa_headers,
],
dependencies : [
libcamera_public,
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
index 965f5a8b..7d7d21ae 100644
--- a/src/libcamera/orientation.cpp
+++ b/src/libcamera/orientation.cpp
@@ -2,16 +2,15 @@
/*
* Copyright (C) 2023, Ideas On Board Oy
*
- * orientation.cpp - Image orientation
+ * Image orientation
*/
#include <libcamera/orientation.h>
#include <array>
-#include <string>
/**
- * \file libcamera/orientation.h
+ * \file orientation.h
* \brief Image orientation definition
*/
@@ -102,10 +101,14 @@ std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
{
constexpr std::array<const char *, 9> orientationNames = {
"", /* Orientation starts counting from 1. */
- "Rotate0", "Rotate0Mirror",
- "Rotate180", "Rotate180Mirror",
- "Rotate90Mirror", "Rotate270",
- "Rotate270Mirror", "Rotate90",
+ "Rotate0",
+ "Rotate0Mirror",
+ "Rotate180",
+ "Rotate180Mirror",
+ "Rotate90Mirror",
+ "Rotate270",
+ "Rotate270Mirror",
+ "Rotate90",
};
out << orientationNames[static_cast<unsigned int>(orientation)];
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
index 63082cea..4e66b336 100644
--- a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
*
- * imx8-isi.cpp - Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
*/
#include <algorithm>
@@ -157,11 +157,10 @@ PipelineHandlerISI *ISICameraData::pipe()
/* Open and initialize pipe components. */
int ISICameraData::init()
{
- int ret = sensor_->init();
- if (ret)
- return ret;
+ if (!sensor_)
+ return -ENODEV;
- ret = csis_->open();
+ int ret = csis_->open();
if (ret)
return ret;
@@ -1057,7 +1056,7 @@ bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
std::unique_ptr<ISICameraData> data =
std::make_unique<ISICameraData>(this);
- data->sensor_ = std::make_unique<CameraSensor>(sensor);
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
data->csis_ = std::make_unique<V4L2Subdevice>(csi);
data->xbarSink_ = sink;
@@ -1112,6 +1111,6 @@ void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerISI)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
index ffd0ce54..b369b031 100644
--- a/src/libcamera/pipeline/imx8-isi/meson.build
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'imx8-isi.cpp'
])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index 43c816ba..aa544d7b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -2,13 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.cpp - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#include "cio2.h"
+#include <cmath>
#include <limits>
-#include <math.h>
#include <linux/media-bus-format.h>
@@ -134,10 +134,9 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index)
MediaLink *link = links[0];
MediaEntity *sensorEntity = link->source()->entity();
- sensor_ = std::make_unique<CameraSensor>(sensorEntity);
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!sensor_)
+ return -ENODEV;
ret = link->setEnabled(true);
if (ret)
@@ -304,7 +303,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int>
* comparing it with a single precision digit is enough.
*/
ratio = static_cast<unsigned int>(ratio * 10) / 10.0;
- float ratioDiff = fabsf(ratio - desiredRatio);
+ float ratioDiff = std::abs(ratio - desiredRatio);
unsigned int area = sz.width * sz.height;
unsigned int areaDiff = area - desiredArea;
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
index bbd87eb8..963c2f6b 100644
--- a/src/libcamera/pipeline/ipu3/cio2.h
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * cio2.h - Intel IPU3 CIO2
+ * Intel IPU3 CIO2
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
index a4c3477c..bc0526a7 100644
--- a/src/libcamera/pipeline/ipu3/frames.cpp
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -2,17 +2,18 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.cpp - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#include "frames.h"
+#include <libcamera/base/log.h>
+
#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
index 6e3cb915..a347b66f 100644
--- a/src/libcamera/pipeline/ipu3/frames.h
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * frames.h - Intel IPU3 Frames helper
+ * Intel IPU3 Frames helper
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index 2202438a..7be78091 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.cpp - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#include "imgu.h"
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 0af4dd8a..fa508316 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * imgu.h - Intel IPU3 ImgU
+ * Intel IPU3 ImgU
*/
#pragma once
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index fa4bd0bb..e31e3879 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,11 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
-#include <iomanip>
#include <memory>
#include <queue>
#include <vector>
@@ -19,15 +18,17 @@
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
-#include <libcamera/ipa/ipu3_ipa_interface.h>
-#include <libcamera/ipa/ipu3_ipa_proxy.h>
#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+#include <libcamera/ipa/ipu3_ipa_proxy.h>
+
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
@@ -88,7 +89,7 @@ public:
private:
void metadataReady(unsigned int id, const ControlList &metadata);
- void paramsBufferReady(unsigned int id);
+ void paramsComputed(unsigned int id);
void setSensorControls(unsigned int id, const ControlList &sensorControls,
const ControlList &lensControls);
};
@@ -958,7 +959,7 @@ int PipelineHandlerIPU3::updateControls(IPU3CameraData *data)
values.reserve(testPatternModes.size());
for (auto pattern : testPatternModes)
- values.emplace_back(static_cast<int32_t>(pattern));
+ values.emplace_back(pattern);
controls[&controls::draft::TestPatternMode] = ControlInfo(values);
}
@@ -1077,14 +1078,10 @@ int PipelineHandlerIPU3::registerCameras()
if (ret)
continue;
- /*
- * \todo Read delay values from the sensor itself or from a
- * a sensor database. For now use generic values taken from
- * the Raspberry Pi and listed as 'generic values'.
- */
+ const CameraSensorProperties::SensorDelays &delays = cio2->sensor()->sensorDelays();
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
- { V4L2_CID_EXPOSURE, { 2, false } },
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
};
data->delayedCtrls_ =
@@ -1117,19 +1114,19 @@ int PipelineHandlerIPU3::registerCameras()
* returned through the ImgU main and secondary outputs.
*/
data->cio2_.bufferReady().connect(data.get(),
- &IPU3CameraData::cio2BufferReady);
+ &IPU3CameraData::cio2BufferReady);
data->cio2_.bufferAvailable.connect(
data.get(), &IPU3CameraData::queuePendingRequests);
data->imgu_->input_->bufferReady.connect(&data->cio2_,
- &CIO2Device::tryReturnBuffer);
+ &CIO2Device::tryReturnBuffer);
data->imgu_->output_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ &IPU3CameraData::imguOutputBufferReady);
data->imgu_->viewfinder_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ &IPU3CameraData::imguOutputBufferReady);
data->imgu_->param_->bufferReady.connect(data.get(),
- &IPU3CameraData::paramBufferReady);
+ &IPU3CameraData::paramBufferReady);
data->imgu_->stat_->bufferReady.connect(data.get(),
- &IPU3CameraData::statBufferReady);
+ &IPU3CameraData::statBufferReady);
/* Create and register the Camera instance. */
const std::string &cameraId = cio2->sensor()->id();
@@ -1156,7 +1153,7 @@ int IPU3CameraData::loadIPA()
return -ENOENT;
ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls);
- ipa_->paramsBufferReady.connect(this, &IPU3CameraData::paramsBufferReady);
+ ipa_->paramsComputed.connect(this, &IPU3CameraData::paramsComputed);
ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady);
/*
@@ -1186,9 +1183,8 @@ int IPU3CameraData::loadIPA()
* The API tuning file is made from the sensor name. If the tuning file
* isn't found, fall back to the 'uncalibrated' file.
*/
- std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
- if (ipaTuningFile.empty())
- ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
sensorInfo, sensor->controls(), &ipaControls_);
@@ -1218,7 +1214,7 @@ void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id,
focusLens->setFocusPosition(focusValue.get<int32_t>());
}
-void IPU3CameraData::paramsBufferReady(unsigned int id)
+void IPU3CameraData::paramsComputed(unsigned int id)
{
IPU3Frames::Info *info = frameInfos_.find(id);
if (!info)
@@ -1329,7 +1325,7 @@ void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
if (request->findBuffer(&rawStream_))
pipe()->completeBuffer(request, buffer);
- ipa_->fillParamsBuffer(info->id, info->paramBuffer->cookie());
+ ipa_->computeParams(info->id, info->paramBuffer->cookie());
}
void IPU3CameraData::paramBufferReady(FrameBuffer *buffer)
@@ -1373,8 +1369,8 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
return;
}
- ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
- info->statBuffer->cookie(), info->effectiveSensorControls);
+ ipa_->processStats(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
+ info->statBuffer->cookie(), info->effectiveSensorControls);
}
/*
@@ -1420,6 +1416,6 @@ void IPU3CameraData::frameStart(uint32_t sequence)
*testPatternMode);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/meson.build b/src/libcamera/pipeline/ipu3/meson.build
index a1b0b31a..f2904b4a 100644
--- a/src/libcamera/pipeline/ipu3/meson.build
+++ b/src/libcamera/pipeline/ipu3/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'cio2.cpp',
'frames.cpp',
'imgu.cpp',
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
index 78343553..a05e11fc 100644
--- a/src/libcamera/pipeline/mali-c55/mali-c55.cpp
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024, Ideas on Board Oy
*
- * mali-c55.cpp - Pipeline Handler for ARM's Mali-C55 ISP
+ * Pipeline Handler for ARM's Mali-C55 ISP
*/
#include <algorithm>
@@ -12,6 +12,7 @@
#include <set>
#include <string>
+#include <linux/mali-c55-config.h>
#include <linux/media-bus-format.h>
#include <linux/media.h>
@@ -20,14 +21,24 @@
#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
+#include <libcamera/property_ids.h>
#include <libcamera/stream.h>
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_proxy.h>
+
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
@@ -57,32 +68,27 @@ const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
{ formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
/* RAW formats, FR pipe only. */
- { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
- { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
- { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
- { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
- { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
- { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
- { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
- { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
- { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
- { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
- { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
- { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
- { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
- { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
- { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
- { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
{ formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
{ formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
{ formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
{ formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
};
+constexpr Size kMaliC55MinInputSize = { 640, 480 };
constexpr Size kMaliC55MinSize = { 128, 128 };
constexpr Size kMaliC55MaxSize = { 8192, 8192 };
constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+struct MaliC55FrameInfo {
+ Request *request;
+
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ bool paramsDone;
+ bool statsDone;
+};
+
class MaliC55CameraData : public Camera::Private
{
public:
@@ -92,13 +98,16 @@ public:
}
int init();
+ int loadIPA();
/* Deflect these functionalities to either TPG or CameraSensor. */
- const std::vector<unsigned int> mbusCodes() const;
const std::vector<Size> sizes(unsigned int mbusCode) const;
const Size resolution() const;
- PixelFormat bestRawFormat() const;
+ int pixfmtToMbusCode(const PixelFormat &pixFmt) const;
+ const PixelFormat &bestRawFormat() const;
+
+ void updateControls(const ControlInfoMap &ipaControls);
PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
@@ -111,8 +120,15 @@ public:
Stream frStream_;
Stream dsStream_;
+ std::unique_ptr<ipa::mali_c55::IPAProxyMaliC55> ipa_;
+ std::vector<IPABuffer> ipaStatBuffers_;
+ std::vector<IPABuffer> ipaParamBuffers_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
private:
void initTPGData();
+ void setSensorControls(const ControlList &sensorControls);
std::string id_;
std::vector<unsigned int> tpgCodes_;
@@ -141,9 +157,8 @@ int MaliC55CameraData::init()
* Register a CameraSensor if we connect to a sensor and create
* an entity for the connected CSI-2 receiver.
*/
- sensor_ = std::make_unique<CameraSensor>(entity_);
- ret = sensor_->init();
- if (ret)
+ sensor_ = CameraSensorFactoryBase::create(entity_);
+ if (!sensor_)
- return ret;
+ return -EINVAL;
const MediaPad *sourcePad = entity_->getPadByIndex(0);
@@ -177,12 +192,9 @@ void MaliC55CameraData::initTPGData()
tpgResolution_ = tpgSizes_.back();
}
-const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const
+void MaliC55CameraData::setSensorControls(const ControlList &sensorControls)
{
- if (sensor_)
- return sensor_->mbusCodes();
-
- return tpgCodes_;
+ delayedCtrls_->push(sensorControls);
}
const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
@@ -216,33 +228,102 @@ const Size MaliC55CameraData::resolution() const
return tpgResolution_;
}
-PixelFormat MaliC55CameraData::bestRawFormat() const
+/*
+ * The Mali C55 ISP can only produce 16-bit RAW output in bypass modes, but the
+ * sensors connected to it might produce 8/10/12/16 bits. We simply search the
+ * sensor's supported formats for the one with a matching Bayer order and the
+ * greatest bit depth.
+ */
+int MaliC55CameraData::pixfmtToMbusCode(const PixelFormat &pixFmt) const
{
+ auto it = maliC55FmtToCode.find(pixFmt);
+ if (it == maliC55FmtToCode.end())
+ return -EINVAL;
+
+ BayerFormat bayerFormat = BayerFormat::fromMbusCode(it->second);
+ if (!bayerFormat.isValid())
+ return -EINVAL;
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ unsigned int sensorMbusCode = 0;
unsigned int bitDepth = 0;
- PixelFormat rawFormat;
- /*
- * Iterate over all the supported PixelFormat and find the one
- * supported by the camera with the largest bitdepth.
- */
- for (const auto &maliFormat : maliC55FmtToCode) {
- PixelFormat pixFmt = maliFormat.first;
- if (!isFormatRaw(pixFmt))
+ for (const auto &[code, sizes] : formats) {
+ BayerFormat sdBayerFormat = BayerFormat::fromMbusCode(code);
+ if (!sdBayerFormat.isValid())
continue;
- unsigned int rawCode = maliFormat.second;
- const auto rawSizes = sizes(rawCode);
- if (rawSizes.empty())
+ if (sdBayerFormat.order != bayerFormat.order)
continue;
- BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
- if (bayer.bitDepth > bitDepth) {
- bitDepth = bayer.bitDepth;
- rawFormat = pixFmt;
+ if (sdBayerFormat.bitDepth > bitDepth) {
+ bitDepth = sdBayerFormat.bitDepth;
+ sensorMbusCode = code;
}
}
- return rawFormat;
+ if (!sensorMbusCode)
+ return -EINVAL;
+
+ return sensorMbusCode;
+}
+
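The search pixfmtToMbusCode() performs reduces to a single linear scan: keep the candidate whose Bayer order matches and whose bit depth is greatest. A minimal standalone sketch of that rule, using hypothetical stand-in types (Order, SensorFormat) rather than libcamera's BayerFormat:

#include <cstdio>
#include <vector>

enum class Order { BGGR, GBRG, GRBG, RGGB };

struct SensorFormat {
	Order order;
	unsigned int bitDepth;
	unsigned int mbusCode;
};

/*
 * Return the mbus code with the wanted Bayer order and the greatest bit
 * depth, or 0 if the sensor offers no compatible format.
 */
unsigned int bestMatch(const std::vector<SensorFormat> &formats, Order wanted)
{
	unsigned int best = 0;
	unsigned int bitDepth = 0;

	for (const SensorFormat &fmt : formats) {
		if (fmt.order != wanted)
			continue;
		if (fmt.bitDepth > bitDepth) {
			bitDepth = fmt.bitDepth;
			best = fmt.mbusCode;
		}
	}

	return best;
}

int main()
{
	std::vector<SensorFormat> sensor = {
		{ Order::RGGB, 10, 0xa10 }, /* hypothetical codes */
		{ Order::RGGB, 12, 0xa12 },
		{ Order::BGGR, 12, 0xb12 },
	};

	printf("0x%x\n", bestMatch(sensor, Order::RGGB)); /* prints 0xa12 */
	return 0;
}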
+/*
+ * Find a RAW PixelFormat supported by both the ISP and the sensor.
+ *
+ * The situation is mildly complicated by the fact that we expect the sensor to
+ * output something like RAW8/10/12/16, but the ISP can only accept RAW20 as
+ * input and only produce RAW16 as output. The one constant in all of this is
+ * the Bayer order of the data, so we'll simply check that the sensor produces
+ * a format whose Bayer order matches that of one of the formats we support,
+ * and select that.
+ */
+const PixelFormat &MaliC55CameraData::bestRawFormat() const
+{
+ static const PixelFormat invalidPixFmt = {};
+
+ for (const auto &fmt : sd_->formats(0)) {
+ BayerFormat sensorBayer = BayerFormat::fromMbusCode(fmt.first);
+
+ if (!sensorBayer.isValid())
+ continue;
+
+ for (const auto &[pixFmt, rawCode] : maliC55FmtToCode) {
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.order == sensorBayer.order)
+ return pixFmt;
+ }
+ }
+
+ LOG(MaliC55, Error) << "Sensor doesn't provide a compatible format";
+ return invalidPixFmt;
+}
+
+void MaliC55CameraData::updateControls(const ControlInfoMap &ipaControls)
+{
+ if (!sensor_)
+ return;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ ControlInfoMap::Map controls;
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ controls[&controls::ScalerCrop] =
+ ControlInfo(ispMinCrop, sensorInfo.analogCrop,
+ sensorInfo.analogCrop);
+
+ for (auto const &c : ipaControls)
+ controls.emplace(c.first, c.second);
+
+ controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
}
/*
@@ -251,13 +332,11 @@ PixelFormat MaliC55CameraData::bestRawFormat() const
*/
PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
{
- /* Make sure the provided raw format is supported by the pipeline. */
- auto it = maliC55FmtToCode.find(rawFmt);
- if (it == maliC55FmtToCode.end())
+ /* Make sure the RAW mbus code is supported by the image source. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
return bestRawFormat();
- /* Now make sure the RAW mbus code is supported by the image source. */
- unsigned int rawCode = it->second;
const auto rawSizes = sizes(rawCode);
if (rawSizes.empty())
return bestRawFormat();
@@ -265,15 +344,16 @@ PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
return rawFmt;
}
-Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &size) const
{
- /* Just make sure the format is supported. */
- auto it = maliC55FmtToCode.find(rawFmt);
- if (it == maliC55FmtToCode.end())
- return {};
+ /* Expand the RAW size to the minimum ISP input size. */
+ Size rawSize = size.expandedTo(kMaliC55MinInputSize);
/* Check if the size is natively supported. */
- unsigned int rawCode = it->second;
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return {};
+
const auto rawSizes = sizes(rawCode);
auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
if (sizeIt != rawSizes.end())
@@ -282,20 +362,59 @@ Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &ra
/* Or adjust it to the closest supported size. */
uint16_t distance = std::numeric_limits<uint16_t>::max();
Size bestSize;
- for (const Size &size : rawSizes) {
+ for (const Size &sz : rawSizes) {
uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
- static_cast<int>(size.width)) +
+ static_cast<int>(sz.width)) +
std::abs(static_cast<int>(rawSize.height) -
- static_cast<int>(size.height));
+ static_cast<int>(sz.height));
if (dist < distance) {
- dist = distance;
+ distance = dist;
- bestSize = size;
+ bestSize = sz;
}
}
return bestSize;
}
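adjustRawSizes() above falls back to a plain minimum search on the Manhattan distance between the requested size and each supported size. The same logic as a self-contained program, with a simplified Size type rather than libcamera's:

#include <cstdio>
#include <cstdlib>
#include <limits>
#include <vector>

struct Size {
	unsigned int width;
	unsigned int height;
};

/* Pick the supported size with the smallest |dw| + |dh| distance. */
Size closestSize(const std::vector<Size> &supported, const Size &requested)
{
	unsigned int distance = std::numeric_limits<unsigned int>::max();
	Size best{};

	for (const Size &size : supported) {
		unsigned int dist = std::abs(static_cast<int>(requested.width) -
					     static_cast<int>(size.width)) +
				    std::abs(static_cast<int>(requested.height) -
					     static_cast<int>(size.height));
		if (dist < distance) {
			distance = dist;
			best = size;
		}
	}

	return best;
}

int main()
{
	std::vector<Size> supported = {
		{ 1280, 720 }, { 1920, 1080 }, { 2592, 1944 },
	};

	Size best = closestSize(supported, { 1800, 1000 });
	printf("%ux%u\n", best.width, best.height); /* prints 1920x1080 */
	return 0;
}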
+int MaliC55CameraData::loadIPA()
+{
+ int ret;
+
+ /* Do not initialize IPA for TPG. */
+ if (!sensor_)
+ return 0;
+
+ ipa_ = IPAManager::createIPA<ipa::mali_c55::IPAProxyMaliC55>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
+
+ ipa_->setSensorControls.connect(this, &MaliC55CameraData::setSensorControls);
+
+ std::string ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml",
+ "uncalibrated.yaml");
+
+ /* We need to inform the IPA of the sensor configuration. */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = sensor_->controls();
+
+ ControlInfoMap ipaControls;
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, ipaConfig,
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to initialise the Mali-C55 IPA";
+ return ret;
+ }
+
+ updateControls(ipaControls);
+
+ return 0;
+}
+
class MaliC55CameraConfiguration : public CameraConfiguration
{
public:
@@ -305,6 +424,7 @@ public:
}
Status validate() override;
+ const Transform &combinedTransform() { return combinedTransform_; }
V4L2SubdeviceFormat sensorFormat_;
@@ -312,6 +432,7 @@ private:
static constexpr unsigned int kMaxStreams = 2;
const MaliC55CameraData *data_;
+ Transform combinedTransform_;
};
CameraConfiguration::Status MaliC55CameraConfiguration::validate()
@@ -321,6 +442,19 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
if (config_.empty())
return Invalid;
+ /*
+ * The TPG doesn't support flips, so we only need to calculate a
+ * transform if we have a sensor.
+ */
+ if (data_->sensor_) {
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+ } else {
+ combinedTransform_ = Transform::Rot0;
+ }
+
/* Only 2 streams available. */
if (config_.size() > kMaxStreams) {
config_.resize(kMaxStreams);
@@ -342,7 +476,11 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
rawConfig = &config;
}
- Size maxSize = kMaliC55MaxSize;
+ /*
+ * The C55 cannot upscale. Limit the configuration to the ISP
+ * capabilities and the sensor resolution.
+ */
+ Size maxSize = kMaliC55MaxSize.boundedTo(data_->resolution());
if (rawConfig) {
/*
* \todo Take into account the Bayer components ordering once
@@ -350,6 +488,10 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
*/
PixelFormat rawFormat =
data_->adjustRawFormat(rawConfig->pixelFormat);
+
+ if (!rawFormat.isValid())
+ return Invalid;
+
if (rawFormat != rawConfig->pixelFormat) {
LOG(MaliC55, Debug)
<< "RAW format adjusted to " << rawFormat;
@@ -368,12 +510,21 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
maxSize = rawSize;
+ const PixelFormatInfo &info = PixelFormatInfo::info(rawConfig->pixelFormat);
+ rawConfig->stride = info.stride(rawConfig->size.width, 0, 4);
+ rawConfig->frameSize = info.frameSize(rawConfig->size, 4);
+
rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
frPipeAvailable = false;
}
- /* Adjust processed streams. */
- Size maxYuvSize;
+ /*
+ * Adjust processed streams.
+ *
+ * Compute the minimum sensor size to be later used to select the
+ * sensor configuration.
+ */
+ Size minSensorSize = kMaliC55MinInputSize;
for (StreamConfiguration &config : config_) {
if (isFormatRaw(config.pixelFormat))
continue;
@@ -395,8 +546,8 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
status = Adjusted;
}
- if (maxYuvSize < size)
- maxYuvSize = size;
+ if (minSensorSize < size)
+ minSensorSize = size;
if (frPipeAvailable) {
config.setStream(const_cast<Stream *>(&data_->frStream_));
@@ -410,30 +561,30 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate()
/* If there's a RAW config, sensor configuration follows it. */
if (rawConfig) {
- const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat);
- sensorFormat_.code = it->second;
- sensorFormat_.size = rawConfig->size;
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawConfig->pixelFormat);
+ sensorFormat_.size = rawConfig->size.expandedTo(minSensorSize);
return status;
}
/* If there's no RAW config, compute the sensor configuration here. */
PixelFormat rawFormat = data_->bestRawFormat();
- const auto it = maliC55FmtToCode.find(rawFormat);
- sensorFormat_.code = it->second;
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawFormat);
uint16_t distance = std::numeric_limits<uint16_t>::max();
- const auto sizes = data_->sizes(it->second);
+ const auto sizes = data_->sizes(sensorFormat_.code);
Size bestSize;
for (const auto &size : sizes) {
- /* Skip sensor sizes that are smaller than the max YUV size. */
- if (maxYuvSize.width > size.width ||
- maxYuvSize.height > size.height)
+ if (minSensorSize.width > size.width ||
+ minSensorSize.height > size.height)
continue;
- uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) -
+ uint16_t dist = std::abs(static_cast<int>(minSensorSize.width) -
static_cast<int>(size.width)) +
- std::abs(static_cast<int>(maxYuvSize.height) -
+ std::abs(static_cast<int>(minSensorSize.height) -
static_cast<int>(size.height));
if (dist < distance) {
- dist = distance;
+ distance = dist;
@@ -458,13 +609,19 @@ public:
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+ int allocateBuffers(Camera *camera);
+ void freeBuffers(Camera *camera);
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
- void bufferReady(FrameBuffer *buffer);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsBufferReady(FrameBuffer *buffer);
+ void statsBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int requestId);
+ void statsProcessed(unsigned int requestId, const ControlList &metadata);
bool match(DeviceEnumerator *enumerator) override;
@@ -472,6 +629,7 @@ private:
struct MaliC55Pipe {
std::unique_ptr<V4L2Subdevice> resizer;
std::unique_ptr<V4L2VideoDevice> cap;
+ MediaLink *link;
Stream *stream;
};
@@ -508,6 +666,10 @@ private:
pipe.stream = nullptr;
}
+ MaliC55FrameInfo *findFrameInfo(FrameBuffer *buffer);
+ MaliC55FrameInfo *findFrameInfo(Request *request);
+ void tryComplete(MaliC55FrameInfo *info);
+
int configureRawStream(MaliC55CameraData *data,
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat);
@@ -515,13 +677,25 @@ private:
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat);
- void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ void applyScalerCrop(Camera *camera, const ControlList &controls);
+
+ bool registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
const std::string &name);
bool registerTPGCamera(MediaLink *link);
bool registerSensorCamera(MediaLink *link);
MediaDevice *media_;
std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> stats_;
+ std::unique_ptr<V4L2VideoDevice> params_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> statsBuffers_;
+ std::queue<FrameBuffer *> availableStatsBuffers_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramsBuffers_;
+ std::queue<FrameBuffer *> availableParamsBuffers_;
+
+ std::map<unsigned int, MaliC55FrameInfo> frameInfoMap_;
std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
@@ -607,7 +781,10 @@ PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
if (isRaw) {
/* Make sure the mbus code is supported. */
- unsigned int rawCode = maliFormat.second;
+ int rawCode = data->pixfmtToMbusCode(pixFmt);
+ if (rawCode < 0)
+ continue;
+
const auto sizes = data->sizes(rawCode);
if (sizes.empty())
continue;
@@ -715,16 +892,28 @@ int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
if (ret)
return ret;
- /* \todo Configure the resizer crop/compose rectangles. */
- Rectangle ispCrop = { 0, 0, config.size };
+ /*
+ * Compute the scaler-in to scaler-out ratio: first center-crop to align
+ * the FOV to the desired resolution, then scale to the desired size.
+ */
+ Size scalerIn = subdevFormat.size.boundedToAspectRatio(config.size);
+ int xCrop = (subdevFormat.size.width - scalerIn.width) / 2;
+ int yCrop = (subdevFormat.size.height - scalerIn.height) / 2;
+ Rectangle ispCrop = { xCrop, yCrop, scalerIn };
ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
if (ret)
return ret;
- ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop);
+ Rectangle ispCompose = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCompose);
if (ret)
return ret;
+ /*
+ * The source pad format size comes directly from the sink
+ * compose rectangle.
+ */
+ subdevFormat.size = ispCompose.size();
subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
return pipe->resizer->setFormat(1, &subdevFormat);
}
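The crop/compose computation in configureProcessedStream() is pure arithmetic: bound the scaler input to the output's aspect ratio, then center the resulting window. A worked standalone example with made-up sensor and stream sizes; boundToAspectRatio() is a hypothetical stand-in for libcamera's Size::boundedToAspectRatio():

#include <cstdint>
#include <cstdio>

struct Size {
	unsigned int width;
	unsigned int height;
};

/* Shrink 'in' to the aspect ratio of 'ratio' without upscaling. */
Size boundToAspectRatio(const Size &in, const Size &ratio)
{
	uint64_t height = static_cast<uint64_t>(in.width) * ratio.height / ratio.width;
	if (height <= in.height)
		return { in.width, static_cast<unsigned int>(height) };

	uint64_t width = static_cast<uint64_t>(in.height) * ratio.width / ratio.height;
	return { static_cast<unsigned int>(width), in.height };
}

int main()
{
	Size sensor{ 2592, 1944 }; /* 4:3 input to the resizer */
	Size output{ 1920, 1080 }; /* 16:9 requested stream */

	Size scalerIn = boundToAspectRatio(sensor, output);
	unsigned int xCrop = (sensor.width - scalerIn.width) / 2;
	unsigned int yCrop = (sensor.height - scalerIn.height) / 2;

	/* Prints "crop (0, 243)/2592x1458": a centered 16:9 window. */
	printf("crop (%u, %u)/%ux%u\n", xCrop, yCrop,
	       scalerIn.width, scalerIn.height);
	return 0;
}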
@@ -756,16 +945,33 @@ int PipelineHandlerMaliC55::configure(Camera *camera,
if (ret)
return ret;
+ if (data->sensor_) {
+ ret = data->sensor_->setFormat(&subdevFormat,
+ maliConfig->combinedTransform());
+ if (ret)
+ return ret;
+ }
+
if (data->csi_) {
ret = data->csi_->setFormat(0, &subdevFormat);
if (ret)
return ret;
- ret = data->csi_->setFormat(1, &subdevFormat);
+ ret = data->csi_->getFormat(1, &subdevFormat);
if (ret)
return ret;
}
+ V4L2DeviceFormat statsFormat;
+ ret = stats_->getFormat(&statsFormat);
+ if (ret)
+ return ret;
+
+ if (statsFormat.planes[0].size != sizeof(struct mali_c55_stats_buffer)) {
+ LOG(MaliC55, Error) << "3a stats buffer size invalid";
+ return -EINVAL;
+ }
+
/*
* Propagate the format to the ISP sink pad and configure the input
* crop rectangle (no crop at the moment).
@@ -795,6 +1001,17 @@ int PipelineHandlerMaliC55::configure(Camera *camera,
Stream *stream = streamConfig.stream();
MaliC55Pipe *pipe = pipeFromStream(data, stream);
+ /*
+ * Enable the media link between the pipe's resizer and the
+ * capture video device.
+ */
+
+ ret = pipe->link->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable resizer's link";
+ return ret;
+ }
+
if (isFormatRaw(streamConfig.pixelFormat))
ret = configureRawStream(data, streamConfig, subdevFormat);
else
@@ -816,6 +1033,56 @@ int PipelineHandlerMaliC55::configure(Camera *camera,
pipe->stream = stream;
}
+ if (!data->ipa_)
+ return 0;
+
+ /*
+ * Enable the media link between the ISP subdevice and the statistics
+ * video device.
+ */
+ const MediaEntity *ispEntity = isp_->entity();
+ ret = ispEntity->getPadByIndex(3)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable statistics link";
+ return ret;
+ }
+
+ /*
+ * Enable the media link between the ISP subdevice and the parameters
+ * video device.
+ */
+ ret = ispEntity->getPadByIndex(4)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable parameters link";
+ return ret;
+ }
+
+ /* We need to inform the IPA of the sensor configuration. */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = data->sensor_->controls();
+
+ /*
+ * And we also need to tell the IPA the bayerOrder of the data (as
+ * affected by any flips that we've configured).
+ */
+ const Transform &combinedTransform = maliConfig->combinedTransform();
+ BayerFormat::Order bayerOrder = data->sensor_->bayerOrder(combinedTransform);
+
+ ControlInfoMap ipaControls;
+ ret = data->ipa_->configure(ipaConfig, utils::to_underlying(bayerOrder),
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure IPA";
+ return ret;
+ }
+
+ data->updateControls(ipaControls);
+
return 0;
}
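The Bayer order handed to the IPA above changes under flips: a horizontal flip swaps the two columns of the sensor's 2x2 colour tile and a vertical flip swaps its rows. A small standalone demonstration of that permutation (simplified enum, not libcamera's BayerFormat::Order):

#include <cstdio>

enum Order { RGGB, GRBG, GBRG, BGGR };

static const char *names[] = { "RGGB", "GRBG", "GBRG", "BGGR" };

/*
 * An H-flip swaps the two columns of the 2x2 tile, a V-flip swaps its
 * two rows; applying both rotates the tile by 180 degrees.
 */
Order applyFlips(Order order, bool hflip, bool vflip)
{
	static const Order hflipped[] = { GRBG, RGGB, BGGR, GBRG };
	static const Order vflipped[] = { GBRG, BGGR, RGGB, GRBG };

	if (hflip)
		order = hflipped[order];
	if (vflip)
		order = vflipped[order];

	return order;
}

int main()
{
	/* A 180-degree rotation turns RGGB into BGGR. */
	printf("%s\n", names[applyFlips(RGGB, true, true)]);
	return 0;
}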
@@ -828,32 +1095,166 @@ int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
return pipe->cap->exportBuffers(count, buffers);
}
-int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls)
+void PipelineHandlerMaliC55::freeBuffers(Camera *camera)
{
+ MaliC55CameraData *data = cameraData(camera);
+
+ while (!availableStatsBuffers_.empty())
+ availableStatsBuffers_.pop();
+ while (!availableParamsBuffers_.empty())
+ availableParamsBuffers_.pop();
+
+ statsBuffers_.clear();
+ paramsBuffers_.clear();
+
+ if (data->ipa_) {
+ data->ipa_->unmapBuffers(data->ipaStatBuffers_);
+ data->ipa_->unmapBuffers(data->ipaParamBuffers_);
+ }
+ data->ipaStatBuffers_.clear();
+ data->ipaParamBuffers_.clear();
+
+ if (stats_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release stats buffers";
+
+ if (params_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release params buffers";
+
+ return;
+}
+
+int PipelineHandlerMaliC55::allocateBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ unsigned int ipaBufferId = 1;
+ unsigned int bufferCount;
+ int ret;
+
+ bufferCount = std::max({
+ data->frStream_.configuration().bufferCount,
+ data->dsStream_.configuration().bufferCount,
+ });
+
+ ret = stats_->allocateBuffers(bufferCount, &statsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : statsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaStatBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableStatsBuffers_.push(buffer.get());
+ }
+
+ ret = params_->allocateBuffers(bufferCount, &paramsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : paramsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaParamBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableParamsBuffers_.push(buffer.get());
+ }
+
+ if (data->ipa_) {
+ data->ipa_->mapBuffers(data->ipaStatBuffers_, true);
+ data->ipa_->mapBuffers(data->ipaParamBuffers_, false);
+ }
+
+ return 0;
+}
+
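The cookie assigned in allocateBuffers() is the only buffer identifier that crosses the pipeline/IPA process boundary; both sides refer to a buffer by that integer from then on. A reduced sketch of the pattern, with hypothetical types:

#include <cstdio>
#include <map>
#include <memory>
#include <vector>

struct Buffer {
	unsigned int cookie = 0;
};

/*
 * Assign a unique cookie to every buffer and remember the mapping, so a
 * plain integer can be exchanged with the IPA instead of a pointer.
 */
std::map<unsigned int, Buffer *>
registerWithIpa(std::vector<std::unique_ptr<Buffer>> &buffers)
{
	std::map<unsigned int, Buffer *> byCookie;
	unsigned int id = 1;

	for (auto &buffer : buffers) {
		buffer->cookie = id++;
		byCookie[buffer->cookie] = buffer.get();
	}

	return byCookie;
}

int main()
{
	std::vector<std::unique_ptr<Buffer>> stats;
	for (int i = 0; i < 4; i++)
		stats.push_back(std::make_unique<Buffer>());

	auto map = registerWithIpa(stats);
	printf("cookie 3 -> buffer %p\n", static_cast<void *>(map[3]));
	return 0;
}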
+int PipelineHandlerMaliC55::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ int ret;
+
+ ret = allocateBuffers(camera);
+ if (ret)
+ return ret;
+
+ if (data->ipa_) {
+ ret = data->ipa_->start();
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to start IPA" << camera->id();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
for (MaliC55Pipe &pipe : pipes_) {
if (!pipe.stream)
continue;
Stream *stream = pipe.stream;
- int ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
if (ret) {
LOG(MaliC55, Error) << "Failed to import buffers";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
return ret;
}
ret = pipe.cap->streamOn();
if (ret) {
LOG(MaliC55, Error) << "Failed to start stream";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
return ret;
}
}
+ ret = stats_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stats stream";
+
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = params_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start params stream";
+
+ stats_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = isp_->setFrameStartEnabled(true);
+ if (ret)
+ LOG(MaliC55, Error) << "Failed to enable frame start events";
+
return 0;
}
-void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
+void PipelineHandlerMaliC55::stopDevice(Camera *camera)
{
+ MaliC55CameraData *data = cameraData(camera);
+
+ isp_->setFrameStartEnabled(false);
+
for (MaliC55Pipe &pipe : pipes_) {
if (!pipe.stream)
continue;
@@ -861,38 +1262,285 @@ void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
pipe.cap->streamOff();
pipe.cap->releaseBuffers();
}
+
+ stats_->streamOff();
+ params_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+}
+
+void PipelineHandlerMaliC55::applyScalerCrop(Camera *camera,
+ const ControlList &controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (!scalerCrop)
+ return;
+
+ if (!data->sensor_) {
+ LOG(MaliC55, Error) << "ScalerCrop not supported for TPG";
+ return;
+ }
+
+ Rectangle nativeCrop = *scalerCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ /*
+ * The re-scaling of the ScalerCrop rectangle into the ISP crop
+ * rectangle comes straight from the RPi pipeline handler.
+ *
+ * Create a version of the crop rectangle aligned to the analogue crop
+ * rectangle top-left coordinates and scaled in the [analogue crop to
+ * output frame] ratio to take into account binning/skipping on the
+ * sensor.
+ */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo.analogCrop
+ .topLeft());
+ ispCrop.scaleBy(sensorInfo.outputSize, sensorInfo.analogCrop.size());
+
+ /*
+ * The crop rectangle should be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ Size minSize = ispMinCrop.size().expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center())
+ .enclosedIn(Rectangle(sensorInfo.outputSize));
+
+ /*
+ * As the resizer can't upscale, the crop rectangle has to be larger
+ * than the larger stream output size.
+ */
+ Size maxYuvSize;
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ const StreamConfiguration &config = pipe.stream->configuration();
+ if (isFormatRaw(config.pixelFormat)) {
+ LOG(MaliC55, Debug) << "Cannot crop with a RAW stream";
+ return;
+ }
+
+ Size streamSize = config.size;
+ if (streamSize.width > maxYuvSize.width)
+ maxYuvSize.width = streamSize.width;
+ if (streamSize.height > maxYuvSize.height)
+ maxYuvSize.height = streamSize.height;
+ }
+
+ ispCrop.width = std::max(ispCrop.width, maxYuvSize.width);
+ ispCrop.height = std::max(ispCrop.height, maxYuvSize.height);
+
+ /*
+ * Now apply the scaler crop to each enabled output. This overrides the
+ * crop configuration performed at configure() time and can cause
+ * non-square pixels if the aspect ratios of the crop rectangle and of
+ * the scaler output differ.
+ */
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ /* Create a copy to prevent setSelection() from modifying ispCrop. */
+ Rectangle pipeCrop = ispCrop;
+ ret = pipe.resizer->setSelection(0, V4L2_SEL_TGT_CROP, &pipeCrop);
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to apply crop to "
+ << (pipe.stream == &data->frStream_ ?
+ "FR" : "DS") << " pipe";
+ return;
+ }
+ }
}
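The rectangle gymnastics in applyScalerCrop() boil down to a translate-then-scale between two coordinate frames. A standalone sketch of just that step, with made-up numbers; sensorToIsp() mimics, but is not, libcamera's Rectangle API:

#include <cstdint>
#include <cstdio>

struct Rect {
	int x, y;
	unsigned int width, height;
};

/*
 * Map a rectangle given in sensor-native coordinates into ISP input
 * coordinates: shift to the analogue-crop origin, then scale by the
 * [analogue crop -> sensor output] ratio (binning/skipping).
 */
Rect sensorToIsp(Rect r, const Rect &analogCrop,
		 unsigned int outW, unsigned int outH)
{
	r.x = static_cast<int>(int64_t(r.x - analogCrop.x) * outW / analogCrop.width);
	r.y = static_cast<int>(int64_t(r.y - analogCrop.y) * outH / analogCrop.height);
	r.width = static_cast<unsigned int>(uint64_t(r.width) * outW / analogCrop.width);
	r.height = static_cast<unsigned int>(uint64_t(r.height) * outH / analogCrop.height);

	return r;
}

int main()
{
	Rect analogCrop{ 8, 8, 2592, 1944 };  /* active pixels on the sensor */
	unsigned int outW = 1296, outH = 972; /* 2x2 binned sensor output */

	Rect scalerCrop{ 658, 494, 1292, 972 };
	Rect ispCrop = sensorToIsp(scalerCrop, analogCrop, outW, outH);

	/* Prints "(325, 243)/646x486": the same window, halved by binning. */
	printf("(%d, %d)/%ux%u\n", ispCrop.x, ispCrop.y,
	       ispCrop.width, ispCrop.height);
	return 0;
}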
int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
{
- int ret;
+ MaliC55CameraData *data = cameraData(camera);
- for (auto &[stream, buffer] : request->buffers()) {
- MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ /* Do not run the IPA if the TPG is in use. */
+ if (!data->ipa_) {
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+ frameInfo.statBuffer = nullptr;
+ frameInfo.paramBuffer = nullptr;
+ frameInfo.paramsDone = true;
+ frameInfo.statsDone = true;
- ret = pipe->cap->queueBuffer(buffer);
- if (ret)
- return ret;
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+
+ return 0;
+ }
+
+ if (availableStatsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Stats buffer underrun";
+ return -ENOENT;
+ }
+
+ if (availableParamsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Params buffer underrun";
+ return -ENOENT;
}
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+
+ frameInfo.statBuffer = availableStatsBuffers_.front();
+ availableStatsBuffers_.pop();
+ frameInfo.paramBuffer = availableParamsBuffers_.front();
+ availableParamsBuffers_.pop();
+
+ frameInfo.paramsDone = false;
+ frameInfo.statsDone = false;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+ data->ipa_->fillParams(request->sequence(),
+ frameInfo.paramBuffer->cookie());
+
return 0;
}
-void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer)
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(Request *request)
{
- Request *request = buffer->request();
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.request == request)
+ return &info;
+ }
+
+ return nullptr;
+}
- completeBuffer(request, buffer);
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(FrameBuffer *buffer)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.paramBuffer == buffer ||
+ info.statBuffer == buffer)
+ return &info;
+ }
+ return nullptr;
+}
+
+void PipelineHandlerMaliC55::tryComplete(MaliC55FrameInfo *info)
+{
+ if (!info->paramsDone)
+ return;
+ if (!info->statsDone)
+ return;
+
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
+ if (info->statBuffer)
+ availableStatsBuffers_.push(info->statBuffer);
+ if (info->paramBuffer)
+ availableParamsBuffers_.push(info->paramBuffer);
+
+ frameInfoMap_.erase(request->sequence());
+
completeRequest(request);
}
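A request completes only once its parameter buffer, statistics buffer and all image buffers have returned, in whatever order the hardware delivers them. A distilled sketch of that gating, with hypothetical types:

#include <cstdio>

struct FrameInfo {
	bool paramsDone = false;
	bool statsDone = false;
	unsigned int pendingImageBuffers = 0;
};

/* Every completion path calls this; only the last arrival fires. */
bool tryComplete(FrameInfo &info)
{
	if (!info.paramsDone || !info.statsDone || info.pendingImageBuffers)
		return false;

	/* Recycle the stats/params buffers and complete the request here. */
	return true;
}

int main()
{
	FrameInfo info;
	info.pendingImageBuffers = 1;

	info.paramsDone = true;            /* params buffer dequeued */
	printf("%d\n", tryComplete(info)); /* 0: still waiting */

	info.statsDone = true;             /* stats processed by the IPA */
	info.pendingImageBuffers--;        /* image buffer completed */
	printf("%d\n", tryComplete(info)); /* 1: request can complete */
	return 0;
}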
-void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+void PipelineHandlerMaliC55::imageBufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+ MaliC55FrameInfo *info = findFrameInfo(request);
+ ASSERT(info);
+
+ if (completeBuffer(request, buffer))
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::paramsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ info->paramsDone = true;
+
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::statsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ Request *request = info->request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ ControlList sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence);
+
+ data->ipa_->processStats(request->sequence(), buffer->cookie(),
+ sensorControls);
+}
+
+void PipelineHandlerMaliC55::paramsComputed(unsigned int requestId)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+ Request *request = frameInfo.request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ /*
+ * Queue buffers for stats and params, then queue buffers to the capture
+ * video devices.
+ */
+
+ frameInfo.paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct mali_c55_params_buffer);
+ params_->queueBuffer(frameInfo.paramBuffer);
+ stats_->queueBuffer(frameInfo.statBuffer);
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+}
+
+void PipelineHandlerMaliC55::statsProcessed(unsigned int requestId,
+ const ControlList &metadata)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+
+ frameInfo.statsDone = true;
+ frameInfo.request->metadata().merge(metadata);
+
+ tryComplete(&frameInfo);
+}
+
+bool PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
const std::string &name)
{
+ if (data->loadIPA())
+ return false;
+
+ if (data->ipa_) {
+ data->ipa_->statsProcessed.connect(this, &PipelineHandlerMaliC55::statsProcessed);
+ data->ipa_->paramsComputed.connect(this, &PipelineHandlerMaliC55::paramsComputed);
+ }
+
std::set<Stream *> streams{ &data->frStream_ };
if (dsFitted_)
streams.insert(&data->dsStream_);
@@ -900,6 +1548,8 @@ void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraDat
std::shared_ptr<Camera> camera = Camera::create(std::move(data),
name, streams);
registerCamera(std::move(camera));
+
+ return true;
}
/*
@@ -925,9 +1575,7 @@ bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
if (data->init())
return false;
- registerMaliCamera(std::move(data), name);
-
- return true;
+ return registerMaliCamera(std::move(data), name);
}
/*
@@ -953,9 +1601,24 @@ bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
if (data->init())
return false;
- /* \todo: Init properties and controls. */
+ data->properties_ = data->sensor_->properties();
- registerMaliCamera(std::move(data), sensor->name());
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ /* \todo Init properties. */
+
+ if (!registerMaliCamera(std::move(data), sensor->name()))
+ return false;
}
return true;
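DelayedControls exists because sensor controls written over I2C take effect a number of frames after they are written; values coming from the IPA are therefore queued and only written at a frame-start event early enough for them to land on the intended frame. A toy model of that bookkeeping (not the real DelayedControls API):

#include <cstdio>
#include <deque>

/*
 * Toy model: a control written now takes effect 'delay' frames later,
 * so queue values and write them at the matching frame-start event.
 */
class DelayedValue
{
public:
	explicit DelayedValue(unsigned int delay) : delay_(delay) {}

	void push(int value) { queue_.push_back(value); }

	/* Called from the frame-start handler. */
	void applyControls(unsigned int sequence)
	{
		if (queue_.empty())
			return;

		printf("frame %u: writing %d (affects frame %u)\n",
		       sequence, queue_.front(), sequence + delay_);
		queue_.pop_front();
	}

private:
	unsigned int delay_;
	std::deque<int> queue_;
};

int main()
{
	DelayedValue exposure(2); /* like the exposureDelay above */

	exposure.push(1000);
	exposure.push(1500);

	for (unsigned int seq = 0; seq < 3; seq++)
		exposure.applyControls(seq);
	return 0;
}

The real class additionally records which values were active for each frame sequence, which is what the get() call in statsBufferReady() retrieves.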
@@ -966,7 +1629,7 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
const MediaPad *ispSink;
/*
- * We search for just the ISP subdevice and the full resolution pipe.
+ * We search for just the always-available elements of the media graph.
* The TPG and the downscale pipe are both optional blocks and may not
* be fitted.
*/
@@ -974,6 +1637,8 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
dm.add("mali-c55 isp");
dm.add("mali-c55 resizer fr");
dm.add("mali-c55 fr");
+ dm.add("mali-c55 3a stats");
+ dm.add("mali-c55 3a params");
media_ = acquireMediaDevice(enumerator, dm);
if (!media_)
@@ -983,6 +1648,14 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
if (isp_->open() < 0)
return false;
+ stats_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a stats");
+ if (stats_->open() < 0)
+ return false;
+
+ params_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a params");
+ if (params_->open() < 0)
+ return false;
+
MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
if (frPipe->resizer->open() < 0)
@@ -992,7 +1665,13 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
if (frPipe->cap->open() < 0)
return false;
- frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+ frPipe->link = media_->link("mali-c55 resizer fr", 1, "mali-c55 fr", 0);
+ if (!frPipe->link) {
+ LOG(MaliC55, Error) << "No link between fr resizer and video node";
+ return false;
+ }
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
if (dsFitted_) {
@@ -1008,9 +1687,19 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
if (dsPipe->cap->open() < 0)
return false;
- dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
+ dsPipe->link = media_->link("mali-c55 resizer ds", 1,
+ "mali-c55 ds", 0);
+ if (!dsPipe->link) {
+ LOG(MaliC55, Error) << "No link between ds resizer and video node";
+ return false;
+ }
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
}
+ stats_->bufferReady.connect(this, &PipelineHandlerMaliC55::statsBufferReady);
+ params_->bufferReady.connect(this, &PipelineHandlerMaliC55::paramsBufferReady);
+
ispSink = isp_->entity()->getPadByIndex(0);
if (!ispSink || ispSink->links().empty()) {
LOG(MaliC55, Error) << "ISP sink pad error";
@@ -1061,6 +1750,6 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
return true;
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
index 30fd29b9..eba8e5a3 100644
--- a/src/libcamera/pipeline/mali-c55/meson.build
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'mali-c55.cpp'
])
diff --git a/src/libcamera/pipeline/rkisp1/meson.build b/src/libcamera/pipeline/rkisp1/meson.build
index cad66535..d21a6ef9 100644
--- a/src/libcamera/pipeline/rkisp1/meson.build
+++ b/src/libcamera/pipeline/rkisp1/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'rkisp1.cpp',
'rkisp1_path.cpp',
])
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index abb21968..52633fe3 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,15 +2,16 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
-#include <array>
-#include <iomanip>
+#include <map>
#include <memory>
#include <numeric>
+#include <optional>
#include <queue>
+#include <vector>
#include <linux/media-bus-format.h>
#include <linux/rkisp1-config.h>
@@ -23,6 +24,7 @@
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include <libcamera/transform.h>
@@ -33,6 +35,8 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
@@ -94,6 +98,7 @@ public:
}
PipelineHandlerRkISP1 *pipe();
+ const PipelineHandlerRkISP1 *pipe() const;
int loadIPA(unsigned int hwRevision);
Stream mainPathStream_;
@@ -109,8 +114,10 @@ public:
std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_;
+ ControlInfoMap ipaControls_;
+
private:
- void paramFilled(unsigned int frame);
+ void paramsComputed(unsigned int frame, unsigned int bytesused);
void setSensorControls(unsigned int frame,
const ControlList &sensorControls);
@@ -170,20 +177,24 @@ private:
}
friend RkISP1CameraData;
+ friend RkISP1CameraConfiguration;
friend RkISP1Frames;
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
void tryCompleteRequest(RkISP1FrameInfo *info);
- void bufferReady(FrameBuffer *buffer);
- void paramReady(FrameBuffer *buffer);
- void statReady(FrameBuffer *buffer);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void dewarpBufferReady(FrameBuffer *buffer);
void frameStart(uint32_t sequence);
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
+ int updateControls(RkISP1CameraData *data);
+
MediaDevice *media_;
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
@@ -196,6 +207,16 @@ private:
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
+ std::unique_ptr<V4L2M2MConverter> dewarper_;
+ Rectangle scalerMaxCrop_;
+ bool useDewarper_;
+
+ std::optional<Rectangle> activeCrop_;
+
+ /* Internal buffers used when the dewarper is in use */
+ std::vector<std::unique_ptr<FrameBuffer>> mainPathBuffers_;
+ std::queue<FrameBuffer *> availableMainPathBuffers_;
+
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
std::queue<FrameBuffer *> availableParamBuffers_;
@@ -218,6 +239,8 @@ RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *req
FrameBuffer *paramBuffer = nullptr;
FrameBuffer *statBuffer = nullptr;
+ FrameBuffer *mainPathBuffer = nullptr;
+ FrameBuffer *selfPathBuffer = nullptr;
if (!isRaw) {
if (pipe_->availableParamBuffers_.empty()) {
@@ -235,10 +258,16 @@ RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *req
statBuffer = pipe_->availableStatBuffers_.front();
pipe_->availableStatBuffers_.pop();
+
+ if (pipe_->useDewarper_) {
+ mainPathBuffer = pipe_->availableMainPathBuffers_.front();
+ pipe_->availableMainPathBuffers_.pop();
+ }
}
- FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
- FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
+ if (!mainPathBuffer)
+ mainPathBuffer = request->findBuffer(&data->mainPathStream_);
+ selfPathBuffer = request->findBuffer(&data->selfPathStream_);
RkISP1FrameInfo *info = new RkISP1FrameInfo;
@@ -264,6 +293,7 @@ int RkISP1Frames::destroy(unsigned int frame)
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
frameInfo_.erase(info->frame);
@@ -279,6 +309,7 @@ void RkISP1Frames::clear()
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
delete info;
}
@@ -334,6 +365,11 @@ PipelineHandlerRkISP1 *RkISP1CameraData::pipe()
return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe());
}
+const PipelineHandlerRkISP1 *RkISP1CameraData::pipe() const
+{
+ return static_cast<const PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
+
int RkISP1CameraData::loadIPA(unsigned int hwRevision)
{
ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1);
@@ -341,26 +377,12 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
return -ENOENT;
ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls);
- ipa_->paramsBufferReady.connect(this, &RkISP1CameraData::paramFilled);
+ ipa_->paramsComputed.connect(this, &RkISP1CameraData::paramsComputed);
ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady);
- /*
- * The API tuning file is made from the sensor name unless the
- * environment variable overrides it.
- */
- std::string ipaTuningFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0') {
- ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml");
- /*
- * If the tuning file isn't found, fall back to the
- * 'uncalibrated' configuration file.
- */
- if (ipaTuningFile.empty())
- ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
- } else {
- ipaTuningFile = std::string(configFromEnv);
- }
+ /* The IPA tuning file is made from the sensor name. */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor_->model() + ".yaml", "uncalibrated.yaml");
IPACameraSensorInfo sensorInfo{};
int ret = sensor_->sensorInfo(&sensorInfo);
@@ -370,7 +392,7 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
}
ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
- sensorInfo, sensor_->controls(), &controlInfo_);
+ sensorInfo, sensor_->controls(), &ipaControls_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
@@ -379,15 +401,14 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision)
return 0;
}
-void RkISP1CameraData::paramFilled(unsigned int frame)
+void RkISP1CameraData::paramsComputed(unsigned int frame, unsigned int bytesused)
{
PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
RkISP1FrameInfo *info = frameInfo_.find(frame);
if (!info)
return;
- info->paramBuffer->_d()->metadata().planes()[0].bytesused =
- sizeof(struct rkisp1_params_cfg);
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused = bytesused;
pipe->param_->queueBuffer(info->paramBuffer);
pipe->stat_->queueBuffer(info->statBuffer);
@@ -454,11 +475,12 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
StreamConfiguration config;
config = cfg;
- if (data_->mainPath_->validate(sensor, &config) != Valid)
+ if (data_->mainPath_->validate(sensor, sensorConfig, &config) != Valid)
return false;
config = cfg;
- if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid)
+ if (data_->selfPath_ &&
+ data_->selfPath_->validate(sensor, sensorConfig, &config) != Valid)
return false;
return true;
@@ -466,6 +488,7 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
+ const PipelineHandlerRkISP1 *pipe = data_->pipe();
const CameraSensor *sensor = data_->sensor_.get();
unsigned int pathCount = data_->selfPath_ ? 2 : 1;
Status status;
@@ -475,6 +498,27 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig) {
+ if (!sensorConfig->isValid()) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration request";
+
+ return Invalid;
+ }
+
+ unsigned int bitDepth = sensorConfig->bitDepth;
+ if (bitDepth != 8 && bitDepth != 10 && bitDepth != 12) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration bit depth";
+
+ return Invalid;
+ }
+ }
+
/* Cap the number of entries to the available streams. */
if (config_.size() > pathCount) {
config_.resize(pathCount);
@@ -501,6 +545,18 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
}
}
+ bool useDewarper = false;
+ if (pipe->dewarper_) {
+ /*
+ * Platforms with dewarper support, such as i.MX8MP, support
+ * only a single stream. We can inspect config_[0] only here.
+ */
+ bool isRaw = PixelFormatInfo::info(config_[0].pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+ if (!isRaw)
+ useDewarper = true;
+ }
+
/*
* If there are more than one stream in the configuration figure out the
* order to evaluate the streams. The first stream has the highest
@@ -513,50 +569,72 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
if (config_.size() == 2 && fitsAllPaths(config_[0]))
std::reverse(order.begin(), order.end());
+ /*
+ * Validate the configuration against the desired path and, if the
+ * platform supports it, the dewarper.
+ */
+ auto validateConfig = [&](StreamConfiguration &cfg, RkISP1Path *path,
+ Stream *stream, Status expectedStatus) {
+ StreamConfiguration tryCfg = cfg;
+
+ Status ret = path->validate(sensor, sensorConfig, &tryCfg);
+ if (ret == Invalid)
+ return false;
+
+ if (!useDewarper &&
+ (expectedStatus == Valid && ret == Adjusted))
+ return false;
+
+ if (useDewarper) {
+ bool adjusted;
+
+ pipe->dewarper_->validateOutput(&tryCfg, &adjusted,
+ Converter::Alignment::Down);
+ if (expectedStatus == Valid && adjusted)
+ return false;
+ }
+
+ cfg = tryCfg;
+ cfg.setStream(stream);
+ return true;
+ };
+
bool mainPathAvailable = true;
bool selfPathAvailable = data_->selfPath_;
+ RkISP1Path *mainPath = data_->mainPath_;
+ RkISP1Path *selfPath = data_->selfPath_;
+ Stream *mainPathStream = const_cast<Stream *>(&data_->mainPathStream_);
+ Stream *selfPathStream = const_cast<Stream *>(&data_->selfPathStream_);
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Valid)) {
mainPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
continue;
}
}
if (selfPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Valid)) {
selfPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
continue;
}
}
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Adjusted)) {
mainPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
status = Adjusted;
continue;
}
}
if (selfPathAvailable) {
- StreamConfiguration tryCfg = cfg;
- if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Adjusted)) {
selfPathAvailable = false;
- cfg = tryCfg;
- cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
status = Adjusted;
continue;
}
@@ -590,7 +668,8 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
[](const auto &value) { return value.second; });
}
- sensorFormat_ = sensor->getFormat(mbusCodes, maxSize);
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize,
+ mainPath->maxResolution());
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
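The two validation passes above implement a greedy assignment: first give every stream a path that serves it without adjustment, and only then allow adjusted matches. A compact illustration of the same strategy, with hypothetical path limits:

#include <cstdio>
#include <initializer_list>

enum Status { Valid, Adjusted };

/* A path serves a stream unmodified only if it fits within its limits. */
Status validate(unsigned int pathMaxWidth, unsigned int streamWidth)
{
	return streamWidth <= pathMaxWidth ? Valid : Adjusted;
}

int main()
{
	struct Path {
		const char *name;
		unsigned int maxWidth;
		bool available;
	} paths[] = {
		{ "main", 4096, true },
		{ "self", 1920, true },
	};
	unsigned int streams[] = { 3840, 2560 };

	/* Pass 1 accepts exact matches only, pass 2 allows adjustments. */
	for (Status wanted : { Valid, Adjusted }) {
		for (unsigned int &stream : streams) {
			if (!stream)
				continue;

			for (Path &path : paths) {
				if (!path.available ||
				    validate(path.maxWidth, stream) != wanted)
					continue;

				printf("%u -> %s (%s)\n", stream, path.name,
				       wanted == Valid ? "valid" : "adjusted");
				path.available = false;
				stream = 0; /* assigned */
				break;
			}
		}
	}

	return 0;
}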
@@ -603,7 +682,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate()
*/
PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager), hasSelfPath_(true)
+ : PipelineHandler(manager), hasSelfPath_(true), useDewarper_(false)
{
}
@@ -730,7 +809,13 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
V4L2SubdeviceFormat format = config->sensorFormat();
LOG(RkISP1, Debug) << "Configuring sensor with " << format;
- ret = sensor->setFormat(&format, config->combinedTransform());
+ if (config->sensorConfig)
+ ret = sensor->applyConfiguration(*config->sensorConfig,
+ config->combinedTransform(),
+ &format);
+ else
+ ret = sensor->setFormat(&format, config->combinedTransform());
+
if (ret < 0)
return ret;
@@ -746,28 +831,48 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
if (ret < 0)
return ret;
- Rectangle rect(0, 0, format.size);
- ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
+ Rectangle inputCrop(0, 0, format.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &inputCrop);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "ISP input pad configured with " << format
- << " crop " << rect;
+ << " crop " << inputCrop;
+ Rectangle outputCrop = inputCrop;
const PixelFormat &streamFormat = config->at(0).pixelFormat;
const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ useDewarper_ = dewarper_ && !isRaw_;
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
if (!isRaw_)
format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+ /*
+ * On devices without DUAL_CROP (like the imx8mp) cropping needs to be
+ * done on the ISP/IS output.
+ */
+ if (media_->hwRevision() == RKISP1_V_IMX8MP) {
+ /* imx8mp has only a single path. */
+ const auto &cfg = config->at(0);
+ Size ispCrop = format.size.boundedToAspectRatio(cfg.size);
+ if (useDewarper_)
+ ispCrop = dewarper_->adjustInputSize(cfg.pixelFormat,
+ ispCrop);
+ else
+ ispCrop.alignUpTo(2, 2);
+
+ outputCrop = ispCrop.centeredTo(Rectangle(format.size).center());
+ format.size = ispCrop;
+ }
+
LOG(RkISP1, Debug)
<< "Configuring ISP output pad with " << format
- << " crop " << rect;
+ << " crop " << outputCrop;
- ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &rect);
+ ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &outputCrop);
if (ret < 0)
return ret;
@@ -778,15 +883,37 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
LOG(RkISP1, Debug)
<< "ISP output pad configured with " << format
- << " crop " << rect;
+ << " crop " << outputCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret)
+ return ret;
std::map<unsigned int, IPAStream> streamConfig;
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
for (const StreamConfiguration &cfg : *config) {
if (cfg.stream() == &data->mainPathStream_) {
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
+ /* Configure dewarp */
+ if (dewarper_ && !isRaw_) {
+ outputCfgs.push_back(const_cast<StreamConfiguration &>(cfg));
+ ret = dewarper_->configure(cfg, outputCfgs);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate the crop rectangle of the data
+ * flowing into the dewarper in sensor
+ * coordinates.
+ */
+ scalerMaxCrop_ =
+ outputCrop.transformedBetween(inputCrop,
+ sensorInfo.analogCrop);
+ }
} else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
@@ -800,7 +927,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
}
V4L2DeviceFormat paramFormat;
- paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS);
+ paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_EXT_PARAMS);
ret = param_->setFormat(&paramFormat);
if (ret)
return ret;
@@ -812,20 +939,17 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
- ipa::rkisp1::IPAConfigInfo ipaConfig{};
-
- ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
- if (ret)
- return ret;
+ ipa::rkisp1::IPAConfigInfo ipaConfig{ sensorInfo,
+ data->sensor_->controls(),
+ paramFormat.fourcc };
- ipaConfig.sensorControls = data->sensor_->controls();
-
- ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_);
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
}
- return 0;
+
+ return updateControls(data);
}
int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
@@ -834,10 +958,19 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S
RkISP1CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- if (stream == &data->mainPathStream_)
- return mainPath_.exportBuffers(count, buffers);
- else if (hasSelfPath_ && stream == &data->selfPathStream_)
+ if (stream == &data->mainPathStream_) {
+ /*
+ * Currently the i.MX8MP is the only platform with the DW100
+ * dewarper. It has a main path and no self path, so for now export
+ * buffers from the dewarper only for the main path stream.
+ */
+ if (useDewarper_)
+ return dewarper_->exportBuffers(&data->mainPathStream_, count, buffers);
+ else
+ return mainPath_.exportBuffers(count, buffers);
+ } else if (hasSelfPath_ && stream == &data->selfPathStream_) {
return selfPath_.exportBuffers(count, buffers);
+ }
return -EINVAL;
}
@@ -863,6 +996,16 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
goto error;
}
+ /* If the dewarper is being used, allocate internal buffers for ISP. */
+ if (useDewarper_) {
+ ret = mainPath_.exportBuffers(maxCount, &mainPathBuffers_);
+ if (ret < 0)
+ goto error;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : mainPathBuffers_)
+ availableMainPathBuffers_.push(buffer.get());
+ }
+
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
data->ipaBuffers_.emplace_back(buffer->cookie(),
@@ -884,6 +1027,7 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
error:
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
return ret;
}
@@ -898,8 +1042,12 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
while (!availableParamBuffers_.empty())
availableParamBuffers_.pop();
+ while (!availableMainPathBuffers_.empty())
+ availableMainPathBuffers_.pop();
+
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : data->ipaBuffers_)
@@ -920,71 +1068,71 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
RkISP1CameraData *data = cameraData(camera);
+ utils::ScopeExitActions actions;
int ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ actions += [&]() { freeBuffers(camera); };
ret = data->ipa_->start();
if (ret) {
- freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start IPA " << camera->id();
return ret;
}
+ actions += [&]() { data->ipa_->stop(); };
data->frame_ = 0;
if (!isRaw_) {
ret = param_->streamOn();
if (ret) {
- data->ipa_->stop();
- freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start parameters " << camera->id();
return ret;
}
+ actions += [&]() { param_->streamOff(); };
ret = stat_->streamOn();
if (ret) {
- param_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start statistics " << camera->id();
return ret;
}
+ actions += [&]() { stat_->streamOff(); };
+
+ if (useDewarper_) {
+ ret = dewarper_->start();
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to start dewarper";
+ return ret;
+ }
+ actions += [&]() { dewarper_->stop(); };
+ }
}
if (data->mainPath_->isEnabled()) {
ret = mainPath_.start();
- if (ret) {
- param_->streamOff();
- stat_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
+ if (ret)
return ret;
- }
+ actions += [&]() { mainPath_.stop(); };
}
if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
- if (ret) {
- mainPath_.stop();
- param_->streamOff();
- stat_->streamOff();
- data->ipa_->stop();
- freeBuffers(camera);
+ if (ret)
return ret;
- }
}
isp_->setFrameStartEnabled(true);
activeCamera_ = camera;
- return ret;
+
+ actions.release();
+ return 0;
}
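The unwinding above leans on utils::ScopeExitActions: each acquired resource registers a cleanup action, an early return runs the accumulated actions, and release() disarms them once the whole start sequence has succeeded. A minimal sketch of the idiom, with hypothetical acquire/release helpers standing in for the real devices:

	#include <libcamera/base/utils.h>

	/* Hypothetical stand-ins for the devices started above. */
	int acquireA();
	void releaseA();
	int acquireB();
	void releaseB();

	int startEverything()
	{
		libcamera::utils::ScopeExitActions actions;

		int ret = acquireA();
		if (ret)
			return ret;
		actions += [&]() { releaseA(); };

		ret = acquireB();
		if (ret)	/* an early return here unwinds A */
			return ret;
		actions += [&]() { releaseB(); };

		/* Everything started: disarm the cleanup actions. */
		actions.release();
		return 0;
	}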
void PipelineHandlerRkISP1::stopDevice(Camera *camera)
@@ -1010,6 +1158,9 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera)
if (ret)
LOG(RkISP1, Warning)
<< "Failed to stop parameters for " << camera->id();
+
+ if (useDewarper_)
+ dewarper_->stop();
}
ASSERT(data->queuedRequests_.empty());
@@ -1036,8 +1187,8 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
if (data->selfPath_ && info->selfPathBuffer)
data->selfPath_->queueBuffer(info->selfPathBuffer);
} else {
- data->ipa_->fillParamsBuffer(data->frame_,
- info->paramBuffer->cookie());
+ data->ipa_->computeParams(data->frame_,
+ info->paramBuffer->cookie());
}
data->frame_++;
@@ -1101,6 +1252,58 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera,
return 0;
}
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls the pipeline is
+ * responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function relies on data->ipaControls_ being refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to ensure the IPA
+ * controls list is up to date.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerRkISP1::updateControls(RkISP1CameraData *data)
+{
+ ControlInfoMap::Map controls;
+
+ if (dewarper_) {
+ std::pair<Rectangle, Rectangle> cropLimits;
+ if (dewarper_->isConfigured(&data->mainPathStream_))
+ cropLimits = dewarper_->inputCropBounds(&data->mainPathStream_);
+ else
+ cropLimits = dewarper_->inputCropBounds();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so the limits
+		 * need to be transformed accordingly. We can safely assume that
+		 * the maximum crop limit contains the full FOV of the dewarper.
+ */
+ Rectangle min = cropLimits.first.transformedBetween(cropLimits.second,
+ scalerMaxCrop_);
+
+ controls[&controls::ScalerCrop] = ControlInfo(min,
+ scalerMaxCrop_,
+ scalerMaxCrop_);
+ data->properties_.set(properties::ScalerCropMaximum, scalerMaxCrop_);
+ activeCrop_ = scalerMaxCrop_;
+ }
+
+ /* Add the IPA registered controls to list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
+ controls::controls);
+
+ return 0;
+}
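Both here and in the buffer completion path below, the mapping between dewarper and sensor coordinates goes through Rectangle::transformedBetween(), which remaps a rectangle from one frame of reference to another. A worked sketch with made-up numbers (no real sensor implied):

	#include <libcamera/geometry.h>

	using namespace libcamera;

	/* Dewarper input crop bounds, i.e. cropLimits.first / .second. */
	Rectangle minCrop{ 0, 0, 640, 480 };
	Rectangle maxCrop{ 0, 0, 1920, 1080 };
	/* Full sensor frame, i.e. scalerMaxCrop_. */
	Rectangle sensorFrame{ 0, 0, 3840, 2160 };

	/*
	 * Map the minimum crop from dewarper coordinates (relative to
	 * maxCrop) to sensor coordinates: both dimensions scale by 2.
	 */
	Rectangle minInSensor = minCrop.transformedBetween(maxCrop, sensorFrame);
	/* minInSensor is Rectangle(0, 0, 1280, 960). */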
+
int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
{
int ret;
@@ -1109,22 +1312,20 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
std::make_unique<RkISP1CameraData>(this, &mainPath_,
hasSelfPath_ ? &selfPath_ : nullptr);
- data->sensor_ = std::make_unique<CameraSensor>(sensor);
- ret = data->sensor_->init();
- if (ret)
- return ret;
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!data->sensor_)
+ return -ENODEV;
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
- /*
- * \todo Read dealy values from the sensor itself or from a
- * a sensor database. For now use generic values taken from
- * the Raspberry Pi and listed as generic values.
- */
+ scalerMaxCrop_ = Rectangle(data->sensor_->resolution());
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
- { V4L2_CID_EXPOSURE, { 2, false } },
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ { V4L2_CID_VBLANK, { 1, false } },
};
data->delayedCtrls_ =
@@ -1137,6 +1338,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
if (ret)
return ret;
+ updateControls(data.get());
+
std::set<Stream *> streams{
&data->mainPathStream_,
&data->selfPathStream_,
@@ -1209,11 +1412,34 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (hasSelfPath_ && !selfPath_.init(media_))
return false;
- mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
+ mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
if (hasSelfPath_)
- selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
- stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
- param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statBufferReady);
+ param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramBufferReady);
+
+ /* If dewarper is present, create its instance. */
+ DeviceMatch dwp("dw100");
+ dwp.add("dw100-source");
+ dwp.add("dw100-sink");
+
+ std::shared_ptr<MediaDevice> dwpMediaDevice = enumerator->search(dwp);
+ if (dwpMediaDevice) {
+ dewarper_ = std::make_unique<V4L2M2MConverter>(dwpMediaDevice.get());
+ if (dewarper_->isValid()) {
+ dewarper_->outputBufferReady.connect(
+ this, &PipelineHandlerRkISP1::dewarpBufferReady);
+
+ LOG(RkISP1, Info)
+ << "Using DW100 dewarper " << dewarper_->deviceNode();
+ } else {
+ LOG(RkISP1, Warning)
+ << "Found DW100 dewarper " << dewarper_->deviceNode()
+ << " but invalid";
+
+ dewarper_.reset();
+ }
+ }
/*
* Enumerate all sensors connected to the ISP and create one
@@ -1251,7 +1477,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
completeRequest(request);
}
-void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::imageBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1261,7 +1487,7 @@ void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
return;
const FrameMetadata &metadata = buffer->metadata();
- Request *request = buffer->request();
+ Request *request = info->request;
if (metadata.status != FrameMetadata::FrameCancelled) {
/*
@@ -1276,18 +1502,102 @@ void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
if (isRaw_) {
const ControlList &ctrls =
data->delayedCtrls_->get(metadata.sequence);
- data->ipa_->processStatsBuffer(info->frame, 0, ctrls);
+ data->ipa_->processStats(info->frame, 0, ctrls);
}
} else {
if (isRaw_)
info->metadataProcessed = true;
}
+ if (!useDewarper_) {
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+
+ return;
+ }
+
+ /* Do not queue cancelled frames to dewarper. */
+ if (metadata.status == FrameMetadata::FrameCancelled) {
+ /*
+		 * The i.MX8MP is the only known platform with a dewarper, and
+		 * it has no self path. Hence, only main path buffer completion
+		 * is required.
+		 *
+		 * Also, we cannot call completeBuffer(request, buffer) here, as
+		 * the buffer is an internal one (between the ISP and the
+		 * dewarper) and is not associated with any specific request. The
+		 * request buffer associated with the main path stream is the one
+		 * that needs to be completed (not the internal buffer).
+ */
+ for (auto it : request->buffers()) {
+ if (it.first == &data->mainPathStream_)
+ completeBuffer(request, it.second);
+ }
+
+ tryCompleteRequest(info);
+ return;
+ }
+
+ /* Handle scaler crop control. */
+ const auto &crop = request->controls().get(controls::ScalerCrop);
+ if (crop) {
+ Rectangle rect = crop.value();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so it needs to
+		 * be transformed into dewarper coordinates. We can safely assume
+		 * that the maximum crop limit contains the full FOV of the
+		 * dewarper.
+ */
+ std::pair<Rectangle, Rectangle> cropLimits =
+ dewarper_->inputCropBounds(&data->mainPathStream_);
+
+ rect = rect.transformedBetween(scalerMaxCrop_, cropLimits.second);
+ int ret = dewarper_->setInputCrop(&data->mainPathStream_,
+ &rect);
+ rect = rect.transformedBetween(cropLimits.second, scalerMaxCrop_);
+ if (!ret && rect != crop.value()) {
+ /*
+ * If the rectangle is changed by setInputCrop on the
+ * dewarper, log a debug message and cache the actual
+ * applied rectangle for metadata reporting.
+ */
+ LOG(RkISP1, Debug)
+ << "Applied rectangle " << rect.toString()
+ << " differs from requested " << crop.value().toString();
+ }
+
+ activeCrop_ = rect;
+ }
+
+ /*
+ * Queue input and output buffers to the dewarper. The output
+ * buffers for the dewarper are the buffers of the request, supplied
+ * by the application.
+ */
+ int ret = dewarper_->queueBuffers(buffer, request->buffers());
+ if (ret < 0)
+ LOG(RkISP1, Error) << "Cannot queue buffers to dewarper: "
+ << strerror(-ret);
+
+ request->metadata().set(controls::ScalerCrop, activeCrop_.value());
+}
+
+void PipelineHandlerRkISP1::dewarpBufferReady(FrameBuffer *buffer)
+{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+ Request *request = buffer->request();
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer->request());
+ if (!info)
+ return;
+
completeBuffer(request, buffer);
tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::paramBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1300,7 +1610,7 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::statBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1318,10 +1628,10 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (data->frame_ <= buffer->metadata().sequence)
data->frame_ = buffer->metadata().sequence + 1;
- data->ipa_->processStatsBuffer(info->frame, info->statBuffer->cookie(),
- data->delayedCtrls_->get(buffer->metadata().sequence));
+ data->ipa_->processStats(info->frame, info->statBuffer->cookie(),
+ data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 9195aad2..eee5b09e 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.cpp - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
@@ -54,11 +54,8 @@ const std::map<PixelFormat, uint32_t> formatToMediaBus = {
} /* namespace */
-RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
- const Size &minResolution, const Size &maxResolution)
- : name_(name), running_(false), formats_(formats),
- minResolution_(minResolution), maxResolution_(maxResolution),
- link_(nullptr)
+RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats)
+ : name_(name), running_(false), formats_(formats), link_(nullptr)
{
}
@@ -126,12 +123,50 @@ void RkISP1Path::populateFormats()
}
}
+/**
+ * \brief Filter the sensor resolutions that can be supported
+ * \param[in] sensor The camera sensor
+ *
+ * This function retrieves all the sizes supported by the sensor and
+ * filters all the resolutions that can be supported on the pipeline.
+ * It is possible that the sensor's maximum output resolution is higher
+ * than the ISP maximum input. In that case, this function filters out all
+ * the resolution incapable of being supported and returns the maximum
+ * sensor resolution that can be supported by the pipeline.
+ *
+ * \return Maximum sensor size supported on the pipeline
+ */
+Size RkISP1Path::filterSensorResolution(const CameraSensor *sensor)
+{
+ auto iter = sensorSizesMap_.find(sensor);
+ if (iter != sensorSizesMap_.end())
+ return iter->second.back();
+
+ std::vector<Size> &sizes = sensorSizesMap_[sensor];
+ for (unsigned int code : sensor->mbusCodes()) {
+ for (const Size &size : sensor->sizes(code)) {
+ if (size.width > maxResolution_.width ||
+ size.height > maxResolution_.height)
+ continue;
+
+ sizes.push_back(size);
+ }
+ }
+
+ /* Sort in increasing order and remove duplicates. */
+ std::sort(sizes.begin(), sizes.end());
+ auto last = std::unique(sizes.begin(), sizes.end());
+ sizes.erase(last, sizes.end());
+
+ return sizes.back();
+}
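As an aside, the sort-and-unique pair above is the standard idiom for deduplicating the size list, relying on the ordering and equality operators that Size provides. A self-contained sketch with illustrative values:

	#include <algorithm>
	#include <vector>

	#include <libcamera/geometry.h>

	using namespace libcamera;

	std::vector<Size> dedupe(std::vector<Size> sizes)
	{
		std::sort(sizes.begin(), sizes.end());
		sizes.erase(std::unique(sizes.begin(), sizes.end()),
			    sizes.end());
		return sizes;
	}

	/*
	 * dedupe({ { 1920, 1080 }, { 1280, 720 }, { 1920, 1080 } }) yields
	 * { 1280x720, 1920x1080 }, so back() is the largest supported size.
	 */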
+
StreamConfiguration
RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
StreamRole role)
{
const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
- const Size &resolution = sensor->resolution();
+ Size resolution = filterSensorResolution(sensor);
/* Min and max resolutions to populate the available stream formats. */
Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
@@ -216,11 +251,13 @@ RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
return cfg;
}
-CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
- StreamConfiguration *cfg)
+CameraConfiguration::Status
+RkISP1Path::validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg)
{
const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
- const Size &resolution = sensor->resolution();
+ Size resolution = filterSensorResolution(sensor);
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
@@ -247,9 +284,14 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
continue;
/*
- * Store the raw format with the highest bits per pixel
- * for later usage.
+ * If the bits per pixel is supplied from the sensor
+ * configuration, choose a raw format that complies with
+ * it. Otherwise, store the raw format with the highest
+ * bits per pixel for later usage.
*/
+ if (sensorConfig && info.bitsPerPixel != sensorConfig->bitDepth)
+ continue;
+
if (info.bitsPerPixel > rawBitsPerPixel) {
rawBitsPerPixel = info.bitsPerPixel;
rawFormat = format;
@@ -262,6 +304,9 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
}
}
+ if (sensorConfig && !rawFormat.isValid())
+ return CameraConfiguration::Invalid;
+
bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
PixelFormatInfo::ColourEncodingRAW;
@@ -281,14 +326,48 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
if (isRaw) {
/*
* Use the sensor output size closest to the requested stream
- * size.
+ * size while ensuring the output size doesn't exceed ISP limits.
+ *
+ * As 'resolution' is the largest sensor resolution
+ * supported by the ISP, CameraSensor::getFormat() will never
+ * return a V4L2SubdeviceFormat with a larger size.
*/
uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ cfg->size.boundTo(resolution);
+
+ Size rawSize = sensorConfig ? sensorConfig->outputSize
+ : cfg->size;
+
V4L2SubdeviceFormat sensorFormat =
- sensor->getFormat({ mbusCode }, cfg->size);
+ sensor->getFormat({ mbusCode }, rawSize);
+
+ if (sensorConfig &&
+ sensorConfig->outputSize != sensorFormat.size)
+ return CameraConfiguration::Invalid;
minResolution = sensorFormat.size;
maxResolution = sensorFormat.size;
+ } else if (sensorConfig) {
+ /*
+		 * We have already ensured that 'rawFormat' matches the bit
+		 * depth requested in sensorConfig.bitDepth, hence only the
+		 * sensorConfig's output size needs to be validated here.
+ */
+ Size sensorSize = sensorConfig->outputSize;
+
+ if (sensorSize > resolution)
+ return CameraConfiguration::Invalid;
+
+ uint32_t mbusCode = formatToMediaBus.at(rawFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, sensorSize);
+
+ if (sensorFormat.size != sensorSize)
+ return CameraConfiguration::Invalid;
+
+ minResolution = minResolution_.expandedToAspectRatio(sensorSize);
+ maxResolution = maxResolution_.boundedTo(sensorSize)
+ .boundedToAspectRatio(sensorSize);
} else {
/*
* Adjust the size based on the sensor resolution and absolute
@@ -338,11 +417,14 @@ int RkISP1Path::configure(const StreamConfiguration &config,
/*
* Crop on the resizer input to maintain FOV before downscaling.
*
- * \todo The alignment to a multiple of 2 pixels is required but may
- * change the aspect ratio very slightly. A more advanced algorithm to
- * compute the resizer input crop rectangle is needed, and it should
- * also take into account the need to crop away the edge pixels affected
- * by the ISP processing blocks.
+	 * Note that this does not apply to devices without DUAL_CROP support
+	 * (such as the i.MX8MP), where the cropping needs to be done in the
+	 * ImageStabilizer block on the ISP source pad and is therefore
+	 * configured before this stage. For simplicity we still set the crop;
+	 * it gets ignored by the kernel driver because the hardware lacks the
+	 * capability.
+ *
+ * Alignment to a multiple of 2 pixels is required by the resizer.
*/
Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
.alignedUpTo(2, 2);
@@ -435,12 +517,10 @@ void RkISP1Path::stop()
}
/*
- * \todo Remove the hardcoded resolutions and formats once all users will have
- * migrated to a recent enough kernel.
+ * \todo Remove the hardcoded formats once all users have migrated to a
+ * recent enough kernel.
*/
namespace {
-constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
-constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
@@ -462,8 +542,6 @@ constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::SRGGB12,
};
-constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
-constexpr Size RKISP1_RSZ_SP_SRC_MAX{ 1920, 1920 };
constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
formats::YUYV,
formats::NV16,
@@ -477,14 +555,12 @@ constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
} /* namespace */
RkISP1MainPath::RkISP1MainPath()
- : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS,
- RKISP1_RSZ_MP_SRC_MIN, RKISP1_RSZ_MP_SRC_MAX)
+ : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS)
{
}
RkISP1SelfPath::RkISP1SelfPath()
- : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS,
- RKISP1_RSZ_SP_SRC_MIN, RKISP1_RSZ_SP_SRC_MAX)
+ : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS)
{
}
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index cd77957e..2a1ef0ab 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * rkisp1path.h - Rockchip ISP1 path helper
+ * Rockchip ISP1 path helper
*/
#pragma once
+#include <map>
#include <memory>
#include <set>
#include <vector>
@@ -25,6 +26,7 @@ namespace libcamera {
class CameraSensor;
class MediaDevice;
+class SensorConfiguration;
class V4L2Subdevice;
struct StreamConfiguration;
struct V4L2SubdeviceFormat;
@@ -32,8 +34,7 @@ struct V4L2SubdeviceFormat;
class RkISP1Path
{
public:
- RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
- const Size &minResolution, const Size &maxResolution);
+ RkISP1Path(const char *name, const Span<const PixelFormat> &formats);
bool init(MediaDevice *media);
@@ -44,6 +45,7 @@ public:
const Size &resolution,
StreamRole role);
CameraConfiguration::Status validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
@@ -60,9 +62,11 @@ public:
int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); }
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
+ const Size &maxResolution() const { return maxResolution_; }
private:
void populateFormats();
+ Size filterSensorResolution(const CameraSensor *sensor);
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
@@ -77,6 +81,12 @@ private:
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
MediaLink *link_;
+
+ /*
+	 * Map from camera sensors to the sizes (in increasing order) that
+	 * are guaranteed to be supported by the pipeline.
+ */
+ std::map<const CameraSensor *, std::vector<Size>> sensorSizesMap_;
};
class RkISP1MainPath : public RkISP1Path
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
index 3db92e7d..ad50a7c8 100644
--- a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.cpp - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*
* Note: This has been forked from the libcamera core implementation.
*/
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
index 61f755f0..487b0057 100644
--- a/src/libcamera/pipeline/rpi/common/delayed_controls.h
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*
* Note: This has been forked from the libcamera core implementation.
*/
diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
index 8fb7e823..b2b1a0a6 100644
--- a/src/libcamera/pipeline/rpi/common/meson.build
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'delayed_controls.cpp',
'pipeline_base.cpp',
'rpi_stream.cpp',
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
index 7e420b3f..1f13e523 100644
--- a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
- * pipeline_base.cpp - Pipeline handler base class for Raspberry Pi devices
+ * Pipeline handler base class for Raspberry Pi devices
*/
#include "pipeline_base.h"
@@ -105,7 +105,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_
Status status = Valid;
yuvColorSpace_.reset();
- for (auto cfg : config_) {
+ for (auto &cfg : config_) {
/* First fix up raw streams to have the "raw" colour space. */
if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
/* If there was no value here, that doesn't count as "adjusted". */
@@ -130,7 +130,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_
rgbColorSpace_->range = ColorSpace::Range::Full;
/* Go through the streams again and force everyone to the same colour space. */
- for (auto cfg : config_) {
+ for (auto &cfg : config_) {
if (cfg.colorSpace == ColorSpace::Raw)
continue;
@@ -181,12 +181,14 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
rawStreams_.clear();
outStreams_.clear();
+ unsigned int rawStreamIndex = 0;
+ unsigned int outStreamIndex = 0;
- for (const auto &[index, cfg] : utils::enumerate(config_)) {
+ for (auto &cfg : config_) {
if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
- rawStreams_.emplace_back(index, &cfg);
+ rawStreams_.emplace_back(rawStreamIndex++, &cfg);
else
- outStreams_.emplace_back(index, &cfg);
+ outStreams_.emplace_back(outStreamIndex++, &cfg);
}
/* Sort the streams so the highest resolution is first. */
@@ -474,7 +476,11 @@ PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole
*/
for (const auto &format : fmts) {
PixelFormat pf = format.first.toPixelFormat();
- if (pf.isValid()) {
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
const SizeRange &ispSizes = format.second[0];
deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
ispSizes.hStep, ispSizes.vStep);
@@ -492,8 +498,6 @@ PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole
config->addConfiguration(cfg);
}
- config->validate();
-
return config;
}
@@ -531,6 +535,7 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
* Platform specific internal stream configuration. This also assigns
* external streams which get configured below.
*/
+ data->cropParams_.clear();
ret = data->platformConfigure(rpiConfig);
if (ret)
return ret;
@@ -543,12 +548,6 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
}
/*
- * Set the scaler crop to the value we are using (scaled to native sensor
- * coordinates).
- */
- data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
-
- /*
* Update the ScalerCropMaximum to the correct value for this camera mode.
* For us, it's the same as the "analogue crop".
*
@@ -565,9 +564,28 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
for (auto const &c : result.controlInfo)
ctrlMap.emplace(c.first, c.second);
- /* Add the ScalerCrop control limits based on the current mode. */
- Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
- ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
+ const auto cropParamsIt = data->cropParams_.find(0);
+ if (cropParamsIt != data->cropParams_.end()) {
+ const CameraData::CropParams &cropParams = cropParamsIt->second;
+ /*
+ * Add the ScalerCrop control limits based on the current mode and
+ * the first configured stream.
+ */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(cropParams.ispMinCropSize));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop,
+ data->scaleIspCrop(cropParams.ispCrop));
+ if (data->cropParams_.size() == 2) {
+ /*
+			 * The control info for rpi::ScalerCrops encodes the
+			 * default crop for stream 0 as its min value and the
+			 * default crop for stream 1 as its max value.
+ */
+ ctrlMap[&controls::rpi::ScalerCrops] =
+ ControlInfo(data->scaleIspCrop(data->cropParams_.at(0).ispCrop),
+ data->scaleIspCrop(data->cropParams_.at(1).ispCrop),
+ ctrlMap[&controls::ScalerCrop].def());
+ }
+ }
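Given that encoding, an application could read back the per-stream defaults roughly as follows; a hedged sketch, assuming a camera configured with two output streams:

	#include <iostream>

	#include <libcamera/libcamera.h>

	using namespace libcamera;

	void printCropDefaults(Camera *camera)
	{
		const ControlInfoMap &infoMap = camera->controls();
		const auto it = infoMap.find(&controls::rpi::ScalerCrops);
		if (it == infoMap.end())
			return;

		/* min() encodes the stream 0 default, max() the stream 1 default. */
		std::cout << "stream 0: " << it->second.min().get<Rectangle>()
			  << ", stream 1: " << it->second.max().get<Rectangle>()
			  << std::endl;
	}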
data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
@@ -772,13 +790,10 @@ int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &camera
CameraData *data = cameraData.get();
int ret;
- data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
+ data->sensor_ = CameraSensorFactoryBase::create(sensorEntity);
if (!data->sensor_)
return -EINVAL;
- if (data->sensor_->init())
- return -EINVAL;
-
/* Populate the map of sensor supported formats and sizes. */
for (auto const mbusCode : data->sensor_->mbusCodes())
data->sensorFormats_.emplace(mbusCode,
@@ -801,11 +816,12 @@ int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &camera
* Setup our delayed control writer with the sensor default
* gain and exposure delays. Mark VBLANK for priority write.
*/
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
- { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
- { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
- { V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
- { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { delays.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { delays.vblankDelay, true } }
};
data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
@@ -1140,20 +1156,11 @@ int CameraData::loadIPA(ipa::RPi::InitResult *result)
if (!ipa_)
return -ENOENT;
- /*
- * The configuration (tuning file) is made from the sensor name unless
- * the environment variable overrides it.
- */
- std::string configurationFile;
- char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
- if (!configFromEnv || *configFromEnv == '\0') {
- std::string model = sensor_->model();
- if (isMonoSensor(sensor_))
- model += "_mono";
- configurationFile = ipa_->configurationFile(model + ".json");
- } else {
- configurationFile = std::string(configFromEnv);
- }
+ /* The configuration (tuning file) is made from the sensor name. */
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ std::string configurationFile = ipa_->configurationFile(model + ".json");
IPASettings settings(configurationFile, sensor_->model());
ipa::RPi::InitParams params;
@@ -1293,9 +1300,30 @@ Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
void CameraData::applyScalerCrop(const ControlList &controls)
{
- const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
- if (scalerCrop) {
- Rectangle nativeCrop = *scalerCrop;
+ const auto &scalerCropRPi = controls.get<Span<const Rectangle>>(controls::rpi::ScalerCrops);
+ const auto &scalerCropCore = controls.get<Rectangle>(controls::ScalerCrop);
+ std::vector<Rectangle> scalerCrops;
+
+ /*
+	 * The first thing to do is to create a vector of crops to apply to
+	 * each ISP output, based on controls::ScalerCrop or
+	 * controls::rpi::ScalerCrops if present.
+	 *
+	 * If controls::rpi::ScalerCrops is present, apply the given crops to
+	 * the ISP output streams, indexed by the order in which the streams
+	 * were configured. This is not the same as the ISP output index.
+	 * Otherwise, if controls::ScalerCrop is present, apply the same crop
+	 * to all ISP output streams.
+ */
+ for (unsigned int i = 0; i < cropParams_.size(); i++) {
+ if (scalerCropRPi && i < scalerCropRPi->size())
+ scalerCrops.push_back(scalerCropRPi->data()[i]);
+ else if (scalerCropCore)
+ scalerCrops.push_back(*scalerCropCore);
+ }
+
+ for (auto const &[i, scalerCrop] : utils::enumerate(scalerCrops)) {
+ Rectangle nativeCrop = scalerCrop;
if (!nativeCrop.width || !nativeCrop.height)
nativeCrop = { 0, 0, 1, 1 };
@@ -1311,20 +1339,13 @@ void CameraData::applyScalerCrop(const ControlList &controls)
* 2. With the same mid-point, if possible.
* 3. But it can't go outside the sensor area.
*/
- Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
+ Size minSize = cropParams_.at(i).ispMinCropSize.expandedToAspectRatio(nativeCrop.size());
Size size = ispCrop.size().expandedTo(minSize);
ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
- if (ispCrop != ispCrop_) {
- ispCrop_ = ispCrop;
- platformSetIspCrop();
-
- /*
- * Also update the ScalerCrop in the metadata with what we actually
- * used. But we must first rescale that from ISP (camera mode) pixels
- * back into sensor native pixels.
- */
- scalerCrop_ = scaleIspCrop(ispCrop_);
+ if (ispCrop != cropParams_.at(i).ispCrop) {
+ cropParams_.at(i).ispCrop = ispCrop;
+ platformSetIspCrop(cropParams_.at(i).ispIndex, ispCrop);
}
}
}
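The request side of this scheme, for reference: a hedged sketch that sets one crop per configured output stream (the rectangles are illustrative only):

	#include <vector>

	#include <libcamera/libcamera.h>

	using namespace libcamera;

	void setPerStreamCrops(Request *request)
	{
		/* One crop per ISP output stream, in configuration order. */
		std::vector<Rectangle> crops{
			Rectangle(0, 0, 1920, 1080),	/* stream 0 */
			Rectangle(480, 270, 960, 540),	/* stream 1 */
		};

		request->controls().set(controls::rpi::ScalerCrops,
					Span<const Rectangle>(crops.data(),
							      crops.size()));
	}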
@@ -1481,7 +1502,18 @@ void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request
request->metadata().set(controls::SensorTimestamp,
bufferControls.get(controls::SensorTimestamp).value_or(0));
- request->metadata().set(controls::ScalerCrop, scalerCrop_);
+ if (cropParams_.size()) {
+ std::vector<Rectangle> crops;
+
+ for (auto const &[k, v] : cropParams_)
+ crops.push_back(scaleIspCrop(v.ispCrop));
+
+ request->metadata().set(controls::ScalerCrop, crops[0]);
+ if (crops.size() > 1) {
+ request->metadata().set(controls::rpi::ScalerCrops,
+ Span<const Rectangle>(crops.data(), crops.size()));
+ }
+ }
}
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
index 0608bbe5..aae0c2f3 100644
--- a/src/libcamera/pipeline/rpi/common/pipeline_base.h
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
- * pipeline_base.h - Pipeline handler base class for Raspberry Pi devices
+ * Pipeline handler base class for Raspberry Pi devices
*/
#include <map>
@@ -83,7 +83,7 @@ public:
Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
void applyScalerCrop(const ControlList &controls);
- virtual void platformSetIspCrop() = 0;
+ virtual void platformSetIspCrop(unsigned int index, const Rectangle &ispCrop) = 0;
void cameraTimeout();
void frameStarted(uint32_t sequence);
@@ -133,9 +133,23 @@ public:
/* For handling digital zoom. */
IPACameraSensorInfo sensorInfo_;
- Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
- Rectangle scalerCrop_; /* crop in sensor native pixels */
- Size ispMinCropSize_;
+
+ struct CropParams {
+ CropParams(Rectangle ispCrop_, Size ispMinCropSize_, unsigned int ispIndex_)
+ : ispCrop(ispCrop_), ispMinCropSize(ispMinCropSize_), ispIndex(ispIndex_)
+ {
+ }
+
+ /* Crop in ISP (camera mode) pixels */
+ Rectangle ispCrop;
+ /* Minimum crop size in ISP output pixels */
+ Size ispMinCropSize;
+ /* Index of the ISP output channel for this crop */
+ unsigned int ispIndex;
+ };
+
+ /* Mapping of CropParams keyed by the output stream order in CameraConfiguration */
+ std::map<unsigned int, CropParams> cropParams_;
unsigned int dropFrameCount_;
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
index 70f115f1..accf59eb 100644
--- a/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.cpp - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
index 48ed41ab..a13d5dc0 100644
--- a/src/libcamera/pipeline/rpi/common/rpi_stream.h
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * rpi_stream.h - Raspberry Pi device stream abstraction class.
+ * Raspberry Pi device stream abstraction class.
*/
#pragma once
diff --git a/src/libcamera/pipeline/rpi/common/shared_mem_object.h b/src/libcamera/pipeline/rpi/common/shared_mem_object.h
deleted file mode 100644
index aa56c220..00000000
--- a/src/libcamera/pipeline/rpi/common/shared_mem_object.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2023, Raspberry Pi Ltd
- *
- * shared_mem_object.h - Helper class for shared memory allocations
- */
-#pragma once
-
-#include <cstddef>
-#include <fcntl.h>
-#include <string>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <utility>
-
-#include <libcamera/base/class.h>
-#include <libcamera/base/shared_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-template<class T>
-class SharedMemObject
-{
-public:
- static constexpr std::size_t SIZE = sizeof(T);
-
- SharedMemObject()
- : obj_(nullptr)
- {
- }
-
- template<class... Args>
- SharedMemObject(const std::string &name, Args &&...args)
- : name_(name), obj_(nullptr)
- {
- void *mem;
- int ret;
-
- ret = memfd_create(name_.c_str(), MFD_CLOEXEC);
- if (ret < 0)
- return;
-
- fd_ = SharedFD(std::move(ret));
- if (!fd_.isValid())
- return;
-
- ret = ftruncate(fd_.get(), SIZE);
- if (ret < 0)
- return;
-
- mem = mmap(nullptr, SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_.get(), 0);
- if (mem == MAP_FAILED)
- return;
-
- obj_ = new (mem) T(std::forward<Args>(args)...);
- }
-
- SharedMemObject(SharedMemObject<T> &&rhs)
- {
- this->name_ = std::move(rhs.name_);
- this->fd_ = std::move(rhs.fd_);
- this->obj_ = rhs.obj_;
- rhs.obj_ = nullptr;
- }
-
- ~SharedMemObject()
- {
- if (obj_) {
- obj_->~T();
- munmap(obj_, SIZE);
- }
- }
-
- /* Make SharedMemObject non-copyable for now. */
- LIBCAMERA_DISABLE_COPY(SharedMemObject)
-
- SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
- {
- this->name_ = std::move(rhs.name_);
- this->fd_ = std::move(rhs.fd_);
- this->obj_ = rhs.obj_;
- rhs.obj_ = nullptr;
- return *this;
- }
-
- T *operator->()
- {
- return obj_;
- }
-
- const T *operator->() const
- {
- return obj_;
- }
-
- T &operator*()
- {
- return *obj_;
- }
-
- const T &operator*() const
- {
- return *obj_;
- }
-
- const SharedFD &fd() const
- {
- return fd_;
- }
-
- explicit operator bool() const
- {
- return !!obj_;
- }
-
-private:
- std::string name_;
- SharedFD fd_;
- T *obj_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp b/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp
deleted file mode 100644
index 317b1fc1..00000000
--- a/src/libcamera/pipeline/rpi/vc4/dma_heaps.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi Ltd
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#include "dma_heaps.h"
-
-#include <array>
-#include <fcntl.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-heap.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <libcamera/base/log.h>
-
-/*
- * /dev/dma-heap/linux,cma is the dma-heap allocator, which allows dmaheap-cma
- * to only have to worry about importing.
- *
- * Annoyingly, should the cma heap size be specified on the kernel command line
- * instead of DT, the heap gets named "reserved" instead.
- */
-static constexpr std::array<const char *, 2> heapNames = {
- "/dev/dma_heap/linux,cma",
- "/dev/dma_heap/reserved"
-};
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(RPI)
-
-namespace RPi {
-
-DmaHeap::DmaHeap()
-{
- for (const char *name : heapNames) {
- int ret = ::open(name, O_RDWR | O_CLOEXEC, 0);
- if (ret < 0) {
- ret = errno;
- LOG(RPI, Debug) << "Failed to open " << name << ": "
- << strerror(ret);
- continue;
- }
-
- dmaHeapHandle_ = UniqueFD(ret);
- break;
- }
-
- if (!dmaHeapHandle_.isValid())
- LOG(RPI, Error) << "Could not open any dmaHeap device";
-}
-
-DmaHeap::~DmaHeap() = default;
-
-UniqueFD DmaHeap::alloc(const char *name, std::size_t size)
-{
- int ret;
-
- if (!name)
- return {};
-
- struct dma_heap_allocation_data alloc = {};
-
- alloc.len = size;
- alloc.fd_flags = O_CLOEXEC | O_RDWR;
-
- ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap allocation failure for "
- << name;
- return {};
- }
-
- UniqueFD allocFd(alloc.fd);
- ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
- if (ret < 0) {
- LOG(RPI, Error) << "dmaHeap naming failure for "
- << name;
- return {};
- }
-
- return allocFd;
-}
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/dma_heaps.h b/src/libcamera/pipeline/rpi/vc4/dma_heaps.h
deleted file mode 100644
index 0a4a8d86..00000000
--- a/src/libcamera/pipeline/rpi/vc4/dma_heaps.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2020, Raspberry Pi Ltd
- *
- * dma_heaps.h - Helper class for dma-heap allocations.
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include <libcamera/base/unique_fd.h>
-
-namespace libcamera {
-
-namespace RPi {
-
-class DmaHeap
-{
-public:
- DmaHeap();
- ~DmaHeap();
- bool isValid() const { return dmaHeapHandle_.isValid(); }
- UniqueFD alloc(const char *name, std::size_t size);
-
-private:
- UniqueFD dmaHeapHandle_;
-};
-
-} /* namespace RPi */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
index cdb049c5..9b37c2f0 100644
--- a/src/libcamera/pipeline/rpi/vc4/meson.build
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
- 'dma_heaps.cpp',
+libcamera_internal_sources += files([
'vc4.cpp',
])
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
index ad7dddde..fd8d84b1 100644
--- a/src/libcamera/pipeline/rpi/vc4/vc4.cpp
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
- * vc4.cpp - Pipeline handler for VC4-based Raspberry Pi devices
+ * Pipeline handler for VC4-based Raspberry Pi devices
*/
#include <linux/bcm2835-isp.h>
@@ -12,12 +12,11 @@
#include <libcamera/formats.h>
#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_buf_allocator.h"
#include "../common/pipeline_base.h"
#include "../common/rpi_stream.h"
-#include "dma_heaps.h"
-
using namespace std::chrono_literals;
namespace libcamera {
@@ -87,7 +86,7 @@ public:
RPi::Device<Isp, 4> isp_;
/* DMAHEAP allocation helper. */
- RPi::DmaHeap dmaHeap_;
+ DmaBufAllocator dmaHeap_;
SharedFD lsTable_;
struct Config {
@@ -110,9 +109,10 @@ public:
Config config_;
private:
- void platformSetIspCrop() override
+ void platformSetIspCrop([[maybe_unused]] unsigned int index, const Rectangle &ispCrop) override
{
- isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
+ Rectangle crop = ispCrop;
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
}
int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
@@ -702,13 +702,19 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi
/* Figure out the smallest selection the ISP will allow. */
Rectangle testCrop(0, 0, 1, 1);
isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
- ispMinCropSize_ = testCrop.size();
/* Adjust aspect ratio by providing crops on the input image. */
Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
- ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
+ Rectangle ispCrop = size.centeredTo(Rectangle(unicamFormat.size).center());
- platformSetIspCrop();
+ platformSetIspCrop(0, ispCrop);
+ /*
+	 * Record the crop parameters for this mode; the scaler crop is scaled
+	 * to native sensor coordinates when reported in the request metadata.
+ */
+ cropParams_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(ispCrop, testCrop.size(), 0));
return 0;
}
@@ -803,7 +809,7 @@ void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
{
RPi::Stream *stream = nullptr;
- unsigned int index;
+ unsigned int index = 0;
if (!isRunning())
return;
@@ -1019,6 +1025,6 @@ bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&em
return true;
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
index 42b0896d..dda3de97 100644
--- a/src/libcamera/pipeline/simple/meson.build
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'simple.cpp',
])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index 01f2a977..6e039bf3 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
- * simple.cpp - Pipeline handler for simple pipelines
+ * Pipeline handler for simple pipelines
*/
#include <algorithm>
@@ -13,8 +13,9 @@
#include <memory>
#include <queue>
#include <set>
-#include <string>
+#include <stdint.h>
#include <string.h>
+#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
@@ -30,14 +31,16 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/converter.h"
+#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
-
namespace libcamera {
LOG_DEFINE_CATEGORY(SimplePipeline)
@@ -163,7 +166,7 @@ LOG_DEFINE_CATEGORY(SimplePipeline)
* handler has no a priori knowledge of. The pipeline handler thus implements a
* heuristic to handle sharing of hardware resources in a generic fashion.
*
- * Two cameras are considered to be mutually exclusive if their share common
+ * Two cameras are considered to be mutually exclusive if they share common
* pads along the pipeline from the camera sensor to the video node. An entity
* can thus be used concurrently by multiple cameras, as long as pads are
* distinct.
@@ -185,18 +188,25 @@ struct SimplePipelineInfo {
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
+ /*
+	 * Whether the software ISP is enabled for this driver.
+	 *
+	 * The software ISP can't be used together with the converters.
+ */
+ bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
- { "dcmipp", {} },
- { "imx7-csi", { { "pxp", 1 } } },
- { "j721e-csi2rx", {} },
- { "mtk-seninf", { { "mtk-mdp", 3 } } },
- { "mxc-isi", {} },
- { "qcom-camss", {} },
- { "sun6i-csi", {} },
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
+ { "j721e-csi2rx", {}, true },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
};
} /* namespace */
@@ -216,7 +226,8 @@ public:
int setupFormats(V4L2SubdeviceFormat *format,
V4L2Subdevice::Whence whence,
Transform transform = Transform::Identity);
- void bufferReady(FrameBuffer *buffer);
+ void imageBufferReady(FrameBuffer *buffer);
+ void clearIncompleteRequests();
unsigned int streamIndex(const Stream *stream) const
{
@@ -270,17 +281,28 @@ public:
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ struct RequestOutputs {
+ Request *request;
+ std::map<const Stream *, FrameBuffer *> outputs;
+ };
+ std::queue<RequestOutputs> conversionQueue_;
+ bool useConversion_;
+
std::unique_ptr<Converter> converter_;
- std::vector<std::unique_ptr<FrameBuffer>> converterBuffers_;
- bool useConverter_;
- std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
- void converterInputDone(FrameBuffer *buffer);
- void converterOutputDone(FrameBuffer *buffer);
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady(uint32_t frame, uint32_t bufferId);
+ void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
@@ -332,6 +354,7 @@ public:
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -350,16 +373,16 @@ private:
return static_cast<SimpleCameraData *>(camera->_d());
}
- std::vector<MediaEntity *> locateSensors();
+ std::vector<MediaEntity *> locateSensors(MediaDevice *media);
static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
- MediaDevice *media_;
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
+ bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
@@ -371,8 +394,6 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
MediaEntity *sensor)
: Camera::Private(pipe), streams_(numStreams)
{
- int ret;
-
/*
* Find the shortest path from the camera sensor to a video capture
* device using the breadth-first search algorithm. This heuristic will
@@ -463,12 +484,9 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
}
/* Finally also remember the sensor. */
- sensor_ = std::make_unique<CameraSensor>(sensor);
- ret = sensor_->init();
- if (ret) {
- sensor_.reset();
+ sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!sensor_)
return;
- }
LOG(SimplePipeline, Debug)
<< "Found pipeline: "
@@ -504,8 +522,25 @@ int SimpleCameraData::init()
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
- converter_->inputBufferReady.connect(this, &SimpleCameraData::converterInputDone);
- converter_->outputBufferReady.connect(this, &SimpleCameraData::converterOutputDone);
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+ /*
+	 * Instantiate the software ISP if it is enabled for the given driver
+	 * and no converter is used.
+ */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get(), &controlInfo_);
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ swIsp_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
@@ -599,12 +634,20 @@ void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
config.captureFormat = pixelFormat;
config.captureSize = format.size;
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
+ if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+			/* Do not use the software ISP for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
}
configs_.push_back(config);
@@ -721,17 +764,14 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
}
LOG(SimplePipeline, Debug)
- << "Link '" << source->entity()->name()
- << "':" << source->index()
- << " -> '" << sink->entity()->name()
- << "':" << sink->index()
- << " configured with format " << *format;
+ << "Link " << *link << ": configured with format "
+ << *format;
}
return 0;
}
-void SimpleCameraData::bufferReady(FrameBuffer *buffer)
+void SimpleCameraData::imageBufferReady(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -741,7 +781,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
- if (!useConverter_) {
+ if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
@@ -750,26 +790,22 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
}
/*
- * The converter is in use. Requeue the internal buffer for
- * capture (unless the stream is being stopped), and complete
- * the request with all the user-facing buffers.
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
- if (converterQueue_.empty())
+ if (conversionQueue_.empty())
return;
- Request *request = nullptr;
- for (auto &item : converterQueue_.front()) {
- FrameBuffer *outputBuffer = item.second;
- request = outputBuffer->request();
- pipe->completeBuffer(request, outputBuffer);
- }
- converterQueue_.pop();
+ const RequestOutputs &outputs = conversionQueue_.front();
+ for (auto &[stream, buf] : outputs.outputs)
+ pipe->completeBuffer(outputs.request, buf);
+ pipe->completeRequest(outputs.request);
+ conversionQueue_.pop();
- if (request)
- pipe->completeRequest(request);
return;
}
@@ -783,9 +819,9 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
*/
Request *request = buffer->request();
- if (useConverter_ && !converterQueue_.empty()) {
- const std::map<unsigned int, FrameBuffer *> &outputs =
- converterQueue_.front();
+ if (useConversion_ && !conversionQueue_.empty()) {
+ const std::map<const Stream *, FrameBuffer *> &outputs =
+ conversionQueue_.front().outputs;
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
@@ -798,18 +834,28 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
buffer->metadata().timestamp);
/*
- * Queue the captured and the request buffer to the converter if format
- * conversion is needed. If there's no queued request, just requeue the
- * captured buffer for capture.
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
*/
- if (useConverter_) {
- if (converterQueue_.empty()) {
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
- converter_->queueBuffers(buffer, converterQueue_.front());
- converterQueue_.pop();
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front().outputs);
+ else
+ /*
+			 * request->sequence() cannot be retrieved from `buffer'
+			 * inside queueBuffers() because buffer->request() is
+			 * already invalid at this point.
+ */
+ swIsp_->queueBuffers(request->sequence(), buffer,
+ conversionQueue_.front().outputs);
+
+ conversionQueue_.pop();
return;
}
@@ -818,13 +864,21 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeRequest(request);
}
-void SimpleCameraData::converterInputDone(FrameBuffer *buffer)
+void SimpleCameraData::clearIncompleteRequests()
+{
+ while (!conversionQueue_.empty()) {
+ pipe()->cancelRequest(conversionQueue_.front().request);
+ conversionQueue_.pop();
+ }
+}
+
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
-void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
@@ -834,6 +888,19 @@ void SimpleCameraData::converterOutputDone(FrameBuffer *buffer)
pipe->completeRequest(request);
}
+void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId)
+{
+ swIsp_->processStats(frame, bufferId,
+ delayedCtrls_->get(frame));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
/* Retrieve all source pads connected to a sink pad through active routes. */
std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
{
@@ -882,6 +949,33 @@ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
{
}
+namespace {
+
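+/*
+ * Clamp the requested size to the supported range, then align it to the
+ * range's step values. Worked example with illustrative numbers (not taken
+ * from any driver): requesting 1917x1081 from a 32x32-4096x2160 range with
+ * 2-pixel steps yields 1916x1080.
+ */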
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
+
+} /* namespace */
+
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
@@ -998,10 +1092,19 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+ * the smaller valid output size closest to the requested.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
- << " to " << pipeConfig_->captureSize;
- cfg.size = pipeConfig_->captureSize;
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
status = Adjusted;
}
@@ -1013,8 +1116,11 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
- data_->converter_->strideAndFrameSize(cfg.pixelFormat,
- cfg.size);
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
@@ -1030,7 +1136,7 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
cfg.frameSize = format.planes[0].size;
}
- cfg.bufferCount = 3;
+ cfg.bufferCount = 4;
}
return status;
@@ -1157,27 +1263,44 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
- data->useConverter_ = config->needConversion();
+ data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
- if (data->useConverter_)
+ if (data->useConversion_)
outputCfgs.push_back(cfg);
}
if (outputCfgs.empty())
return 0;
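+	/*
+	 * Deliver sensor exposure and gain through DelayedControls so that
+	 * each value is applied on the frame it was intended for, honouring
+	 * the sensor-specific control delays.
+	 */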
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ data->video_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
StreamConfiguration inputCfg;
inputCfg.pixelFormat = pipeConfig->captureFormat;
inputCfg.size = pipeConfig->captureSize;
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
- return data->converter_->configure(inputCfg, outputCfgs);
+ if (data->converter_) {
+ return data->converter_->configure(inputCfg, outputCfgs);
+ } else {
+ ipa::soft::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->sensor_->controls();
+ return data->swIsp_->configure(inputCfg, outputCfgs, configInfo);
+ }
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
@@ -1190,9 +1313,10 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
- if (data->useConverter_)
- return data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1211,13 +1335,13 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return -EBUSY;
}
- if (data->useConverter_) {
+ if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
- &data->converterBuffers_);
+ &data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
@@ -1228,7 +1352,7 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- video->bufferReady.connect(data, &SimpleCameraData::bufferReady);
+ video->bufferReady.connect(data, &SimpleCameraData::imageBufferReady);
ret = video->streamOn();
if (ret < 0) {
@@ -1236,15 +1360,21 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
return ret;
}
- if (data->useConverter_) {
- ret = data->converter_->start();
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
- for (std::unique_ptr<FrameBuffer> &buffer : data->converterBuffers_)
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
@@ -1256,15 +1386,20 @@ void SimplePipelineHandler::stopDevice(Camera *camera)
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
- if (data->useConverter_)
- data->converter_->stop();
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
video->streamOff();
video->releaseBuffers();
- video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
+ video->bufferReady.disconnect(data, &SimpleCameraData::imageBufferReady);
- data->converterBuffers_.clear();
+ data->clearIncompleteRequests();
+ data->conversionBuffers_.clear();
releasePipeline(data);
}
@@ -1274,7 +1409,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
SimpleCameraData *data = cameraData(camera);
int ret;
- std::map<unsigned int, FrameBuffer *> buffers;
+ std::map<const Stream *, FrameBuffer *> buffers;
for (auto &[stream, buffer] : request->buffers()) {
/*
@@ -1282,8 +1417,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* queue, it will be handed to the converter in the capture
* completion handler.
*/
- if (data->useConverter_) {
- buffers.emplace(data->streamIndex(stream), buffer);
+ if (data->useConversion_) {
+ buffers.emplace(stream, buffer);
} else {
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
@@ -1291,8 +1426,11 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
}
}
- if (data->useConverter_)
- data->converterQueue_.push(std::move(buffers));
+ if (data->useConversion_) {
+ data->conversionQueue_.push({ request, std::move(buffers) });
+ if (data->swIsp_)
+ data->swIsp_->queueRequest(request->sequence(), request->controls());
+ }
return 0;
}
@@ -1301,7 +1439,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* Match and Setup
*/
-std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
+std::vector<MediaEntity *>
+SimplePipelineHandler::locateSensors(MediaDevice *media)
{
std::vector<MediaEntity *> entities;
@@ -1309,7 +1448,7 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
* Gather all the camera sensor entities based on the function they
* expose.
*/
- for (MediaEntity *entity : media_->entities()) {
+ for (MediaEntity *entity : media->entities()) {
if (entity->function() == MEDIA_ENT_F_CAM_SENSOR)
entities.push_back(entity);
}
@@ -1397,17 +1536,18 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
unsigned int numStreams = 1;
+ MediaDevice *media;
for (const SimplePipelineInfo &inf : supportedDevices) {
DeviceMatch dm(inf.driver);
- media_ = acquireMediaDevice(enumerator, dm);
- if (media_) {
+ media = acquireMediaDevice(enumerator, dm);
+ if (media) {
info = &inf;
break;
}
}
- if (!media_)
+ if (!media)
return false;
for (const auto &[name, streams] : info->converters) {
@@ -1419,13 +1559,17 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
}
}
+ swIspEnabled_ = info->swIspEnabled;
+
/* Locate the sensors. */
- std::vector<MediaEntity *> sensors = locateSensors();
+ std::vector<MediaEntity *> sensors = locateSensors(media);
if (sensors.empty()) {
- LOG(SimplePipeline, Error) << "No sensor found";
+ LOG(SimplePipeline, Info) << "No sensor found for " << media->deviceNode();
return false;
}
+ LOG(SimplePipeline, Debug) << "Sensor found for " << media->deviceNode();
+
/*
* Create one camera data instance for each sensor and gather all
* entities in all pipelines.
@@ -1489,8 +1633,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
if (subdev->caps().hasStreams()) {
/*
* Reset the routing table to its default state
- * to make sure entities are enumerate according
- * to the defaul routing configuration.
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
*/
ret = resetRoutingTable(subdev.get());
if (ret) {
@@ -1605,6 +1749,6 @@ void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
}
}
-REGISTER_PIPELINE_HANDLER(SimplePipelineHandler)
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/meson.build b/src/libcamera/pipeline/uvcvideo/meson.build
index a3c2efd4..a3a91074 100644
--- a/src/libcamera/pipeline/uvcvideo/meson.build
+++ b/src/libcamera/pipeline/uvcvideo/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'uvcvideo.cpp',
])
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index ed9c7f88..7470b562 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,17 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
+#include <cmath>
#include <fstream>
-#include <iomanip>
-#include <math.h>
+#include <map>
#include <memory>
-#include <tuple>
+#include <set>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
@@ -44,10 +47,11 @@ public:
int init(MediaDevice *media);
void addControl(uint32_t cid, const ControlInfo &v4l2info,
ControlInfoMap::Map *ctrls);
- void bufferReady(FrameBuffer *buffer);
+ void imageBufferReady(FrameBuffer *buffer);
const std::string &id() const { return id_; }
+ Mutex openLock_;
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
std::map<PixelFormat, std::vector<SizeRange>> formats_;
@@ -93,6 +97,9 @@ private:
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
+ bool acquireDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
UVCCameraData *cameraData(Camera *camera)
{
return static_cast<UVCCameraData *>(camera->_d());
@@ -158,9 +165,29 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
- int ret = data_->video_->tryFormat(&format);
- if (ret)
- return Invalid;
+ /*
+ * For power-consumption reasons video_ is closed when the camera is not
+ * acquired. Open it here if necessary.
+ */
+ {
+ bool opened = false;
+
+ MutexLocker locker(data_->openLock_);
+
+ if (!data_->video_->isOpen()) {
+ int ret = data_->video_->open();
+ if (ret)
+ return Invalid;
+
+ opened = true;
+ }
+
+ int ret = data_->video_->tryFormat(&format);
+ if (opened)
+ data_->video_->close();
+ if (ret)
+ return Invalid;
+ }
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
@@ -271,7 +298,7 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
cid = V4L2_CID_CONTRAST;
else if (id == controls::Saturation)
cid = V4L2_CID_SATURATION;
- else if (id == controls::AeEnable)
+ else if (id == controls::ExposureTimeMode)
cid = V4L2_CID_EXPOSURE_AUTO;
else if (id == controls::ExposureTime)
cid = V4L2_CID_EXPOSURE_ABSOLUTE;
@@ -293,14 +320,14 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
case V4L2_CID_BRIGHTNESS: {
float scale = std::max(max - def, def - min);
float fvalue = value.get<float>() * scale + def;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
case V4L2_CID_SATURATION: {
float scale = def - min;
float fvalue = value.get<float>() * scale + min;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
@@ -327,7 +354,7 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
}
float fvalue = (value.get<float>() - p) / m;
- controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
break;
}
@@ -411,6 +438,23 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return true;
}
+bool PipelineHandlerUVC::acquireDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+
+ return data->video_->open() == 0;
+}
+
+void PipelineHandlerUVC::releaseDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+ data->video_->close();
+}
+
int UVCCameraData::init(MediaDevice *media)
{
int ret;
@@ -432,7 +476,7 @@ int UVCCameraData::init(MediaDevice *media)
if (ret)
return ret;
- video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ video_->bufferReady.connect(this, &UVCCameraData::imageBufferReady);
/* Generate the camera ID. */
if (!generateId()) {
@@ -512,6 +556,12 @@ int UVCCameraData::init(MediaDevice *media)
controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+ /*
+	 * Close the device to allow the camera to go into runtime-suspend;
+	 * video_ will be re-opened from acquireDevice() and validate().
+ */
+ video_->close();
+
return 0;
}
@@ -597,7 +647,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
id = &controls::Saturation;
break;
case V4L2_CID_EXPOSURE_AUTO:
- id = &controls::AeEnable;
+ id = &controls::ExposureTimeMode;
break;
case V4L2_CID_EXPOSURE_ABSOLUTE:
id = &controls::ExposureTime;
@@ -610,6 +660,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
}
/* Map the control info. */
+ const std::vector<ControlValue> &v4l2Values = v4l2Info.values();
int32_t min = v4l2Info.min().get<int32_t>();
int32_t max = v4l2Info.max().get<int32_t>();
int32_t def = v4l2Info.def().get<int32_t>();
@@ -647,10 +698,52 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
};
break;
- case V4L2_CID_EXPOSURE_AUTO:
- info = ControlInfo{ false, true, true };
+ case V4L2_CID_EXPOSURE_AUTO: {
+ /*
+ * From the V4L2_CID_EXPOSURE_AUTO documentation:
+ *
+ * ------------------------------------------------------------
+ * V4L2_EXPOSURE_AUTO:
+ * Automatic exposure time, automatic iris aperture.
+ *
+ * V4L2_EXPOSURE_MANUAL:
+ * Manual exposure time, manual iris.
+ *
+ * V4L2_EXPOSURE_SHUTTER_PRIORITY:
+ * Manual exposure time, auto iris.
+ *
+ * V4L2_EXPOSURE_APERTURE_PRIORITY:
+ * Auto exposure time, manual iris.
+ *-------------------------------------------------------------
+ *
+ * ExposureTimeModeAuto = { V4L2_EXPOSURE_AUTO,
+ * V4L2_EXPOSURE_APERTURE_PRIORITY }
+ *
+ *
+ * ExposureTimeModeManual = { V4L2_EXPOSURE_MANUAL,
+ * V4L2_EXPOSURE_SHUTTER_PRIORITY }
+ */
+		std::array<int32_t, 2> values{};
+		unsigned int count = 0;
+
+		auto it = std::find_if(v4l2Values.begin(), v4l2Values.end(),
+				       [&](const ControlValue &val) {
+					       return val.get<int32_t>() == V4L2_EXPOSURE_APERTURE_PRIORITY ||
+						      val.get<int32_t>() == V4L2_EXPOSURE_AUTO;
+				       });
+		if (it != v4l2Values.end())
+			values[count++] = static_cast<int32_t>(controls::ExposureTimeModeAuto);
+
+		it = std::find_if(v4l2Values.begin(), v4l2Values.end(),
+				  [&](const ControlValue &val) {
+					  return val.get<int32_t>() == V4L2_EXPOSURE_SHUTTER_PRIORITY ||
+						 val.get<int32_t>() == V4L2_EXPOSURE_MANUAL;
+				  });
+		if (it != v4l2Values.end())
+			values[count++] = static_cast<int32_t>(controls::ExposureTimeModeManual);
+
+		info = ControlInfo{ Span<int32_t>{ values.data(), count }, values[0] };
break;
-
+ }
case V4L2_CID_EXPOSURE_ABSOLUTE:
/*
* ExposureTime is in units of 1 µs, and UVC expects
@@ -697,7 +790,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ctrls->emplace(id, info);
}
-void UVCCameraData::bufferReady(FrameBuffer *buffer)
+void UVCCameraData::imageBufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
@@ -709,6 +802,6 @@ void UVCCameraData::bufferReady(FrameBuffer *buffer)
pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/meson.build b/src/libcamera/pipeline/vimc/meson.build
index 290eefb5..868e2546 100644
--- a/src/libcamera/pipeline/vimc/meson.build
+++ b/src/libcamera/pipeline/vimc/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'vimc.cpp',
])
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 5e66ee1d..07273bd2 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,18 +2,19 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
+#include <cmath>
#include <iomanip>
#include <map>
-#include <math.h>
#include <tuple>
#include <linux/media-bus-format.h>
#include <linux/version.h>
+#include <libcamera/base/flags.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
@@ -21,6 +22,8 @@
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
@@ -53,8 +56,8 @@ public:
int init();
int allocateMockIPABuffers();
- void bufferReady(FrameBuffer *buffer);
- void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
@@ -114,6 +117,9 @@ static const std::map<PixelFormat, uint32_t> pixelformats{
{ formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
};
+static constexpr Size kMinSize{ 16, 16 };
+static constexpr Size kMaxSize{ 4096, 2160 };
+
} /* namespace */
VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data)
@@ -153,14 +159,20 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
const Size size = cfg.size;
/*
- * The scaler hardcodes a x3 scale-up ratio, and the sensor output size
- * is aligned to two pixels in both directions. The output width and
- * height thus have to be multiples of 6.
+ * The sensor output size is aligned to two pixels in both directions.
+ * Additionally, prior to v5.16, the scaler hardcodes a x3 scale-up
+ * ratio, requiring the output width and height to be multiples of 6.
*/
- cfg.size.width = std::max(48U, std::min(4096U, cfg.size.width));
- cfg.size.height = std::max(48U, std::min(2160U, cfg.size.height));
- cfg.size.width -= cfg.size.width % 6;
- cfg.size.height -= cfg.size.height % 6;
+ Size minSize{ kMinSize };
+ unsigned int alignment = 2;
+
+ if (data_->media_->version() < KERNEL_VERSION(5, 16, 0)) {
+ minSize *= 3;
+ alignment *= 3;
+ }
+
+ cfg.size.expandTo(minSize).boundTo(kMaxSize)
+ .alignDownTo(alignment, alignment);
if (cfg.size != size) {
LOG(VIMC, Debug)
@@ -216,10 +228,12 @@ PipelineHandlerVimc::generateConfiguration(Camera *camera,
}
}
- /* The scaler hardcodes a x3 scale-up ratio. */
- std::vector<SizeRange> sizes{
- SizeRange{ { 48, 48 }, { 4096, 2160 } }
- };
+ /* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. */
+ Size minSize{ kMinSize };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ minSize *= 3;
+
+ std::vector<SizeRange> sizes{ { minSize, kMaxSize } };
formats[pixelformat.first] = sizes;
}
@@ -242,10 +256,18 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- /* The scaler hardcodes a x3 scale-up ratio. */
+ /*
+ * Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. For newer
+ * kernels, use a sensor resolution of 1920x1080 and let the scaler
+ * produce the requested stream size.
+ */
+ Size sensorSize{ 1920, 1080 };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ sensorSize = { cfg.size.width / 3, cfg.size.height / 3 };
+
V4L2SubdeviceFormat subformat = {};
subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
- subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ subformat.size = sensorSize;
ret = data->sensor_->setFormat(&subformat);
if (ret)
@@ -293,7 +315,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
* vimc driver will fail pipeline validation.
*/
format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8);
- format.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ format.size = sensorSize;
ret = data->raw_->setFormat(&format);
if (ret)
@@ -398,7 +420,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
continue;
}
- int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@@ -470,7 +492,7 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
return false;
}
- data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
+ data->ipa_->paramsComputed.connect(data.get(), &VimcCameraData::paramsComputed);
std::string conf = data->ipa_->configurationFile("vimc.conf");
Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
@@ -510,10 +532,9 @@ int VimcCameraData::init()
return ret;
/* Create and open the camera sensor, debayer, scaler and video device. */
- sensor_ = std::make_unique<CameraSensor>(media_->getEntityByName("Sensor B"));
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor B"));
+ if (!sensor_)
+ return -ENODEV;
debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
if (debayer_->open())
@@ -527,7 +548,7 @@ int VimcCameraData::init()
if (video_->open())
return -ENODEV;
- video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
+ video_->bufferReady.connect(this, &VimcCameraData::imageBufferReady);
raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
if (raw_->open())
@@ -575,7 +596,7 @@ int VimcCameraData::init()
return 0;
}
-void VimcCameraData::bufferReady(FrameBuffer *buffer)
+void VimcCameraData::imageBufferReady(FrameBuffer *buffer)
{
PipelineHandlerVimc *pipe =
static_cast<PipelineHandlerVimc *>(this->pipe());
@@ -600,7 +621,7 @@ void VimcCameraData::bufferReady(FrameBuffer *buffer)
pipe->completeBuffer(request, buffer);
pipe->completeRequest(request);
- ipa_->fillParamsBuffer(request->sequence(), mockIPABufs_[0]->cookie());
+ ipa_->computeParams(request->sequence(), mockIPABufs_[0]->cookie());
}
int VimcCameraData::allocateMockIPABuffers()
@@ -618,11 +639,11 @@ int VimcCameraData::allocateMockIPABuffers()
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
-void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
- [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
+void VimcCameraData::paramsComputed([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc)
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/README.md b/src/libcamera/pipeline/virtual/README.md
new file mode 100644
index 00000000..a9f39c15
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/README.md
@@ -0,0 +1,65 @@
+# Virtual Pipeline Handler
+
+The virtual pipeline handler emulates one or more fake external cameras for testing.
+
+## Parse config file and register cameras
+
+- A sample config file is located at `src/libcamera/pipeline/virtual/data/virtual.yaml`.
+- When libcamera is installed, the sample file is installed as
+  `share/libcamera/pipeline/virtual/virtual.yaml.example`.
+
+### Config File Format
+The config file describes the properties of the cameras to register. It is a
+YAML file whose top level is a dictionary mapping camera ids to their
+properties. Default values are applied when a property is left empty.
+
+Each camera block is a dictionary, containing the following keys:
+- `supported_formats` (list of `VirtualCameraData::Resolution`, optional):
+  List of supported resolutions and frame rates of the emulated camera
+  - `width` (`unsigned int`, default=1920): Width of the resolution. This
+    needs to be even.
+  - `height` (`unsigned int`, default=1080): Height of the resolution.
+  - `frame_rates` (list of `int`, default=`[30,60]`): Range of the frame
+    rate (per second). If the list contains one value, it is both the lower
+    and the upper bound. If the list contains two values, the first is the
+    lower bound and the second is the upper bound. No other number of values
+    is allowed.
+- `test_pattern` (`string`): Which test pattern to use for the frames:
+  "bars" for color bars or "lines" for diagonal lines. Cannot be set
+  together with `frames`.
+- `frames` (dictionary):
+  - `path` (`string`): Path to an image file, or to a directory containing a
+    series of images. Cannot be set together with `test_pattern`.
+    - An image file has the ".jpg" extension.
+    - A directory path ends with "/". The images in the directory are named
+      "{n}.jpg", where {n} is the image sequence number starting from 0.
+- `location` (`string`, default="CameraLocationFront"): The location of the
+  camera. Supported values are "CameraLocationFront", "CameraLocationBack",
+  and "CameraLocationExternal".
+- `model` (`string`, default="Unknown"): The model name of the camera.
+
+See `data/virtual.yaml` for a sample config file.
+
+### Implementation
+
+The `ConfigParser` class provides methods to parse the config file and build
+the cameras to register in the virtual pipeline handler. `parseConfigFile()`
+is the entry point used by the pipeline handler.
+
+The `ConfigParser` class proceeds as follows (see the usage sketch after this
+list):
+1. `parseConfigFile()` parses the config file into a `YamlObject` using
+   `YamlParser::parse()`.
+   - The top level of the config file holds the camera ids; each camera's
+     properties are examined in turn.
+2. For each camera, `parseCameraConfigData()` returns the camera configuration
+   data.
+   - The methods in the next step fill the data through a pointer to the
+     `VirtualCameraData` object.
+   - If the config file contains an invalid configuration, this method returns
+     nullptr and the camera is skipped.
+3. Parse each property and store the data:
+   - `parseSupportedFormats()`: Parses `supported_formats` in the config, which
+     contains resolutions and frame rates.
+   - `parseFrameGenerator()`: Parses `test_pattern` or `frames` in the config.
+   - `parseLocation()`: Parses `location` in the config.
+   - `parseModel()`: Parses `model` in the config.
+4. Control returns to `parseConfigFile()`, which appends the camera
+   configuration to the result.
+5. A list of camera configurations is returned.
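+
+A minimal usage sketch, assuming a `configPath` string and a `pipe` pointer
+to the owning pipeline handler (both names are illustrative, not part of the
+API):
+
+```cpp
+#include <libcamera/base/file.h>
+
+#include "config_parser.h"
+
+File file(configPath);
+if (!file.open(File::OpenModeFlag::ReadOnly))
+	return false;
+
+ConfigParser parser;
+std::vector<std::unique_ptr<VirtualCameraData>> configs =
+	parser.parseConfigFile(file, pipe);
+
+/* Each element describes one virtual camera to be registered. */
+```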
diff --git a/src/libcamera/pipeline/virtual/config_parser.cpp b/src/libcamera/pipeline/virtual/config_parser.cpp
new file mode 100644
index 00000000..1e009946
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.cpp
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#include "config_parser.h"
+
+#include <string.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::vector<std::unique_ptr<VirtualCameraData>>
+ConfigParser::parseConfigFile(File &file, PipelineHandler *pipe)
+{
+ std::vector<std::unique_ptr<VirtualCameraData>> configurations;
+
+ std::unique_ptr<YamlObject> cameras = YamlParser::parse(file);
+ if (!cameras) {
+		LOG(Virtual, Error) << "Failed to parse config file.";
+ return configurations;
+ }
+
+ if (!cameras->isDictionary()) {
+ LOG(Virtual, Error) << "Config file is not a dictionary at the top level.";
+ return configurations;
+ }
+
+ /* Look into the configuration of each camera */
+ for (const auto &[cameraId, cameraConfigData] : cameras->asDict()) {
+ std::unique_ptr<VirtualCameraData> data =
+ parseCameraConfigData(cameraConfigData, pipe);
+		if (!data) {
+			/* Skip the camera if its config is invalid */
+ LOG(Virtual, Error) << "Failed to parse config of the camera: "
+ << cameraId;
+ continue;
+ }
+
+ data->config_.id = cameraId;
+ ControlInfoMap::Map controls;
+ /* \todo Check which resolution's frame rate to be reported */
+		/* \todo Check which resolution's frame rate should be reported */
+ ControlInfo(1000000 / data->config_.resolutions[0].frameRates[1],
+ 1000000 / data->config_.resolutions[0].frameRates[0]);
+
+ std::vector<ControlValue> supportedFaceDetectModes{
+ static_cast<int32_t>(controls::draft::FaceDetectModeOff),
+ };
+ controls[&controls::draft::FaceDetectMode] = ControlInfo(supportedFaceDetectModes);
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+ configurations.push_back(std::move(data));
+ }
+
+ return configurations;
+}
+
+std::unique_ptr<VirtualCameraData>
+ConfigParser::parseCameraConfigData(const YamlObject &cameraConfigData,
+ PipelineHandler *pipe)
+{
+ std::vector<VirtualCameraData::Resolution> resolutions;
+ if (parseSupportedFormats(cameraConfigData, &resolutions))
+ return nullptr;
+
+ std::unique_ptr<VirtualCameraData> data =
+ std::make_unique<VirtualCameraData>(pipe, resolutions);
+
+ if (parseFrameGenerator(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseLocation(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseModel(cameraConfigData, data.get()))
+ return nullptr;
+
+ return data;
+}
+
+int ConfigParser::parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions)
+{
+ if (cameraConfigData.contains("supported_formats")) {
+ const YamlObject &supportedResolutions = cameraConfigData["supported_formats"];
+
+ for (const YamlObject &supportedResolution : supportedResolutions.asList()) {
+ unsigned int width = supportedResolution["width"].get<unsigned int>(1920);
+ unsigned int height = supportedResolution["height"].get<unsigned int>(1080);
+ if (width == 0 || height == 0) {
+				LOG(Virtual, Error) << "Invalid width and/or height";
+ return -EINVAL;
+ }
+ if (width % 2 != 0) {
+ LOG(Virtual, Error) << "Invalid width: width needs to be even";
+ return -EINVAL;
+ }
+
+ std::vector<int64_t> frameRates;
+ if (supportedResolution.contains("frame_rates")) {
+ auto frameRatesList =
+ supportedResolution["frame_rates"].getList<int>();
+ if (!frameRatesList || (frameRatesList->size() != 1 &&
+ frameRatesList->size() != 2)) {
+					LOG(Virtual, Error) << "Invalid frame_rates: must contain one or two values";
+ return -EINVAL;
+ }
+
+ if (frameRatesList->size() == 2 &&
+ frameRatesList.value()[0] > frameRatesList.value()[1]) {
+					LOG(Virtual, Error) << "frame_rates' first value (lower bound)"
+							    << " is higher than the second value (upper bound)";
+ return -EINVAL;
+ }
+ /*
+				 * Push the min and max frame rates. A single
+				 * rate is duplicated.
+ */
+ frameRates.push_back(frameRatesList.value().front());
+ frameRates.push_back(frameRatesList.value().back());
+ } else {
+ frameRates.push_back(30);
+ frameRates.push_back(60);
+ }
+
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ width, height },
+ frameRates });
+ }
+ } else {
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ 1920, 1080 },
+ { 30, 60 } });
+ }
+
+ return 0;
+}
+
+int ConfigParser::parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ const std::string testPatternKey = "test_pattern";
+ const std::string framesKey = "frames";
+ if (cameraConfigData.contains(testPatternKey)) {
+ if (cameraConfigData.contains(framesKey)) {
+ LOG(Virtual, Error) << "A camera should use either "
+ << testPatternKey << " or " << framesKey;
+ return -EINVAL;
+ }
+
+ auto testPattern = cameraConfigData[testPatternKey].get<std::string>("");
+
+ if (testPattern == "bars") {
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (testPattern == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Debug) << "Test pattern: " << testPattern
+ << " is not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ const YamlObject &frames = cameraConfigData[framesKey];
+
+	/* When no frames are provided in the config file, use the color bars test pattern */
+ if (!frames) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ auto path = frames["path"].get<std::string>();
+
+ if (!path) {
+ LOG(Virtual, Error) << "Test pattern or path should be specified.";
+ return -EINVAL;
+ }
+
+ std::vector<std::filesystem::path> files;
+
+ switch (std::filesystem::symlink_status(*path).type()) {
+ case std::filesystem::file_type::regular:
+ files.push_back(*path);
+ break;
+
+ case std::filesystem::file_type::directory:
+ for (const auto &dentry : std::filesystem::directory_iterator{ *path }) {
+ if (dentry.is_regular_file())
+ files.push_back(dentry.path());
+ }
+
+ std::sort(files.begin(), files.end(), [](const auto &a, const auto &b) {
+ return ::strverscmp(a.c_str(), b.c_str()) < 0;
+ });
+
+ if (files.empty()) {
+ LOG(Virtual, Error) << "Directory has no files: " << *path;
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ LOG(Virtual, Error) << "Frame: " << *path << " is not supported";
+ return -EINVAL;
+ }
+
+ data->config_.frame = ImageFrames{ std::move(files) };
+
+ return 0;
+}
+
+int ConfigParser::parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string location = cameraConfigData["location"].get<std::string>("CameraLocationFront");
+
+ /* Default value is properties::CameraLocationFront */
+ auto it = properties::LocationNameValueMap.find(location);
+ if (it == properties::LocationNameValueMap.end()) {
+ LOG(Virtual, Error)
+ << "location: " << location << " is not supported";
+ return -EINVAL;
+ }
+
+ data->properties_.set(properties::Location, it->second);
+
+ return 0;
+}
+
+int ConfigParser::parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string model = cameraConfigData["model"].get<std::string>("Unknown");
+
+ data->properties_.set(properties::Model, model);
+
+ return 0;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/config_parser.h b/src/libcamera/pipeline/virtual/config_parser.h
new file mode 100644
index 00000000..d2000de9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/file.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+class ConfigParser
+{
+public:
+ std::vector<std::unique_ptr<VirtualCameraData>>
+ parseConfigFile(File &file, PipelineHandler *pipe);
+
+private:
+ std::unique_ptr<VirtualCameraData>
+ parseCameraConfigData(const YamlObject &cameraConfigData, PipelineHandler *pipe);
+
+ int parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions);
+ int parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data);
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/data/meson.build b/src/libcamera/pipeline/virtual/data/meson.build
new file mode 100644
index 00000000..ce63f9a2
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/data/meson.build
@@ -0,0 +1,4 @@
+install_data('virtual.yaml',
+ install_dir : pipeline_data_dir / 'virtual',
+ install_tag : 'runtime',
+ rename: 'virtual.yaml.example')
diff --git a/src/libcamera/pipeline/virtual/data/virtual.yaml b/src/libcamera/pipeline/virtual/data/virtual.yaml
new file mode 100644
index 00000000..20471bb9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/data/virtual.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+"Virtual0":
+ supported_formats:
+ - width: 1920
+ height: 1080
+ frame_rates:
+ - 30
+ - 60
+ - width: 1680
+ height: 1050
+ frame_rates:
+ - 70
+ - 80
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device"
+"Virtual1":
+ supported_formats:
+ - width: 800
+ height: 600
+ frame_rates:
+ - 60
+ test_pattern: "bars"
+ location: "CameraLocationBack"
+ model: "Virtual Video Device1"
+"Virtual2":
+ supported_formats:
+ - width: 400
+ height: 300
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device2"
+"Virtual3":
+ test_pattern: "bars"
diff --git a/src/libcamera/pipeline/virtual/frame_generator.h b/src/libcamera/pipeline/virtual/frame_generator.h
new file mode 100644
index 00000000..a0658c45
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/frame_generator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to generate frames
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
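+/*
+ * Abstract interface for virtual camera frame generators: configure()
+ * prepares the generator for an output size, and generateFrame() fills a
+ * frame buffer with the generated content.
+ */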
+class FrameGenerator
+{
+public:
+ virtual ~FrameGenerator() = default;
+
+ virtual void configure(const Size &size) = 0;
+
+ virtual int generateFrame(const Size &size,
+ const FrameBuffer *buffer) = 0;
+
+protected:
+ FrameGenerator() {}
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
new file mode 100644
index 00000000..d1545b5d
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+/*
+ * Factory function to create an ImageFrameGenerator object.
+ * Reads the images, converts them to buffers in NV12 format, and
+ * stores pointers to the buffers in a list (imageFrameDatas_).
+ */
+std::unique_ptr<ImageFrameGenerator>
+ImageFrameGenerator::create(ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each file in the directory, load the image,
+ * convert it to NV12, and store the pointer.
+ */
+ for (const auto &path : imageFrames.files) {
+ File file(path);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ auto fileSize = file.size();
+ auto buffer = std::make_unique<uint8_t[]>(fileSize);
+ if (file.read({ buffer.get(), static_cast<size_t>(fileSize) }) != fileSize) {
+ LOG(Virtual, Error) << "Failed to read file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(buffer.get(), fileSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(width * height / 2);
+ int ret = libyuv::MJPGToNV12(buffer.get(), fileSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+
+ ASSERT(!imageFrameGenerator->imageFrameDatas_.empty());
+
+ return imageFrameGenerator;
+}
+
+/*
+ * \var ImageFrameGenerator::frameRepeat
+ * \brief Number of frames each image is shown before proceeding to the next image
+ */
+
+/* Scale the buffers for image frames. */
+void ImageFrameGenerator::configure(const Size &size)
+{
+	/* Reset the scaled frames so that repeated configure() calls start clean */
+ scaledFrameDatas_.clear();
+ frameIndex_ = 0;
+ parameter_ = 0;
+
+ for (unsigned int i = 0; i < imageFrameDatas_.size(); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ unsigned int halfSizeWidth = (size.width + 1) / 2;
+ unsigned int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+ /*
+ * \todo Some platforms might enforce stride due to GPU.
+ * The width needs to be a multiple of the stride to work
+ * properly for now.
+ */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buffer)
+{
+ ASSERT(!scaledFrameDatas_.empty());
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+	/* Wrap around to stay within the number of available images */
+ frameIndex_ %= imageFrameDatas_.size();
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameIndex_].Y.get(), size.width,
+ scaledFrameDatas_[frameIndex_].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+	/* Proceed to the next image every frameRepeat frames */
+	/* \todo Consider making frameRepeat configurable via the config file */
+ parameter_++;
+ if (parameter_ % frameRepeat == 0)
+ frameIndex_++;
+
+ return 0;
+}
+
+/*
+ * \var ImageFrameGenerator::imageFrameDatas_
+ * \brief List of pointers to the unscaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::scaledFrameDatas_
+ * \brief List of pointers to the scaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::imageFrames_
+ * \brief Pointer to the imageFrames_ in VirtualCameraData
+ */
+
+/*
+ * \var ImageFrameGenerator::parameter_
+ * \brief Frame counter used to advance to the next image every frameRepeat frames
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.h b/src/libcamera/pipeline/virtual/image_frame_generator.h
new file mode 100644
index 00000000..42a077ba
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <memory>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::vector<std::filesystem::path> files;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ static constexpr unsigned int frameRepeat = 4;
+
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ void configure(const Size &size) override;
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+ std::vector<ImageFrameData> imageFrameDatas_;
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ ImageFrames *imageFrames_;
+ unsigned int frameIndex_;
+ unsigned int parameter_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/meson.build b/src/libcamera/pipeline/virtual/meson.build
new file mode 100644
index 00000000..c8434593
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'config_parser.cpp',
+ 'image_frame_generator.cpp',
+ 'test_pattern_generator.cpp',
+ 'virtual.cpp',
+])
+
+libjpeg = dependency('libjpeg', required : true)
+
+libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg]
+
+subdir('data')
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
new file mode 100644
index 00000000..745be83b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#include "test_pattern_generator.h"
+
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <libyuv/convert_from_argb.h>
+
+namespace {
+
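+/*
+ * Rotate the image left by one sample per call: the first sample of each
+ * row is saved, the rest of the row is shifted left, and the saved sample
+ * is appended at the end. Repeated calls make the test pattern scroll
+ * horizontally.
+ */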
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+ if (size.width < 2)
+ return;
+
+ const size_t stride = size.width * SampleSize;
+ uint8_t first[SampleSize];
+
+ for (size_t i = 0; i < size.height; i++, image += stride) {
+ memcpy(first, &image[0], SampleSize);
+ memmove(&image[0], &image[SampleSize], stride - SampleSize);
+ memcpy(&image[stride - SampleSize], first, SampleSize);
+ }
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+static const unsigned int kARGBSize = 4;
+
+int TestPatternGenerator::generateFrame(const Size &size,
+ const FrameBuffer *buffer)
+{
+ MappedFrameBuffer mappedFrameBuffer(buffer,
+ MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ rotateLeft1Column<kARGBSize>(size, template_.get());
+
+ /* Convert the template_ to the frame buffer */
+ int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
+ planes[0].begin(), size.width,
+ planes[1].begin(), size.width,
+ size.width, size.height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "ARGBToNV12() failed with " << ret;
+
+ return ret;
+}
+
+void ColorBarsGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[8][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0xff, 0xff, 0x00 }, /* Yellow */
+ { 0x00, 0xff, 0xff }, /* Cyan */
+ { 0x00, 0xff, 0x00 }, /* Green */
+ { 0xff, 0x00, 0xff }, /* Magenta */
+ { 0xff, 0x00, 0x00 }, /* Red */
+ { 0x00, 0x00, 0xff }, /* Blue */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int colorBarWidth = size.width / std::size(kColorBar);
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+ unsigned int index = (w / colorBarWidth) % std::size(kColorBar);
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+void DiagonalLinesGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[2][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int lineWidth = size.width / 10;
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+ int index = ((w + h) / lineWidth) % 2;
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h
new file mode 100644
index 00000000..2a51bd31
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class TestPattern : char {
+ ColorBars = 0,
+ DiagonalLines = 1,
+};
+
+class TestPatternGenerator : public FrameGenerator
+{
+public:
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+protected:
+ /* Buffer of test pattern template */
+ std::unique_ptr<uint8_t[]> template_;
+};
+
+class ColorBarsGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the color bar test pattern. */
+ void configure(const Size &size) override;
+};
+
+class DiagonalLinesGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the diagonal lines test pattern. */
+ void configure(const Size &size) override;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp
new file mode 100644
index 00000000..049ebcba
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.cpp
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#include "virtual.h"
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <errno.h>
+#include <map>
+#include <memory>
+#include <ostream>
+#include <set>
+#include <stdint.h>
+#include <string>
+#include <time.h>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "pipeline/virtual/config_parser.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Virtual)
+
+namespace {
+
+uint64_t currentTimestamp()
+{
+ const auto now = std::chrono::steady_clock::now();
+ auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ now.time_since_epoch());
+
+ return nsecs.count();
+}
+
+} /* namespace */
+
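+/* Build an overload set from lambdas, for use with std::visit. */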
+template<class... Ts>
+struct overloaded : Ts... {
+ using Ts::operator()...;
+};
+template<class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
+
+class VirtualCameraConfiguration : public CameraConfiguration
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ VirtualCameraConfiguration(VirtualCameraData *data);
+
+ Status validate() override;
+
+private:
+ const VirtualCameraData *data_;
+};
+
+class PipelineHandlerVirtual : public PipelineHandler
+{
+public:
+ PipelineHandlerVirtual(CameraManager *manager);
+ ~PipelineHandlerVirtual();
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
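+	/*
+	 * Guard so that only one handler instance registers the configured
+	 * cameras (inferred from the created_/resetCreated_ pairing in this
+	 * patch).
+	 */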
+ static bool created_;
+
+ VirtualCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VirtualCameraData *>(camera->_d());
+ }
+
+ bool initFrameGenerator(Camera *camera);
+
+ DmaBufAllocator dmaBufAllocator_;
+
+ bool resetCreated_ = false;
+};
+
+VirtualCameraData::VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions)
+ : Camera::Private(pipe)
+{
+ config_.resolutions = supportedResolutions;
+ for (const auto &resolution : config_.resolutions) {
+ if (config_.minResolutionSize.isNull() || config_.minResolutionSize > resolution.size)
+ config_.minResolutionSize = resolution.size;
+
+ config_.maxResolutionSize = std::max(config_.maxResolutionSize, resolution.size);
+ }
+
+ properties_.set(properties::PixelArrayActiveAreas,
+ { Rectangle(config_.maxResolutionSize) });
+
+ /* \todo Support multiple streams and pass multi_stream_test */
+ streamConfigs_.resize(kMaxStream);
+}
+
+VirtualCameraConfiguration::VirtualCameraConfiguration(VirtualCameraData *data)
+ : CameraConfiguration(), data_(data)
+{
+}
+
+CameraConfiguration::Status VirtualCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty()) {
+ LOG(Virtual, Error) << "Empty config";
+ return Invalid;
+ }
+
+ /* Only one stream is supported */
+ if (config_.size() > VirtualCameraData::kMaxStream) {
+ config_.resize(VirtualCameraData::kMaxStream);
+ status = Adjusted;
+ }
+
+ for (StreamConfiguration &cfg : config_) {
+ bool adjusted = false;
+ bool found = false;
+ for (const auto &resolution : data_->config_.resolutions) {
+ if (resolution.size.width == cfg.size.width &&
+ resolution.size.height == cfg.size.height) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+			 * \todo It's the pipeline handler's decision which
+			 * resolution to choose when the exact one is not
+			 * supported. Defining default logic in PipelineHandler
+			 * to find the closest resolution would be nice.
+ */
+ cfg.size = data_->config_.maxResolutionSize;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (cfg.pixelFormat != formats::NV12) {
+ cfg.pixelFormat = formats::NV12;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (adjusted)
+ LOG(Virtual, Info)
+ << "Stream configuration adjusted to " << cfg.toString();
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0, 1);
+ cfg.frameSize = info.frameSize(cfg.size, 1);
+
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+ }
+
+ return status;
+}
+
+/* static */
+bool PipelineHandlerVirtual::created_ = false;
+
+PipelineHandlerVirtual::PipelineHandlerVirtual(CameraManager *manager)
+ : PipelineHandler(manager),
+ dmaBufAllocator_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+}
+
+PipelineHandlerVirtual::~PipelineHandlerVirtual()
+{
+ if (resetCreated_)
+ created_ = false;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVirtual::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ VirtualCameraData *data = cameraData(camera);
+ auto config = std::make_unique<VirtualCameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::VideoRecording:
+ case StreamRole::Viewfinder:
+ break;
+
+ case StreamRole::Raw:
+ default:
+ LOG(Virtual, Error)
+ << "Requested stream role not supported: " << role;
+ return {};
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ PixelFormat pixelFormat = formats::NV12;
+ streamFormats[pixelFormat] = { { data->config_.minResolutionSize,
+ data->config_.maxResolutionSize } };
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = data->config_.maxResolutionSize;
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+
+ config->addConfiguration(cfg);
+ }
+
+ ASSERT(config->validate() != CameraConfiguration::Invalid);
+
+ return config;
+}
+
+int PipelineHandlerVirtual::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ VirtualCameraData *data = cameraData(camera);
+ for (auto [i, c] : utils::enumerate(*config)) {
+ c.setStream(&data->streamConfigs_[i].stream);
+ /* Start reading the images/generating test patterns */
+ data->streamConfigs_[i].frameGenerator->configure(c.size);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera,
+ Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ if (!dmaBufAllocator_.isValid())
+ return -ENOBUFS;
+
+ const StreamConfiguration &config = stream->configuration();
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+
+ std::vector<unsigned int> planeSizes;
+ for (size_t i = 0; i < info.numPlanes(); ++i)
+ planeSizes.push_back(info.planeSize(config.size, i));
+
+ return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers);
+}
+
+int PipelineHandlerVirtual::start(Camera *camera,
+				  [[maybe_unused]] const ControlList *controls)
+{
+ VirtualCameraData *data = cameraData(camera);
+
+ for (auto &s : data->streamConfigs_)
+ s.seq = 0;
+
+ return 0;
+}
+
+void PipelineHandlerVirtual::stopDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+int PipelineHandlerVirtual::queueRequestDevice(Camera *camera,
+					       Request *request)
+{
+ VirtualCameraData *data = cameraData(camera);
+ const auto timestamp = currentTimestamp();
+
+ for (auto const &[stream, buffer] : request->buffers()) {
+ bool found = false;
+		/* Find the matching stream and fill its buffer with a generated frame. */
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (stream == &streamConfig.stream) {
+ FrameMetadata &fmd = buffer->_d()->metadata();
+
+ fmd.status = FrameMetadata::Status::FrameSuccess;
+ fmd.sequence = streamConfig.seq++;
+ fmd.timestamp = timestamp;
+
+ for (const auto [i, p] : utils::enumerate(buffer->planes()))
+ fmd.planes()[i].bytesused = p.length;
+
+ found = true;
+
+ if (streamConfig.frameGenerator->generateFrame(
+ stream->configuration().size, buffer))
+ fmd.status = FrameMetadata::Status::FrameError;
+
+ completeBuffer(request, buffer);
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+
+ request->metadata().set(controls::SensorTimestamp, timestamp);
+ completeRequest(request);
+
+ return 0;
+}
+
+bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator)
+{
+ if (created_)
+ return false;
+
+ created_ = true;
+
+ std::string configFile = configurationFile("virtual", "virtual.yaml", true);
+ if (configFile.empty()) {
+ LOG(Virtual, Debug)
+ << "Configuration file not found, skipping virtual cameras";
+ return false;
+ }
+
+ File file(configFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error)
+ << "Failed to open config file `" << file.fileName() << "`";
+ return false;
+ }
+
+ ConfigParser parser;
+ auto configData = parser.parseConfigFile(file, this);
+ if (configData.size() == 0) {
+ LOG(Virtual, Error) << "Failed to parse any cameras from the config file: "
+ << file.fileName();
+ return false;
+ }
+
+ /* Configure and register cameras with configData */
+ for (auto &data : configData) {
+ std::set<Stream *> streams;
+ for (auto &streamConfig : data->streamConfigs_)
+ streams.insert(&streamConfig.stream);
+ std::string id = data->config_.id;
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+
+ if (!initFrameGenerator(camera.get())) {
+ LOG(Virtual, Error) << "Failed to initialize frame "
+ << "generator for camera: " << id;
+ continue;
+ }
+
+ registerCamera(std::move(camera));
+ }
+
+ resetCreated_ = true;
+
+ return true;
+}
+
+bool PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
+{
+ auto data = cameraData(camera);
+ auto &frame = data->config_.frame;
+ std::visit(overloaded{
+ [&](TestPattern &testPattern) {
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (testPattern == TestPattern::DiagonalLines)
+ streamConfig.frameGenerator = std::make_unique<DiagonalLinesGenerator>();
+ else
+ streamConfig.frameGenerator = std::make_unique<ColorBarsGenerator>();
+ }
+ },
+ [&](ImageFrames &imageFrames) {
+ for (auto &streamConfig : data->streamConfigs_)
+ streamConfig.frameGenerator = ImageFrameGenerator::create(imageFrames);
+ } },
+ frame);
+
+ for (auto &streamConfig : data->streamConfigs_)
+ if (!streamConfig.frameGenerator)
+ return false;
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
+
+} /* namespace libcamera */
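
The handler above only registers cameras when a "virtual/virtual.yaml"
configuration file is found. As an illustration only — the authoritative
schema is whatever ConfigParser in config_parser.h accepts — such a file
might describe a camera along these lines:

    "Virtual0":
      supported_formats:
        - width: 1920
          height: 1080
      test_pattern: "bars"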
diff --git a/src/libcamera/pipeline/virtual/virtual.h b/src/libcamera/pipeline/virtual/virtual.h
new file mode 100644
index 00000000..683cb82b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#pragma once
+
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+#include "frame_generator.h"
+#include "image_frame_generator.h"
+#include "test_pattern_generator.h"
+
+namespace libcamera {
+
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
+class VirtualCameraData : public Camera::Private
+{
+public:
+ const static unsigned int kMaxStream = 3;
+
+ struct Resolution {
+ Size size;
+ std::vector<int64_t> frameRates;
+ };
+ struct StreamConfig {
+ Stream stream;
+ std::unique_ptr<FrameGenerator> frameGenerator;
+ unsigned int seq = 0;
+ };
+	/* The configuration file is parsed into the Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+
+ Size maxResolutionSize;
+ Size minResolutionSize;
+ };
+
+ VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions);
+
+ ~VirtualCameraData() = default;
+
+ Configuration config_;
+
+ std::vector<StreamConfig> streamConfigs_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 29e0c98a..d84dff3c 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#include "libcamera/internal/pipeline_handler.h"
@@ -22,7 +22,6 @@
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/request.h"
#include "libcamera/internal/tracepoints.h"
@@ -75,7 +74,7 @@ PipelineHandler::PipelineHandler(CameraManager *manager)
PipelineHandler::~PipelineHandler()
{
- for (std::shared_ptr<MediaDevice> media : mediaDevices_)
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->release();
}
@@ -157,26 +156,28 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
* Pipeline handlers shall not call this function directly as the Camera class
* handles access internally.
*
- * \context This function is \threadsafe.
+ * \context This function is called from the CameraManager thread.
*
* \return True if the pipeline handler was acquired, false if another process
* has already acquired it
* \sa release()
*/
-bool PipelineHandler::acquire()
+bool PipelineHandler::acquire(Camera *camera)
{
- MutexLocker locker(lock_);
-
- if (useCount_) {
- ++useCount_;
- return true;
+ if (useCount_ == 0) {
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ if (!media->lock()) {
+ unlockMediaDevices();
+ return false;
+ }
+ }
}
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
- if (!media->lock()) {
+ if (!acquireDevice(camera)) {
+ if (useCount_ == 0)
unlockMediaDevices();
- return false;
- }
+
+ return false;
}
++useCount_;
@@ -195,29 +196,60 @@ bool PipelineHandler::acquire()
* Pipeline handlers shall not call this function directly as the Camera class
* handles access internally.
*
- * \context This function is \threadsafe.
+ * \context This function is called from the CameraManager thread.
*
* \sa acquire()
*/
void PipelineHandler::release(Camera *camera)
{
- MutexLocker locker(lock_);
-
ASSERT(useCount_);
- unlockMediaDevices();
-
releaseDevice(camera);
+ if (useCount_ == 1)
+ unlockMediaDevices();
+
--useCount_;
}
/**
+ * \brief Acquire resources associated with this camera
+ * \param[in] camera The camera for which to acquire resources
+ *
+ * Pipeline handlers may override this in order to acquire resources, such as
+ * opening devices and allocating buffers, when a camera is acquired.
+ *
+ * This is used by the uvcvideo pipeline handler to delay opening /dev/video#
+ * until the camera is acquired to avoid excess power consumption. The delayed
+ * opening of /dev/video# is a special case because the kernel uvcvideo driver
+ * powers on the USB device as soon as /dev/video# is opened. This behavior
+ * should *not* be copied by other pipeline handlers.
+ *
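+ * As a sketch, an override that opens a device node on acquisition might
+ * look as follows (hypothetical handler and member names):
+ * \code
+ * bool PipelineHandlerFoo::acquireDevice(Camera *camera)
+ * {
+ *	return cameraData(camera)->video_->open() == 0;
+ * }
+ * \endcode
+ *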
+ * \context This function is called from the CameraManager thread.
+ *
+ * \return True on success, false on failure
+ * \sa releaseDevice()
+ */
+bool PipelineHandler::acquireDevice([[maybe_unused]] Camera *camera)
+{
+ return true;
+}
+
+/**
* \brief Release resources associated with this camera
* \param[in] camera The camera for which to release resources
*
* Pipeline handlers may override this in order to perform cleanup operations
* when a camera is released, such as freeing memory.
+ *
+ * This is called once for every camera that is released. If there are resources
+ * shared by multiple cameras then the pipeline handler must take care not to
+ * release them until releaseDevice() has been called for all previously
+ * acquired cameras.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \sa acquireDevice()
*/
void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
{
@@ -335,9 +367,7 @@ void PipelineHandler::stop(Camera *camera)
while (!waitingRequests_.empty()) {
Request *request = waitingRequests_.front();
waitingRequests_.pop();
-
- request->_d()->cancel();
- completeRequest(request);
+ cancelRequest(request);
}
/* Make sure no requests are pending. */
@@ -438,10 +468,8 @@ void PipelineHandler::doQueueRequest(Request *request)
}
int ret = queueRequestDevice(camera, request);
- if (ret) {
- request->_d()->cancel();
- completeRequest(request);
- }
+ if (ret)
+ cancelRequest(request);
}
/**
@@ -537,9 +565,23 @@ void PipelineHandler::completeRequest(Request *request)
}
/**
+ * \brief Cancel request and signal its completion
+ * \param[in] request The request to cancel
+ *
+ * This function cancels and completes the request. The same rules as for
+ * completeRequest() apply.
+ */
+void PipelineHandler::cancelRequest(Request *request)
+{
+ request->_d()->cancel();
+ completeRequest(request);
+}
+
+/**
* \brief Retrieve the absolute path to a platform configuration file
* \param[in] subdir The pipeline handler specific subdirectory name
* \param[in] name The configuration file name
+ * \param[in] silent Disable error messages
*
* This function locates a named platform configuration file and returns
* its absolute path to the pipeline handler. It searches the following
@@ -555,7 +597,8 @@ void PipelineHandler::completeRequest(Request *request)
* string if no configuration file can be found
*/
std::string PipelineHandler::configurationFile(const std::string &subdir,
- const std::string &name) const
+ const std::string &name,
+ bool silent) const
{
std::string confPath;
struct stat statbuf;
@@ -585,9 +628,11 @@ std::string PipelineHandler::configurationFile(const std::string &subdir,
if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
return confPath;
- LOG(Pipeline, Error)
- << "Configuration file '" << confPath
- << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+ if (!silent)
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '"
+ << PipelineHandler::name() << "'";
return std::string();
}
@@ -605,9 +650,14 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
{
cameras_.push_back(camera);
- if (mediaDevices_.empty())
- LOG(Pipeline, Fatal)
- << "Registering camera with no media devices!";
+ if (mediaDevices_.empty()) {
+ /*
+ * For virtual devices with no MediaDevice, there are no system
+ * devices to register.
+ */
+ manager_->_d()->addCamera(std::move(camera));
+ return;
+ }
/*
* Walk the entity list and map the devnums of all capture video nodes
@@ -720,6 +770,13 @@ void PipelineHandler::disconnect()
*/
/**
+ * \fn PipelineHandler::cameraManager() const
+ * \brief Retrieve the CameraManager that this pipeline handler belongs to
+ * \context This function is \threadsafe.
+ * \return The CameraManager for this pipeline handler
+ */
+
+/**
* \class PipelineHandlerFactoryBase
* \brief Base class for pipeline handler factories
*
@@ -795,6 +852,28 @@ std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories
}
/**
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
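+ *
+ * For instance (sketch), the factory registered by the virtual pipeline
+ * handler can be looked up as:
+ * \code
+ * const PipelineHandlerFactoryBase *factory =
+ *	PipelineHandlerFactoryBase::getFactoryByName("virtual");
+ * \endcode
+ *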
+ * \return The factory of the pipeline with name \a name, or nullptr if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
+
+/**
* \class PipelineHandlerFactory
* \brief Registration of PipelineHandler classes and creation of instances
* \tparam _PipelineHandler The pipeline handler class type for this factory
@@ -830,6 +909,8 @@ std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
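+ *
+ * For example, the virtual pipeline handler above registers itself as:
+ * \code
+ * REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
+ * \endcode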
diff --git a/src/libcamera/pixel_format.cpp b/src/libcamera/pixel_format.cpp
index 80c22072..314179a8 100644
--- a/src/libcamera/pixel_format.cpp
+++ b/src/libcamera/pixel_format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.cpp - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#include <libcamera/formats.h>
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 86a382fb..68fad327 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
#include "libcamera/internal/process.h"
@@ -10,7 +10,6 @@
#include <algorithm>
#include <dirent.h>
#include <fcntl.h>
-#include <iostream>
#include <list>
#include <signal.h>
#include <string.h>
@@ -76,7 +75,7 @@ void ProcessManager::sighandler()
return;
}
- for (auto it = processes_.begin(); it != processes_.end(); ) {
+ for (auto it = processes_.begin(); it != processes_.end();) {
Process *process = *it;
int wstatus;
@@ -189,7 +188,6 @@ const struct sigaction &ProcessManager::oldsa() const
return oldsa_;
}
-
/**
* \class Process
* \brief Process object
@@ -271,8 +269,8 @@ int Process::start(const std::string &path,
unsigned int len = args.size();
argv[0] = path.c_str();
for (unsigned int i = 0; i < len; i++)
- argv[i+1] = args[i].c_str();
- argv[len+1] = nullptr;
+ argv[i + 1] = args[i].c_str();
+ argv[len + 1] = nullptr;
execv(path.c_str(), (char **)argv);
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
deleted file mode 100644
index 8b274c38..00000000
--- a/src/libcamera/property_ids.cpp.in
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * property_ids.cpp : Property ID list
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-/**
- * \file property_ids.h
- * \brief Camera property identifiers
- */
-
-namespace libcamera {
-
-/**
- * \brief Namespace for libcamera properties
- */
-namespace properties {
-
-${controls_doc}
-
-${vendor_controls_doc}
-
-#ifndef __DOXYGEN__
-/*
- * Keep the properties definitions hidden from doxygen as it incorrectly parses
- * them as functions.
- */
-${controls_def}
-
-${vendor_controls_def}
-
-#endif
-
-/**
- * \brief List of all supported libcamera properties
- */
-extern const ControlIdMap properties {
-${controls_map}
-};
-
-} /* namespace properties */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/proxy/meson.build b/src/libcamera/proxy/meson.build
index 00ae5a8f..8bd1b135 100644
--- a/src/libcamera/proxy/meson.build
+++ b/src/libcamera/proxy/meson.build
@@ -13,7 +13,8 @@ foreach mojom : ipa_mojoms
'--libcamera_generate_proxy_cpp',
'--libcamera_output_path=@OUTPUT@',
'./' + '@INPUT@'
- ])
+ ],
+ env : py_build_env)
- libcamera_sources += proxy
+ libcamera_internal_sources += proxy
endforeach
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index aa4d9cd7..8c54a2e2 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -15,10 +15,10 @@ foreach mojom : ipa_mojoms
'--libcamera_generate_proxy_worker',
'--libcamera_output_path=@OUTPUT@',
'./' + '@INPUT@'
- ])
+ ],
+ env : py_build_env)
- proxy = executable(mojom['name'] + '_ipa_proxy',
- [worker, libcamera_generated_ipa_headers],
+ proxy = executable(mojom['name'] + '_ipa_proxy', worker,
install : true,
install_dir : proxy_install_dir,
dependencies : libcamera_private)
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
index 64dfa234..f1d73a5c 100644
--- a/src/libcamera/pub_key.cpp
+++ b/src/libcamera/pub_key.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.cpp - Public key signature verification
+ * Public key signature verification
*/
#include "libcamera/internal/pub_key.h"
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index 949c556f..b206ac13 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
#include "libcamera/internal/request.h"
@@ -28,10 +28,17 @@
* \brief Describes a frame capture request to be processed by a camera
*/
+/**
+ * \internal
+ * \file libcamera/internal/request.h
+ * \brief Internal support for request handling
+ */
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Request)
+#ifndef __DOXYGEN_PUBLIC__
/**
* \class Request::Private
* \brief Request private data
@@ -300,6 +307,7 @@ void Request::Private::timeout()
emitPrepareCompleted();
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \enum Request::Status
@@ -467,6 +475,15 @@ int Request::addBuffer(const Stream *stream, FrameBuffer *buffer,
return -EINVAL;
}
+ /*
+ * Make sure the fence has been extracted from the buffer
+ * to avoid waiting on a stale fence.
+ */
+ if (buffer->_d()->fence()) {
+ LOG(Request, Error) << "Can't add buffer that still references a fence";
+ return -EEXIST;
+ }
+
auto it = bufferMap_.find(stream);
if (it != bufferMap_.end()) {
LOG(Request, Error) << "FrameBuffer already set for stream";
@@ -477,15 +494,6 @@ int Request::addBuffer(const Stream *stream, FrameBuffer *buffer,
_d()->pending_.insert(buffer);
bufferMap_[stream] = buffer;
- /*
- * Make sure the fence has been extracted from the buffer
- * to avoid waiting on a stale fence.
- */
- if (buffer->_d()->fence()) {
- LOG(Request, Error) << "Can't add buffer that still references a fence";
- return -EEXIST;
- }
-
if (fence && fence->isValid())
buffer->_d()->setFence(std::move(fence));
diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
index 5c4f3532..d19b5e2e 100644
--- a/src/libcamera/sensor/camera_sensor.cpp
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -2,30 +2,17 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.cpp - A camera sensor
+ * A camera sensor
*/
#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/media_device.h"
-#include <algorithm>
-#include <float.h>
-#include <iomanip>
-#include <limits.h>
-#include <math.h>
-#include <string.h>
+#include <memory>
+#include <vector>
-#include <libcamera/camera.h>
-#include <libcamera/orientation.h>
-#include <libcamera/property_ids.h>
+#include <libcamera/base/log.h>
-#include <libcamera/base/utils.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera_lens.h"
-#include "libcamera/internal/camera_sensor_properties.h"
-#include "libcamera/internal/formats.h"
-#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/media_object.h"
/**
* \file camera_sensor.h
@@ -38,537 +25,16 @@ LOG_DEFINE_CATEGORY(CameraSensor)
/**
* \class CameraSensor
- * \brief A camera sensor based on V4L2 subdevices
+ * \brief An abstract camera sensor
*
* The CameraSensor class eases handling of sensors for pipeline handlers by
- * hiding the details of the V4L2 subdevice kernel API and caching sensor
- * information.
- *
- * The implementation is currently limited to sensors that expose a single V4L2
- * subdevice with a single pad. It will be extended to support more complex
- * devices as the needs arise.
+ * hiding the details of the kernel API and caching sensor information.
*/
/**
- * \brief Construct a CameraSensor
- * \param[in] entity The media entity backing the camera sensor
- *
- * Once constructed the instance must be initialized with init().
- */
-CameraSensor::CameraSensor(const MediaEntity *entity)
- : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
- bayerFormat_(nullptr), supportFlips_(false),
- flipsAlterBayerOrder_(false), properties_(properties::properties)
-{
-}
-
-/**
* \brief Destroy a CameraSensor
*/
-CameraSensor::~CameraSensor()
-{
-}
-
-/**
- * \brief Initialize the camera sensor instance
- *
- * This function performs the initialisation steps of the CameraSensor that may
- * fail. It shall be called once and only once after constructing the instance.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::init()
-{
- for (const MediaPad *pad : entity_->pads()) {
- if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
- pad_ = pad->index();
- break;
- }
- }
-
- if (pad_ == UINT_MAX) {
- LOG(CameraSensor, Error)
- << "Sensors with more than one pad are not supported";
- return -EINVAL;
- }
-
- switch (entity_->function()) {
- case MEDIA_ENT_F_CAM_SENSOR:
- case MEDIA_ENT_F_PROC_VIDEO_ISP:
- break;
-
- default:
- LOG(CameraSensor, Error)
- << "Invalid sensor function "
- << utils::hex(entity_->function());
- return -EINVAL;
- }
-
- /* Create and open the subdev. */
- subdev_ = std::make_unique<V4L2Subdevice>(entity_);
- int ret = subdev_->open();
- if (ret < 0)
- return ret;
-
- /*
- * Clear any flips to be sure we get the "native" Bayer order. This is
- * harmless for sensors where the flips don't affect the Bayer order.
- */
- ControlList ctrls(subdev_->controls());
- if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end())
- ctrls.set(V4L2_CID_HFLIP, 0);
- if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end())
- ctrls.set(V4L2_CID_VFLIP, 0);
- subdev_->setControls(&ctrls);
-
- /* Enumerate, sort and cache media bus codes and sizes. */
- formats_ = subdev_->formats(pad_);
- if (formats_.empty()) {
- LOG(CameraSensor, Error) << "No image format found";
- return -EINVAL;
- }
-
- mbusCodes_ = utils::map_keys(formats_);
- std::sort(mbusCodes_.begin(), mbusCodes_.end());
-
- for (const auto &format : formats_) {
- const std::vector<SizeRange> &ranges = format.second;
- std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
- [](const SizeRange &range) { return range.max; });
- }
-
- std::sort(sizes_.begin(), sizes_.end());
-
- /* Remove duplicates. */
- auto last = std::unique(sizes_.begin(), sizes_.end());
- sizes_.erase(last, sizes_.end());
-
- /*
- * VIMC is a bit special, as it does not yet support all the mandatory
- * requirements regular sensors have to respect.
- *
- * Do not validate the driver if it's VIMC and initialize the sensor
- * properties with static information.
- *
- * \todo Remove the special case once the VIMC driver has been
- * updated in all test platforms.
- */
- if (entity_->device()->driver() == "vimc") {
- initVimcDefaultProperties();
-
- ret = initProperties();
- if (ret)
- return ret;
-
- return discoverAncillaryDevices();
- }
-
- /* Get the color filter array pattern (only for RAW sensors). */
- for (unsigned int mbusCode : mbusCodes_) {
- const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode);
- if (bayerFormat.isValid()) {
- bayerFormat_ = &bayerFormat;
- break;
- }
- }
-
- ret = validateSensorDriver();
- if (ret)
- return ret;
-
- ret = initProperties();
- if (ret)
- return ret;
-
- ret = discoverAncillaryDevices();
- if (ret)
- return ret;
-
- /*
- * Set HBLANK to the minimum to start with a well-defined line length,
- * allowing IPA modules that do not modify HBLANK to use the sensor
- * minimum line length in their calculations.
- */
- const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
- if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
- ControlList ctrl(subdev_->controls());
-
- ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
- ret = subdev_->setControls(&ctrl);
- if (ret)
- return ret;
- }
-
- return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
-}
-
-int CameraSensor::generateId()
-{
- const std::string devPath = subdev_->devicePath();
-
- /* Try to get ID from firmware description. */
- id_ = sysfs::firmwareNodePath(devPath);
- if (!id_.empty())
- return 0;
-
- /*
- * Virtual sensors not described in firmware
- *
- * Verify it's a platform device and construct ID from the device path
- * and model of sensor.
- */
- if (devPath.find("/sys/devices/platform/", 0) == 0) {
- id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
- return 0;
- }
-
- LOG(CameraSensor, Error) << "Can't generate sensor ID";
- return -EINVAL;
-}
-
-int CameraSensor::validateSensorDriver()
-{
- int err = 0;
-
- /*
- * Optional controls are used to register optional sensor properties. If
- * not present, some values will be defaulted.
- */
- static constexpr uint32_t optionalControls[] = {
- V4L2_CID_CAMERA_SENSOR_ROTATION,
- };
-
- const ControlIdMap &controls = subdev_->controls().idmap();
- for (uint32_t ctrl : optionalControls) {
- if (!controls.count(ctrl))
- LOG(CameraSensor, Debug)
- << "Optional V4L2 control " << utils::hex(ctrl)
- << " not supported";
- }
-
- /*
- * Recommended controls are similar to optional controls, but will
- * become mandatory in the near future. Be loud if they're missing.
- */
- static constexpr uint32_t recommendedControls[] = {
- V4L2_CID_CAMERA_ORIENTATION,
- };
-
- for (uint32_t ctrl : recommendedControls) {
- if (!controls.count(ctrl)) {
- LOG(CameraSensor, Warning)
- << "Recommended V4L2 control " << utils::hex(ctrl)
- << " not supported";
- err = -EINVAL;
- }
- }
-
- /*
- * Verify if sensor supports horizontal/vertical flips
- *
- * \todo Handle horizontal and vertical flips independently.
- */
- const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
- const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
- if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
- vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
- supportFlips_ = true;
-
- if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
- vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
- flipsAlterBayerOrder_ = true;
- }
-
- if (!supportFlips_)
- LOG(CameraSensor, Debug)
- << "Camera sensor does not support horizontal/vertical flip";
-
- /*
- * Make sure the required selection targets are supported.
- *
- * Failures in reading any of the targets are not deemed to be fatal,
- * but some properties and features, like constructing a
- * IPACameraSensorInfo for the IPA module, won't be supported.
- *
- * \todo Make support for selection targets mandatory as soon as all
- * test platforms have been updated.
- */
- Rectangle rect;
- int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect);
- if (ret) {
- /*
- * Default the pixel array size to the largest size supported
- * by the sensor. The sizes_ vector is sorted in ascending
- * order, the largest size is thus the last element.
- */
- pixelArraySize_ = sizes_.back();
-
- LOG(CameraSensor, Warning)
- << "The PixelArraySize property has been defaulted to "
- << pixelArraySize_;
- err = -EINVAL;
- } else {
- pixelArraySize_ = rect.size();
- }
-
- ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_);
- if (ret) {
- activeArea_ = Rectangle(pixelArraySize_);
- LOG(CameraSensor, Warning)
- << "The PixelArrayActiveAreas property has been defaulted to "
- << activeArea_;
- err = -EINVAL;
- }
-
- ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect);
- if (ret) {
- LOG(CameraSensor, Warning)
- << "Failed to retrieve the sensor crop rectangle";
- err = -EINVAL;
- }
-
- if (err) {
- LOG(CameraSensor, Warning)
- << "The sensor kernel driver needs to be fixed";
- LOG(CameraSensor, Warning)
- << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
- }
-
- if (!bayerFormat_)
- return 0;
-
- /*
- * For raw sensors, make sure the sensor driver supports the controls
- * required by the CameraSensor class.
- */
- static constexpr uint32_t mandatoryControls[] = {
- V4L2_CID_ANALOGUE_GAIN,
- V4L2_CID_EXPOSURE,
- V4L2_CID_HBLANK,
- V4L2_CID_PIXEL_RATE,
- V4L2_CID_VBLANK,
- };
-
- err = 0;
- for (uint32_t ctrl : mandatoryControls) {
- if (!controls.count(ctrl)) {
- LOG(CameraSensor, Error)
- << "Mandatory V4L2 control " << utils::hex(ctrl)
- << " not available";
- err = -EINVAL;
- }
- }
-
- if (err) {
- LOG(CameraSensor, Error)
- << "The sensor kernel driver needs to be fixed";
- LOG(CameraSensor, Error)
- << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
- return err;
- }
-
- return 0;
-}
-
-/*
- * \brief Initialize properties that cannot be intialized by the
- * regular initProperties() function for VIMC
- */
-void CameraSensor::initVimcDefaultProperties()
-{
- /* Use the largest supported size. */
- pixelArraySize_ = sizes_.back();
- activeArea_ = Rectangle(pixelArraySize_);
-}
-
-void CameraSensor::initStaticProperties()
-{
- staticProps_ = CameraSensorProperties::get(model_);
- if (!staticProps_)
- return;
-
- /* Register the properties retrieved from the sensor database. */
- properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
-
- initTestPatternModes();
-}
-
-void CameraSensor::initTestPatternModes()
-{
- const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
- if (v4l2TestPattern == controls().end()) {
- LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
- return;
- }
-
- const auto &testPatternModes = staticProps_->testPatternModes;
- if (testPatternModes.empty()) {
- /*
- * The camera sensor supports test patterns but we don't know
- * how to map them so this should be fixed.
- */
- LOG(CameraSensor, Debug) << "No static test pattern map for \'"
- << model() << "\'";
- return;
- }
-
- /*
- * Create a map that associates the V4L2 control index to the test
- * pattern mode by reversing the testPatternModes map provided by the
- * camera sensor properties. This makes it easier to verify if the
- * control index is supported in the below for loop that creates the
- * list of supported test patterns.
- */
- std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
- for (const auto &it : testPatternModes)
- indexToTestPatternMode[it.second] = it.first;
-
- for (const ControlValue &value : v4l2TestPattern->second.values()) {
- const int32_t index = value.get<int32_t>();
-
- const auto it = indexToTestPatternMode.find(index);
- if (it == indexToTestPatternMode.end()) {
- LOG(CameraSensor, Debug)
- << "Test pattern mode " << index << " ignored";
- continue;
- }
-
- testPatternModes_.push_back(it->second);
- }
-}
-
-int CameraSensor::initProperties()
-{
- model_ = subdev_->model();
- properties_.set(properties::Model, utils::toAscii(model_));
-
- /* Generate a unique ID for the sensor. */
- int ret = generateId();
- if (ret)
- return ret;
-
- /* Initialize the static properties from the sensor database. */
- initStaticProperties();
-
- /* Retrieve and register properties from the kernel interface. */
- const ControlInfoMap &controls = subdev_->controls();
-
- const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
- if (orientation != controls.end()) {
- int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
- int32_t propertyValue;
-
- switch (v4l2Orientation) {
- default:
- LOG(CameraSensor, Warning)
- << "Unsupported camera location "
- << v4l2Orientation << ", setting to External";
- [[fallthrough]];
- case V4L2_CAMERA_ORIENTATION_EXTERNAL:
- propertyValue = properties::CameraLocationExternal;
- break;
- case V4L2_CAMERA_ORIENTATION_FRONT:
- propertyValue = properties::CameraLocationFront;
- break;
- case V4L2_CAMERA_ORIENTATION_BACK:
- propertyValue = properties::CameraLocationBack;
- break;
- }
- properties_.set(properties::Location, propertyValue);
- } else {
- LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
- }
-
- const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
- if (rotationControl != controls.end()) {
- int32_t propertyValue = rotationControl->second.def().get<int32_t>();
-
- /*
- * Cache the Transform associated with the camera mounting
- * rotation for later use in computeTransform().
- */
- bool success;
- mountingOrientation_ = orientationFromRotation(propertyValue, &success);
- if (!success) {
- LOG(CameraSensor, Warning)
- << "Invalid rotation of " << propertyValue
- << " degrees - ignoring";
- mountingOrientation_ = Orientation::Rotate0;
- }
-
- properties_.set(properties::Rotation, propertyValue);
- } else {
- LOG(CameraSensor, Warning)
- << "Rotation control not available, default to 0 degrees";
- properties_.set(properties::Rotation, 0);
- mountingOrientation_ = Orientation::Rotate0;
- }
-
- properties_.set(properties::PixelArraySize, pixelArraySize_);
- properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
-
- /* Color filter array pattern, register only for RAW sensors. */
- if (bayerFormat_) {
- int32_t cfa;
- switch (bayerFormat_->order) {
- case BayerFormat::BGGR:
- cfa = properties::draft::BGGR;
- break;
- case BayerFormat::GBRG:
- cfa = properties::draft::GBRG;
- break;
- case BayerFormat::GRBG:
- cfa = properties::draft::GRBG;
- break;
- case BayerFormat::RGGB:
- cfa = properties::draft::RGGB;
- break;
- case BayerFormat::MONO:
- cfa = properties::draft::MONO;
- break;
- }
-
- properties_.set(properties::draft::ColorFilterArrangement, cfa);
- }
-
- return 0;
-}
-
-/**
- * \brief Check for and initialise any ancillary devices
- *
- * Sensors sometimes have ancillary devices such as a Lens or Flash that could
- * be linked to their MediaEntity by the kernel. Search for and handle any
- * such device.
- *
- * \todo Handle MEDIA_ENT_F_FLASH too.
- */
-int CameraSensor::discoverAncillaryDevices()
-{
- int ret;
-
- for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
- switch (ancillary->function()) {
- case MEDIA_ENT_F_LENS:
- focusLens_ = std::make_unique<CameraLens>(ancillary);
- ret = focusLens_->init();
- if (ret) {
- LOG(CameraSensor, Error)
- << "Lens initialisation failed, lens disabled";
- focusLens_.reset();
- }
- break;
-
- default:
- LOG(CameraSensor, Warning)
- << "Unsupported ancillary entity function "
- << ancillary->function();
- break;
- }
- }
-
- return 0;
-}
+CameraSensor::~CameraSensor() = default;
/**
* \fn CameraSensor::model()
@@ -623,29 +89,15 @@ int CameraSensor::discoverAncillaryDevices()
*/
/**
+ * \fn CameraSensor::sizes()
* \brief Retrieve the supported frame sizes for a media bus code
* \param[in] mbusCode The media bus code for which sizes are requested
*
* \return The supported frame sizes for \a mbusCode sorted in increasing order
*/
-std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const
-{
- std::vector<Size> sizes;
-
- const auto &format = formats_.find(mbusCode);
- if (format == formats_.end())
- return sizes;
-
- const std::vector<SizeRange> &ranges = format->second;
- std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
- [](const SizeRange &range) { return range.max; });
-
- std::sort(sizes.begin(), sizes.end());
-
- return sizes;
-}
/**
+ * \fn CameraSensor::resolution()
* \brief Retrieve the camera sensor resolution
*
* The camera sensor resolution is the active pixel area size, clamped to the
@@ -658,15 +110,13 @@ std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const
*
* \return The camera sensor resolution in pixels
*/
-Size CameraSensor::resolution() const
-{
- return std::min(sizes_.back(), activeArea_.size());
-}
/**
+ * \fn CameraSensor::getFormat()
* \brief Retrieve the best sensor format for a desired output
* \param[in] mbusCodes The list of acceptable media bus codes
* \param[in] size The desired size
+ * \param[in] maxSize The maximum size
*
* Media bus codes are selected from \a mbusCodes, which lists all acceptable
* codes in decreasing order of preference. Media bus codes supported by the
@@ -685,6 +135,8 @@ Size CameraSensor::resolution() const
* bandwidth.
* - The desired \a size shall be supported by one of the media bus code listed
* in \a mbusCodes.
+ * - The desired \a size shall fit into the maximum size \a maxSize if it is not
+ * null.
*
* When multiple media bus codes can produce the same size, the code at the
* lowest position in \a mbusCodes is selected.
@@ -699,59 +151,9 @@ Size CameraSensor::resolution() const
* \return The best sensor output format matching the desired media bus codes
* and size on success, or an empty format otherwise.
*/
-V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const
-{
- unsigned int desiredArea = size.width * size.height;
- unsigned int bestArea = UINT_MAX;
- float desiredRatio = static_cast<float>(size.width) / size.height;
- float bestRatio = FLT_MAX;
- const Size *bestSize = nullptr;
- uint32_t bestCode = 0;
-
- for (unsigned int code : mbusCodes) {
- const auto formats = formats_.find(code);
- if (formats == formats_.end())
- continue;
-
- for (const SizeRange &range : formats->second) {
- const Size &sz = range.max;
-
- if (sz.width < size.width || sz.height < size.height)
- continue;
-
- float ratio = static_cast<float>(sz.width) / sz.height;
- float ratioDiff = fabsf(ratio - desiredRatio);
- unsigned int area = sz.width * sz.height;
- unsigned int areaDiff = area - desiredArea;
-
- if (ratioDiff > bestRatio)
- continue;
-
- if (ratioDiff < bestRatio || areaDiff < bestArea) {
- bestRatio = ratioDiff;
- bestArea = areaDiff;
- bestSize = &sz;
- bestCode = code;
- }
- }
- }
-
- if (!bestSize) {
- LOG(CameraSensor, Debug) << "No supported format or size found";
- return {};
- }
-
- V4L2SubdeviceFormat format{
- .code = bestCode,
- .size = *bestSize,
- .colorSpace = ColorSpace::Raw,
- };
-
- return format;
-}
/**
+ * \fn CameraSensor::setFormat()
* \brief Set the sensor output format
* \param[in] format The desired sensor output format
* \param[in] transform The transform to be applied on the sensor.
@@ -766,32 +168,9 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu
*
* \return 0 on success or a negative error code otherwise
*/
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform)
-{
- /* Configure flips if the sensor supports that. */
- if (supportFlips_) {
- ControlList flipCtrls(subdev_->controls());
-
- flipCtrls.set(V4L2_CID_HFLIP,
- static_cast<int32_t>(!!(transform & Transform::HFlip)));
- flipCtrls.set(V4L2_CID_VFLIP,
- static_cast<int32_t>(!!(transform & Transform::VFlip)));
-
- int ret = subdev_->setControls(&flipCtrls);
- if (ret)
- return ret;
- }
-
- /* Apply format on the subdev. */
- int ret = subdev_->setFormat(pad_, format);
- if (ret)
- return ret;
-
- subdev_->updateControlInfo();
- return 0;
-}
/**
+ * \fn CameraSensor::tryFormat()
* \brief Try the sensor output format
* \param[in] format The desired sensor output format
*
@@ -802,13 +181,9 @@ int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform)
*
* \return 0 on success or a negative error code otherwise
*/
-int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const
-{
- return subdev_->setFormat(pad_, format,
- V4L2Subdevice::Whence::TryFormat);
-}
/**
+ * \fn CameraSensor::applyConfiguration()
* \brief Apply a sensor configuration to the camera sensor
* \param[in] config The sensor configuration
* \param[in] transform The transform to be applied on the sensor.
@@ -823,73 +198,72 @@ int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const
* \return 0 if \a config is applied correctly to the camera sensor, a negative
* error code otherwise
*/
-int CameraSensor::applyConfiguration(const SensorConfiguration &config,
- Transform transform,
- V4L2SubdeviceFormat *sensorFormat)
-{
- if (!config.isValid()) {
- LOG(CameraSensor, Error) << "Invalid sensor configuration";
- return -EINVAL;
- }
- std::vector<unsigned int> filteredCodes;
- std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
- std::back_inserter(filteredCodes),
- [&config](unsigned int mbusCode) {
- BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
- if (bayer.bitDepth == config.bitDepth)
- return true;
- return false;
- });
- if (filteredCodes.empty()) {
- LOG(CameraSensor, Error)
- << "Cannot find any format with bit depth "
- << config.bitDepth;
- return -EINVAL;
- }
-
- /*
- * Compute the sensor's data frame size by applying the cropping
- * rectangle, subsampling and output crop to the sensor's pixel array
- * size.
- *
- * \todo The actual size computation is for now ignored and only the
- * output size is considered. This implies that resolutions obtained
- * with two different cropping/subsampling will look identical and
- * only the first found one will be considered.
- */
- V4L2SubdeviceFormat subdevFormat = {};
- for (unsigned int code : filteredCodes) {
- for (const Size &size : sizes(code)) {
- if (size.width != config.outputSize.width ||
- size.height != config.outputSize.height)
- continue;
-
- subdevFormat.code = code;
- subdevFormat.size = size;
- break;
- }
- }
- if (!subdevFormat.code) {
- LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
- return -EINVAL;
- }
-
- int ret = setFormat(&subdevFormat, transform);
- if (ret)
- return ret;
+/**
+ * \brief Retrieve the image source stream
+ *
+ * Sensors that produce multiple streams do not guarantee that the image stream
+ * is always assigned number 0. This function allows callers to retrieve the
+ * image stream on the sensor's source pad, in order to configure the receiving
+ * side accordingly.
+ *
+ * \return The image source stream
+ */
+V4L2Subdevice::Stream CameraSensor::imageStream() const
+{
+ return { 0, 0 };
+}
- /*
- * Return to the caller the format actually applied to the sensor.
- * This is relevant if transform has changed the bayer pattern order.
- */
- if (sensorFormat)
- *sensorFormat = subdevFormat;
+/**
+ * \brief Retrieve the embedded data source stream
+ *
+ * Some sensors produce embedded data in a stream separate from the image
+ * stream. This function indicates if the sensor supports this feature by
+ * returning the embedded data stream on the sensor's source pad if available,
+ * or an std::optional<> without a value otherwise.
+ *
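+ * A caller may thus gate receiver-side setup on the returned value, e.g.
+ * (sketch, with a hypothetical helper function):
+ * \code
+ * const std::optional<V4L2Subdevice::Stream> stream =
+ *	sensor->embeddedDataStream();
+ * if (stream)
+ *	setupEmbeddedDataPath(*stream);
+ * \endcode
+ *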
+ * \return The embedded data source stream
+ */
+std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const
+{
+ return {};
+}
- /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
- /* \todo Handle scaling in the digital domain. */
+/**
+ * \brief Retrieve the format on the embedded data stream
+ *
+ * When an embedded data stream is available, this function returns the
+ * corresponding format on the sensor's source pad. The format may vary with
+ * the image stream format, and should therefore be retrieved after configuring
+ * the image stream.
+ *
+ * If the sensor doesn't support embedded data, this function returns a
+ * default-constructed format.
+ *
+ * \return The format on the embedded data stream
+ */
+V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const
+{
+ return {};
+}
- return 0;
+/**
+ * \brief Enable or disable the embedded data stream
+ * \param[in] enable True to enable the embedded data stream, false to disable it
+ *
+ * For sensors that support embedded data, this function enables or disables
+ * generation of embedded data. Some such sensors always produce embedded
+ * data, in which case this function returns -EISCONN if the caller attempts
+ * to disable embedded data.
+ *
+ * If the sensor doesn't support embedded data, this function returns 0 when \a
+ * enable is false, and -ENOSTR otherwise.
+ *
+ * \return 0 on success, or a negative error code otherwise
+ */
+int CameraSensor::setEmbeddedDataEnabled(bool enable)
+{
+ return enable ? -ENOSTR : 0;
}
/**
@@ -899,6 +273,7 @@ int CameraSensor::applyConfiguration(const SensorConfiguration &config,
*/
/**
+ * \fn CameraSensor::sensorInfo()
* \brief Assemble and return the camera sensor info
* \param[out] info The camera sensor info
*
@@ -912,82 +287,9 @@ int CameraSensor::applyConfiguration(const SensorConfiguration &config,
*
* \return 0 on success, a negative error code otherwise
*/
-int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
-{
- if (!bayerFormat_)
- return -EINVAL;
-
- info->model = model();
-
- /*
- * The active area size is a static property, while the crop
- * rectangle needs to be re-read as it depends on the sensor
- * configuration.
- */
- info->activeAreaSize = { activeArea_.width, activeArea_.height };
-
- /*
- * \todo Support for retreiving the crop rectangle is scheduled to
- * become mandatory. For the time being use the default value if it has
- * been initialized at sensor driver validation time.
- */
- int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
- if (ret) {
- info->analogCrop = activeArea_;
- LOG(CameraSensor, Warning)
- << "The analogue crop rectangle has been defaulted to the active area size";
- }
-
- /*
- * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
- * are defined relatively to the active pixel area, while V4L2's
- * TGT_CROP target is defined in respect to the full pixel array.
- *
- * Compensate it by subtracting the active area offset.
- */
- info->analogCrop.x -= activeArea_.x;
- info->analogCrop.y -= activeArea_.y;
-
- /* The bit depth and image size depend on the currently applied format. */
- V4L2SubdeviceFormat format{};
- ret = subdev_->getFormat(pad_, &format);
- if (ret)
- return ret;
-
- info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
- info->outputSize = format.size;
-
- std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
- info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
-
- /*
- * Retrieve the pixel rate, line length and minimum/maximum frame
- * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
- * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
- */
- ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
- V4L2_CID_HBLANK,
- V4L2_CID_VBLANK });
- if (ctrls.empty()) {
- LOG(CameraSensor, Error)
- << "Failed to retrieve camera info controls";
- return -EINVAL;
- }
-
- info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
-
- const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
- info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
- info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
-
- const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
- info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
- info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
-
- return 0;
-}
/**
+ * \fn CameraSensor::computeTransform()
* \brief Compute the Transform that gives the requested \a orientation
* \param[inout] orientation The desired image orientation
*
@@ -1013,40 +315,9 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
* \return A Transform instance that applied to the CameraSensor produces images
* with \a orientation
*/
-Transform CameraSensor::computeTransform(Orientation *orientation) const
-{
- /*
- * If we cannot do any flips we cannot change the native camera mounting
- * orientation.
- */
- if (!supportFlips_) {
- *orientation = mountingOrientation_;
- return Transform::Identity;
- }
-
- /*
- * Now compute the required transform to obtain 'orientation' starting
- * from the mounting rotation.
- *
- * As a note:
- * orientation / mountingOrientation_ = transform
- * mountingOrientation_ * transform = orientation
- */
- Transform transform = *orientation / mountingOrientation_;
-
- /*
- * If transform contains any Transpose we cannot do it, so adjust
- * 'orientation' to report the image native orientation and return Identity.
- */
- if (!!(transform & Transform::Transpose)) {
- *orientation = mountingOrientation_;
- return Transform::Identity;
- }
-
- return transform;
-}
/**
+ * \fn CameraSensor::bayerOrder()
* \brief Compute the Bayer order that results from the given Transform
* \param[in] t The Transform to apply to the sensor
*
@@ -1058,23 +329,9 @@ Transform CameraSensor::computeTransform(Orientation *orientation) const
*
* \return The Bayer order produced by the sensor when the Transform is applied
*/
-BayerFormat::Order CameraSensor::bayerOrder(Transform t) const
-{
- /* Return a defined by meaningless value for non-Bayer sensors. */
- if (!bayerFormat_)
- return BayerFormat::Order::BGGR;
-
- if (!flipsAlterBayerOrder_)
- return bayerFormat_->order;
-
- /*
- * Apply the transform to the native (i.e. untransformed) Bayer order,
- * using the rest of the Bayer format supplied by the caller.
- */
- return bayerFormat_->transform(t).order;
-}
/**
+ * \fn CameraSensor::controls()
* \brief Retrieve the supported V4L2 controls and their information
*
* Control information is updated automatically to reflect the current sensor
@@ -1083,12 +340,9 @@ BayerFormat::Order CameraSensor::bayerOrder(Transform t) const
*
* \return A map of the V4L2 controls supported by the sensor
*/
-const ControlInfoMap &CameraSensor::controls() const
-{
- return subdev_->controls();
-}
/**
+ * \fn CameraSensor::getControls()
* \brief Read V4L2 controls from the sensor
* \param[in] ids The list of controls to read, specified by their ID
*
@@ -1106,12 +360,9 @@ const ControlInfoMap &CameraSensor::controls() const
* \return The control values in a ControlList on success, or an empty list on
* error
*/
-ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
-{
- return subdev_->getControls(ids);
-}
/**
+ * \fn CameraSensor::setControls()
* \brief Write V4L2 controls to the sensor
* \param[in] ctrls The list of controls to write
*
@@ -1136,10 +387,6 @@ ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
* \retval -EINVAL One of the control is not supported or not accessible
* \retval i The index of the control that failed
*/
-int CameraSensor::setControls(ControlList *ctrls)
-{
- return subdev_->setControls(ctrls);
-}
/**
* \fn CameraSensor::testPatternModes()
@@ -1150,6 +397,7 @@ int CameraSensor::setControls(ControlList *ctrls)
*/
/**
+ * \fn CameraSensor::setTestPatternMode()
* \brief Set the test pattern mode for the camera sensor
* \param[in] mode The test pattern mode
*
@@ -1157,51 +405,179 @@ int CameraSensor::setControls(ControlList *ctrls)
* pattern mode. Otherwise, this function is a no-op. Setting the same test
* pattern mode for every frame thus incurs no performance penalty.
*/
-int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
-{
- if (testPatternMode_ == mode)
- return 0;
- if (testPatternModes_.empty()) {
- LOG(CameraSensor, Error)
- << "Camera sensor does not support test pattern modes.";
- return -EINVAL;
- }
+/**
+ * \fn CameraSensor::sensorDelays()
+ * \brief Fetch the sensor delay values
+ *
+ * This function retrieves the delays that the sensor applies to controls. If
+ * the static properties database doesn't specify control delay values for the
+ * sensor, default delays that may be suitable are returned and a warning is
+ * logged.
+ *
+ * \return The sensor delay values
+ */
+
+/**
+ * \class CameraSensorFactoryBase
+ * \brief Base class for camera sensor factories
+ *
+ * The CameraSensorFactoryBase class is the base of all specializations of
+ * the CameraSensorFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
- return applyTestPatternMode(mode);
+/**
+ * \brief Construct a camera sensor factory base
+ * \param[in] name The camera sensor factory name
+ * \param[in] priority Priority order for factory selection
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ */
+CameraSensorFactoryBase::CameraSensorFactoryBase(const char *name, int priority)
+ : name_(name), priority_(priority)
+{
+ registerFactory(this);
}
-int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+/**
+ * \brief Create an instance of the CameraSensor corresponding to a media entity
+ * \param[in] entity The media entity on the source end of the sensor
+ *
+ * When multiple factories match the same \a entity, this function selects the
+ * matching factory with the highest priority as specified to the
+ * REGISTER_CAMERA_SENSOR() macro at factory registration time. If multiple
+ * matching factories have the same highest priority value, which factory gets
+ * selected is undefined and may vary between runs.
+ *
+ * \return A unique pointer to a new instance of the CameraSensor subclass
+ * matching the entity, or a null pointer if no such factory exists
+ */
+std::unique_ptr<CameraSensor> CameraSensorFactoryBase::create(MediaEntity *entity)
{
- if (testPatternModes_.empty())
- return 0;
+ const std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
- auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
- mode);
- if (it == testPatternModes_.end()) {
- LOG(CameraSensor, Error) << "Unsupported test pattern mode "
- << mode;
- return -EINVAL;
- }
+ for (const CameraSensorFactoryBase *factory : factories) {
+ std::variant<std::unique_ptr<CameraSensor>, int> result =
+ factory->match(entity);
- LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+ if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result)) {
+ LOG(CameraSensor, Debug)
+ << "Entity '" << entity->name() << "' matched by "
+ << factory->name();
+ return std::get<std::unique_ptr<CameraSensor>>(std::move(result));
+ }
- int32_t index = staticProps_->testPatternModes.at(mode);
- ControlList ctrls{ controls() };
- ctrls.set(V4L2_CID_TEST_PATTERN, index);
+ if (std::get<int>(result)) {
+ LOG(CameraSensor, Error)
+ << "Failed to create sensor for '"
+				<< entity->name() << "': " << std::get<int>(result);
+ return nullptr;
+ }
+ }
- int ret = setControls(&ctrls);
- if (ret)
- return ret;
+ return nullptr;
+}
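
For context, a caller (typically a pipeline handler) instantiates a sensor
through this entry point. A minimal usage sketch, assuming media is an
already-enumerated MediaDevice and the entity name is illustrative:

    MediaEntity *entity = media->getEntityByName("imx219 10-0010");
    std::unique_ptr<CameraSensor> sensor =
            CameraSensorFactoryBase::create(entity);
    if (!sensor)
            return -ENODEV; /* No factory matched, or creation failed. */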
- testPatternMode_ = mode;
+/**
+ * \fn CameraSensorFactoryBase::name()
+ * \brief Retrieve the camera sensor factory name
+ * \return The name of the factory
+ */
- return 0;
+/**
+ * \fn CameraSensorFactoryBase::priority()
+ * \brief Retrieve the priority value for the factory
+ * \return The priority value for the factory
+ */
+
+/**
+ * \brief Retrieve the list of all camera sensor factories
+ *
+ * The factories are sorted in decreasing priority order.
+ *
+ * \return The list of camera sensor factories
+ */
+std::vector<CameraSensorFactoryBase *> &CameraSensorFactoryBase::factories()
+{
+ /*
+	 * The static factories vector is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorFactoryBase *> factories;
+ return factories;
}
-std::string CameraSensor::logPrefix() const
+/**
+ * \brief Add a camera sensor class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor
+ */
+void CameraSensorFactoryBase::registerFactory(CameraSensorFactoryBase *factory)
{
- return "'" + entity_->name() + "'";
+ std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ auto pos = std::upper_bound(factories.begin(), factories.end(), factory,
+ [](const CameraSensorFactoryBase *value,
+ const CameraSensorFactoryBase *elem) {
+ return value->priority() > elem->priority();
+ });
+ factories.insert(pos, factory);
}
+/**
+ * \class CameraSensorFactory
+ * \brief Registration of CameraSensorFactory classes and creation of instances
+ * \tparam _CameraSensor The camera sensor class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensor classes, the
+ * CameraSensorFactory class implements auto-registration of camera sensors.
+ * Each CameraSensor subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR() macro, which will create a corresponding instance
+ * of a CameraSensorFactory subclass and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn CameraSensorFactory::CameraSensorFactory()
+ * \brief Construct a camera sensor factory
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorFactoryBase::factories()
+ * function.
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR(sensor, priority)
+ * \brief Register a camera sensor type to the sensor factory
+ * \param[in] sensor Class name of the CameraSensor derived class to register
+ * \param[in] priority Priority order for factory selection
+ *
+ * Register a CameraSensor subclass with the factory and make it available for
+ * matching sensors. The subclass needs to implement a static match
+ * function:
+ *
+ * \code{.cpp}
+ * static std::variant<std::unique_ptr<CameraSensor>, int> match(MediaEntity *entity);
+ * \endcode
+ *
+ * The function tests if the sensor class supports the camera sensor identified
+ * by a MediaEntity. If so, it creates a new instance of the sensor class. The
+ * return value is a variant that contains
+ *
+ * - A new instance of the camera sensor class if the entity matched and
+ *   creation succeeded;
+ * - A non-zero error code if the entity matched and the creation failed; or
+ * - A zero error code if the entity didn't match.
+ *
+ * When multiple factories can support the same MediaEntity (that is, when the
+ * match() functions of multiple factories succeed for the same entity), the \a
+ * priority argument selects which factory will be used. See
+ * CameraSensorFactoryBase::create() for more information.
+ */
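
To make the contract concrete, here is a sketch of a hypothetical subclass;
the class name and its internals are invented for illustration, and only the
match() signature and the macro usage follow the documented requirements:

    class MyCameraSensor : public CameraSensor
    {
    public:
            static std::variant<std::unique_ptr<CameraSensor>, int>
            match(MediaEntity *entity)
            {
                    if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
                            return { 0 };   /* No match: zero error code. */

                    auto sensor = std::make_unique<MyCameraSensor>(entity);
                    if (int ret = sensor->init())
                            return { ret }; /* Matched, but creation failed. */

                    return { std::move(sensor) };
            }

            /* ... CameraSensor interface overrides ... */
    };

    REGISTER_CAMERA_SENSOR(MyCameraSensor, 0)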
+
} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_legacy.cpp b/src/libcamera/sensor/camera_sensor_legacy.cpp
new file mode 100644
index 00000000..32989c19
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_legacy.cpp
@@ -0,0 +1,1045 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * A V4L2-backed camera sensor
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorLegacy : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorLegacy(const MediaEntity *entity);
+ ~CameraSensorLegacy();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorLegacy)
+
+ int init();
+ int generateId();
+ int validateSensorDriver();
+ void initVimcDefaultProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int initProperties();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+ int discoverAncillaryDevices();
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+ unsigned int pad_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ const BayerFormat *bayerFormat_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorLegacy
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * The implementation is currently limited to sensors that expose a single V4L2
+ * subdevice with a single pad. It will be extended to support more complex
+ * devices as needs arise.
+ */
+
+CameraSensorLegacy::CameraSensorLegacy(const MediaEntity *entity)
+ : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorLegacy::~CameraSensorLegacy() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorLegacy::match(MediaEntity *entity)
+{
+ std::unique_ptr<CameraSensorLegacy> sensor =
+ std::make_unique<CameraSensorLegacy>(entity);
+
+ int ret = sensor->init();
+ if (ret)
+ return { ret };
+
+ return { std::move(sensor) };
+}
+
+int CameraSensorLegacy::init()
+{
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ pad_ = pad->index();
+ break;
+ }
+ }
+
+ if (pad_ == UINT_MAX) {
+ LOG(CameraSensor, Error)
+ << "Sensors with more than one pad are not supported";
+ return -EINVAL;
+ }
+
+ switch (entity_->function()) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ case MEDIA_ENT_F_PROC_VIDEO_ISP:
+ break;
+
+ default:
+ LOG(CameraSensor, Error)
+ << "Invalid sensor function "
+ << utils::hex(entity_->function());
+ return -EINVAL;
+ }
+
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clear any flips to be sure we get the "native" Bayer order. This is
+ * harmless for sensors where the flips don't affect the Bayer order.
+ */
+ ControlList ctrls(subdev_->controls());
+ if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_HFLIP, 0);
+ if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_VFLIP, 0);
+ subdev_->setControls(&ctrls);
+
+ /* Enumerate, sort and cache media bus codes and sizes. */
+ formats_ = subdev_->formats(pad_);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return -EINVAL;
+ }
+
+ mbusCodes_ = utils::map_keys(formats_);
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+
+ for (const auto &format : formats_) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /* Remove duplicates. */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+ * VIMC is a bit special, as it does not yet support all the mandatory
+ * requirements regular sensors have to respect.
+ *
+	 * If the driver is VIMC, skip validation and initialize the sensor
+	 * properties from static information.
+ *
+ * \todo Remove the special case once the VIMC driver has been
+ * updated in all test platforms.
+ */
+ if (entity_->device()->driver() == "vimc") {
+ initVimcDefaultProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
+ }
+
+ /* Get the color filter array pattern (only for RAW sensors). */
+ for (unsigned int mbusCode : mbusCodes_) {
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode);
+ if (bayerFormat.isValid()) {
+ bayerFormat_ = &bayerFormat;
+ break;
+ }
+ }
+
+ ret = validateSensorDriver();
+ if (ret)
+ return ret;
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ ret = discoverAncillaryDevices();
+ if (ret)
+ return ret;
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+}
+
+int CameraSensorLegacy::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+ *
+ * Verify it's a platform device and construct ID from the device path
+	 * and the sensor model.
+ */
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
+
+int CameraSensorLegacy::validateSensorDriver()
+{
+ int err = 0;
+
+ /*
+ * Optional controls are used to register optional sensor properties. If
+ * not present, some values will be defaulted.
+ */
+ static constexpr uint32_t optionalControls[] = {
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ };
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+ for (uint32_t ctrl : optionalControls) {
+ if (!controls.count(ctrl))
+ LOG(CameraSensor, Debug)
+ << "Optional V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ }
+
+ /*
+ * Recommended controls are similar to optional controls, but will
+ * become mandatory in the near future. Be loud if they're missing.
+ */
+ static constexpr uint32_t recommendedControls[] = {
+ V4L2_CID_CAMERA_ORIENTATION,
+ };
+
+ for (uint32_t ctrl : recommendedControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Warning)
+ << "Recommended V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ err = -EINVAL;
+ }
+ }
+
+ /*
+	 * Verify whether the sensor supports horizontal/vertical flips.
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * Make sure the required selection targets are supported.
+ *
+ * Failures in reading any of the targets are not deemed to be fatal,
+ * but some properties and features, like constructing a
+ * IPACameraSensorInfo for the IPA module, won't be supported.
+ *
+ * \todo Make support for selection targets mandatory as soon as all
+ * test platforms have been updated.
+ */
+ Rectangle rect;
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect);
+ if (ret) {
+ /*
+ * Default the pixel array size to the largest size supported
+ * by the sensor. The sizes_ vector is sorted in ascending
+ * order, the largest size is thus the last element.
+ */
+ pixelArraySize_ = sizes_.back();
+
+ LOG(CameraSensor, Warning)
+ << "The PixelArraySize property has been defaulted to "
+ << pixelArraySize_;
+ err = -EINVAL;
+ } else {
+ pixelArraySize_ = rect.size();
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_);
+ if (ret) {
+ activeArea_ = Rectangle(pixelArraySize_);
+ LOG(CameraSensor, Warning)
+ << "The PixelArrayActiveAreas property has been defaulted to "
+ << activeArea_;
+ err = -EINVAL;
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect);
+ if (ret) {
+ LOG(CameraSensor, Warning)
+ << "Failed to retrieve the sensor crop rectangle";
+ err = -EINVAL;
+ }
+
+ if (err) {
+ LOG(CameraSensor, Warning)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Warning)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ }
+
+ if (!bayerFormat_)
+ return 0;
+
+ /*
+ * For raw sensors, make sure the sensor driver supports the controls
+ * required by the CameraSensor class.
+ */
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ err = 0;
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return err;
+ }
+
+ return 0;
+}
+
+void CameraSensorLegacy::initVimcDefaultProperties()
+{
+ /* Use the largest supported size. */
+ pixelArraySize_ = sizes_.back();
+ activeArea_ = Rectangle(pixelArraySize_);
+}
+
+void CameraSensorLegacy::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorLegacy::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorLegacy::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+ * The camera sensor supports test patterns but we don't know
+ * how to map them so this should be fixed.
+ */
+		LOG(CameraSensor, Debug) << "No static test pattern map for '"
+					 << model() << "'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+int CameraSensorLegacy::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ int ret = generateId();
+ if (ret)
+ return ret;
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern, register only for RAW sensors. */
+ if (bayerFormat_) {
+ int32_t cfa;
+ switch (bayerFormat_->order) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+ }
+
+ return 0;
+}
+
+int CameraSensorLegacy::discoverAncillaryDevices()
+{
+ int ret;
+
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+std::vector<Size> CameraSensorLegacy::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorLegacy::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorLegacy::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
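
A usage sketch follows; the media bus code and sizes are illustrative, and the
returned format has a zero code when no mode satisfies the request:

    std::vector<unsigned int> codes{ MEDIA_BUS_FMT_SRGGB10_1X10 };
    V4L2SubdeviceFormat format =
            sensor->getFormat(codes, Size(1920, 1080), Size(4056, 3040));
    if (!format.code)
            return -EINVAL; /* No sensor mode is large enough. */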
+
+int CameraSensorLegacy::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(pad_, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorLegacy::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorLegacy::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			       return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling will look identical and
+ * only the first found one will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
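
A usage sketch, with illustrative values for the SensorConfiguration fields
used above:

    SensorConfiguration config;
    config.bitDepth = 10;
    config.outputSize = { 1920, 1080 };

    V4L2SubdeviceFormat sensorFormat;
    int ret = sensor->applyConfiguration(config, Transform::Identity,
                                         &sensorFormat);
    if (ret)
            return ret;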
+
+int CameraSensorLegacy::sensorInfo(IPACameraSensorInfo *info) const
+{
+ if (!bayerFormat_)
+ return -EINVAL;
+
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ /*
+	 * \todo Support for retrieving the crop rectangle is scheduled to
+ * become mandatory. For the time being use the default value if it has
+ * been initialized at sensor driver validation time.
+ */
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
+ if (ret) {
+ info->analogCrop = activeArea_;
+ LOG(CameraSensor, Warning)
+ << "The analogue crop rectangle has been defaulted to the active area size";
+ }
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+	 *
+	 * Compensate by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(pad_, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorLegacy::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+	 * If the transform contains a Transpose we cannot apply it, so adjust
+	 * 'orientation' to report the image's native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
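
Usage sketch: the caller passes in the orientation it would like, and receives
both the flip combination to program through setFormat() and, via the in/out
parameter, the orientation that will actually be achieved:

    Orientation orientation = Orientation::Rotate180; /* requested */
    Transform transform = sensor->computeTransform(&orientation);
    /* 'transform' feeds setFormat(); 'orientation' now reports the result. */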
+
+BayerFormat::Order CameraSensorLegacy::bayerOrder(Transform t) const
+{
+	/* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
+
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ return bayerFormat_->transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorLegacy::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorLegacy::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorLegacy::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorLegacy::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorLegacy::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorLegacy::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
index 6e28b09e..e2f518f9 100644
--- a/src/libcamera/sensor/camera_sensor_properties.cpp
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_properties.cpp - Database of camera sensor properties
+ * Database of camera sensor properties
*/
#include "libcamera/internal/camera_sensor_properties.h"
@@ -41,6 +41,35 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties)
* \brief Map that associates the TestPattern control value with the indexes of
* the corresponding sensor test pattern modes as returned by
* V4L2_CID_TEST_PATTERN.
+ *
+ * \var CameraSensorProperties::sensorDelays
+ * \brief Sensor control application delays
+ *
+ * This structure may be defined as empty if the actual sensor delays are not
+ * available or have not been measured.
+ */
+
+/**
+ * \struct CameraSensorProperties::SensorDelays
+ * \brief Sensor control application delay values
+ *
+ * This structure holds delay values, expressed in number of frames, between the
+ * time a control value is applied to the sensor and the time that value is
+ * reflected in the output. For example "2 frames delay" means that parameters
+ * set during frame N will take effect for frame N+2 (and by extension a delay
+ * of 0 would mean the parameter is applied immediately to the current frame).
+ *
+ * \var CameraSensorProperties::SensorDelays::exposureDelay
+ * \brief Number of frames between application of exposure control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::gainDelay
+ * \brief Number of frames between application of analogue gain control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::vblankDelay
+ * \brief Number of frames between application of vblank control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::hblankDelay
+ * \brief Number of frames between application of hblank control and effect
*/
/**
@@ -52,6 +81,21 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties)
const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
{
static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0144", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
{ "ar0521", {
.unitCellSize = { 2200, 2200 },
.testPatternModes = {
@@ -60,6 +104,33 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBars, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
},
+ .sensorDelays = { },
+ } },
+ { "gc05a2", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "gc08a3", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "hi846", {
.unitCellSize = { 1120, 1120 },
@@ -78,6 +149,18 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* 9: "Resolution Pattern"
*/
},
+ .sensorDelays = { },
+ } },
+ { "imx214", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
} },
{ "imx219", {
.unitCellSize = { 1120, 1120 },
@@ -88,6 +171,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "imx258", {
.unitCellSize = { 1120, 1120 },
@@ -98,22 +187,72 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
+ .sensorDelays = { },
+ } },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "imx290", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "imx296", {
.unitCellSize = { 3450, 3450 },
.testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "imx327", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx462", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
} },
{ "imx477", {
.unitCellSize = { 1550, 1550 },
.testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
} },
{ "imx519", {
.unitCellSize = { 1220, 1220 },
@@ -126,6 +265,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
*/
},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
} },
{ "imx708", {
.unitCellSize = { 1400, 1400 },
@@ -136,6 +281,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
} },
{ "ov2685", {
.unitCellSize = { 1750, 1750 },
@@ -150,6 +301,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* 5: "Color Square"
*/
},
+ .sensorDelays = { },
} },
{ "ov2740", {
.unitCellSize = { 1400, 1400 },
@@ -157,6 +309,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1},
},
+ .sensorDelays = { },
} },
{ "ov4689", {
.unitCellSize = { 2000, 2000 },
@@ -170,6 +323,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* colorBarType2 and colorBarType3.
*/
},
+ .sensorDelays = { },
} },
{ "ov5640", {
.unitCellSize = { 1400, 1400 },
@@ -177,10 +331,25 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
+ .sensorDelays = { },
} },
{ "ov5647", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {},
+ /*
+ * We run this sensor in a mode where the gain delay is
+ * bumped up to 2. It seems to be the only way to make
+ * the delays "predictable".
+ *
+ * \todo Verify these delays properly, as the upstream
+ * driver appears to configure _no_ delay.
+ */
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "ov5670", {
.unitCellSize = { 1120, 1120 },
@@ -188,6 +357,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
+ .sensorDelays = { },
} },
{ "ov5675", {
.unitCellSize = { 1120, 1120 },
@@ -195,6 +365,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
+ .sensorDelays = { },
} },
{ "ov5693", {
.unitCellSize = { 1400, 1400 },
@@ -207,6 +378,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* Rolling Bar".
*/
},
+ .sensorDelays = { },
} },
{ "ov64a40", {
.unitCellSize = { 1008, 1008 },
@@ -220,6 +392,22 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* 4: "Vertical Color Bar Type 4"
*/
},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov7251", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "ov8858", {
.unitCellSize = { 1120, 1120 },
@@ -233,6 +421,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* 4: "Vertical Color Bar Type 4"
*/
},
+ .sensorDelays = { },
} },
{ "ov8865", {
.unitCellSize = { 1400, 1400 },
@@ -247,6 +436,17 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
* 5: "Color squares with rolling bar"
*/
},
+ .sensorDelays = { },
+ } },
+ { "ov9281", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "ov13858", {
.unitCellSize = { 1120, 1120 },
@@ -254,6 +454,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
+ .sensorDelays = { },
} },
};
diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp
new file mode 100644
index 00000000..ab75b1f8
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_raw.cpp
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy.
+ *
+ * A raw camera sensor using the V4L2 streams API
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorRaw : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorRaw(const MediaEntity *entity);
+ ~CameraSensorRaw();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ V4L2Subdevice::Stream imageStream() const override;
+ std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override;
+ V4L2SubdeviceFormat embeddedDataFormat() const override;
+ int setEmbeddedDataEnabled(bool enable) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorRaw)
+
+ std::optional<int> init();
+ int initProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+
+ struct Streams {
+ V4L2Subdevice::Stream sink;
+ V4L2Subdevice::Stream source;
+ };
+
+ struct {
+ Streams image;
+ std::optional<Streams> edata;
+ } streams_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ BayerFormat::Order cfaPattern_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorRaw
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * This class supports single-subdev sensors with a single source pad and one
+ * or two internal sink pads (for the image and embedded data streams).
+ */
+
+CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity)
+ : entity_(entity), staticProps_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorRaw::~CameraSensorRaw() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorRaw::match(MediaEntity *entity)
+{
+ /* Check the entity type. */
+ if (entity->type() != MediaEntity::Type::V4L2Subdevice ||
+ entity->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported entity type ("
+ << utils::to_underlying(entity->type())
+ << ") or function (" << utils::hex(entity->function()) << ")";
+ return { 0 };
+ }
+
+ /* Count and check the number of pads. */
+ static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_INTERNAL;
+ unsigned int numSinks = 0;
+ unsigned int numSources = 0;
+
+ for (const MediaPad *pad : entity->pads()) {
+ switch (pad->flags() & kPadFlagsMask) {
+ case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL:
+ numSinks++;
+ break;
+
+ case MEDIA_PAD_FL_SOURCE:
+ numSources++;
+ break;
+
+ default:
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported pad " << pad->index()
+ << " type " << utils::hex(pad->flags());
+ return { 0 };
+ }
+ }
+
+ if (numSinks < 1 || numSinks > 2 || numSources != 1) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported number of sinks ("
+ << numSinks << ") or sources (" << numSources << ")";
+ return { 0 };
+ }
+
+ /*
+ * The entity matches. Create the camera sensor and initialize it. The
+ * init() function will perform further match checks.
+ */
+ std::unique_ptr<CameraSensorRaw> sensor =
+ std::make_unique<CameraSensorRaw>(entity);
+
+ std::optional<int> err = sensor->init();
+ if (err)
+ return { *err };
+
+ return { std::move(sensor) };
+}
+
+std::optional<int> CameraSensorRaw::init()
+{
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret)
+ return { ret };
+
+ /*
+ * 1. Identify the pads.
+ */
+
+ /*
+ * First locate the source pad. The match() function guarantees there
+ * is one and only one source pad.
+ */
+ unsigned int sourcePad = UINT_MAX;
+
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ sourcePad = pad->index();
+ break;
+ }
+ }
+
+ /*
+ * Iterate over the routes to identify the streams on the source pad,
+ * and the internal sink pads.
+ */
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return { ret };
+
+ bool imageStreamFound = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source.pad != sourcePad) {
+ LOG(CameraSensor, Error) << "Invalid route " << route;
+ return { -EINVAL };
+ }
+
+ /* Identify the stream type based on the supported formats. */
+ V4L2Subdevice::Formats formats = subdev_->formats(route.source);
+
+ std::optional<MediaBusFormatInfo::Type> type;
+
+ for (const auto &[code, sizes] : formats) {
+ const MediaBusFormatInfo &info =
+ MediaBusFormatInfo::info(code);
+ if (info.isValid()) {
+ type = info.type;
+ break;
+ }
+ }
+
+ if (!type) {
+ LOG(CameraSensor, Warning)
+ << "No known format on pad " << route.source;
+ continue;
+ }
+
+ switch (*type) {
+ case MediaBusFormatInfo::Type::Image:
+ if (imageStreamFound) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal image streams ("
+ << streams_.image.sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ imageStreamFound = true;
+ streams_.image.sink = route.sink;
+ streams_.image.source = route.source;
+ break;
+
+ case MediaBusFormatInfo::Type::Metadata:
+ /*
+ * Skip metadata streams that are not sensor embedded
+ * data. The source stream reports a generic metadata
+			 * format; check the sink stream for the exact format.
+ */
+ formats = subdev_->formats(route.sink);
+ if (formats.size() != 1)
+ continue;
+
+ if (MediaBusFormatInfo::info(formats.cbegin()->first).type !=
+ MediaBusFormatInfo::Type::EmbeddedData)
+ continue;
+
+ if (streams_.edata) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal embedded data streams ("
+ << streams_.edata->sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ streams_.edata = { route.sink, route.source };
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!imageStreamFound) {
+ LOG(CameraSensor, Error) << "No image stream found";
+ return { -EINVAL };
+ }
+
+ LOG(CameraSensor, Debug)
+ << "Found image stream " << streams_.image.sink
+ << " -> " << streams_.image.source;
+
+ if (streams_.edata)
+ LOG(CameraSensor, Debug)
+ << "Found embedded data stream " << streams_.edata->sink
+ << " -> " << streams_.edata->source;
+
+ /*
+ * 2. Enumerate and cache the media bus codes, sizes and colour filter
+ * array order for the image stream.
+ */
+
+ /*
+ * Get the native sensor CFA pattern. It is simpler to retrieve it from
+ * the internal image sink pad as it is guaranteed to expose a single
+ * format, and is not affected by flips.
+ */
+ V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink);
+ if (formats.size() != 1) {
+ LOG(CameraSensor, Error)
+ << "Image pad has " << formats.size()
+ << " formats, expected 1";
+ return { -EINVAL };
+ }
+
+ uint32_t nativeFormat = formats.cbegin()->first;
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat);
+ if (!bayerFormat.isValid()) {
+ LOG(CameraSensor, Error)
+ << "Invalid native format " << nativeFormat;
+ return { 0 };
+ }
+
+ cfaPattern_ = bayerFormat.order;
+
+ /*
+ * Retrieve and cache the media bus codes and sizes on the source image
+ * stream.
+ */
+ formats_ = subdev_->formats(streams_.image.source);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return { -EINVAL };
+ }
+
+ /* Populate and sort the media bus codes and the sizes. */
+ for (const auto &[code, ranges] : formats_) {
+ /* Drop non-raw formats (in case we have a hybrid sensor). */
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ mbusCodes_.push_back(code);
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ if (mbusCodes_.empty()) {
+ LOG(CameraSensor, Debug) << "No raw image formats found";
+ return { 0 };
+ }
+
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /*
+ * Remove duplicate sizes. There are no duplicate media bus codes as
+ * they are the keys in the formats map.
+ */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+ * 3. Query selection rectangles. Retrieve properties, and verify that
+ * all the expected selection rectangles are supported.
+ */
+
+ Rectangle rect;
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop bounds";
+ return { ret };
+ }
+
+ pixelArraySize_ = rect.size();
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT,
+ &activeArea_);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop default";
+ return { ret };
+ }
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop rectangle";
+ return { ret };
+ }
+
+ /*
+ * 4. Verify that all required controls are present.
+ */
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ ret = 0;
+
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return { ret };
+ }
+
+ /*
+	 * Verify whether the sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * 5. Discover ancillary devices.
+ *
+ * \todo This code may be shared by different V4L2 sensor classes.
+ */
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ /*
+ * 6. Initialize properties.
+ */
+
+ ret = initProperties();
+ if (ret)
+ return { ret };
+
+ /*
+ * 7. Initialize controls.
+ */
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
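+	 *
+	 * For reference, the line length in pixels is the output width plus
+	 * HBLANK (see sensorInfo()), so the minimum HBLANK yields the
+	 * sensor's minimum line length.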
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return { ret };
+
+ return {};
+}
+
+int CameraSensorRaw::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ id_ = sysfs::firmwareNodePath(subdev_->devicePath());
+ if (id_.empty()) {
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+ }
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern. */
+ uint32_t cfa;
+
+ switch (cfaPattern_) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ default:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+
+ return 0;
+}
+
+void CameraSensorRaw::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays()
+{
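+	/*
+	 * Sensor delays are expressed in frames: the number of frames that
+	 * elapse between writing a control value and the first frame on which
+	 * it takes effect.
+	 */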
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorRaw::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+		 * The camera sensor supports test patterns, but we don't know
+		 * how to map them, so this should be fixed.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+	 * camera sensor properties. This makes it easier to verify, in the
+	 * for loop below that creates the list of supported test patterns,
+	 * whether a given control index is supported.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorRaw::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(streams_.image.source, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(streams_.image.source, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			     return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+	 * \todo The actual size computation is ignored for now and only the
+	 * output size is considered. This implies that resolutions obtained
+	 * with different cropping/subsampling settings will look identical,
+	 * and only the first one found will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+V4L2Subdevice::Stream CameraSensorRaw::imageStream() const
+{
+ return streams_.image.source;
+}
+
+std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const
+{
+ if (!streams_.edata)
+ return {};
+
+ return { streams_.edata->source };
+}
+
+V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const
+{
+ if (!streams_.edata)
+ return {};
+
+ V4L2SubdeviceFormat format;
+ int ret = subdev_->getFormat(streams_.edata->source, &format);
+ if (ret)
+ return {};
+
+ return format;
+}
+
+int CameraSensorRaw::setEmbeddedDataEnabled(bool enable)
+{
+ if (!streams_.edata)
+ return enable ? -ENOSTR : 0;
+
+ V4L2Subdevice::Routing routing{ 2 };
+
+ routing[0].sink = streams_.image.sink;
+ routing[0].source = streams_.image.source;
+ routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+
+ routing[1].sink = streams_.edata->sink;
+ routing[1].source = streams_.edata->source;
+ routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0;
+
+ int ret = subdev_->setRouting(&routing);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the embedded data stream has been enabled or disabled
+ * correctly. Assume at least one route will match the embedded data
+ * source stream, as there would be something seriously wrong
+ * otherwise.
+ */
+ bool enabled = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source != streams_.edata->source)
+ continue;
+
+ enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+ break;
+ }
+
+ if (enabled != enable)
+ return enabled ? -EISCONN : -ENOSTR;
+
+ return 0;
+}
+
+int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const
+{
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &info->analogCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+ *
+ * Compensate it by subtracting the active area offset.
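+	 *
+	 * For example (illustrative values): if the active area starts at
+	 * (8, 8) within the pixel array, a V4L2 crop rectangle positioned at
+	 * (8, 8) corresponds to (0, 0) in analogCrop coordinates.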
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(streams_.image.source, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorRaw::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
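+	 *
+	 * For example (illustrative): with mountingOrientation_ = Rotate180
+	 * and a requested *orientation of Rotate0, the resulting transform is
+	 * Rotate180, i.e. HFlip combined with VFlip.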
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+	 * If the transform contains a Transpose, we cannot perform it, so
+	 * adjust 'orientation' to report the image native orientation and
+	 * return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const
+{
+ if (!flipsAlterBayerOrder_)
+ return cfaPattern_;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None };
+ return format.transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorRaw::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorRaw::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorRaw::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
index bf4b131a..dce74ed6 100644
--- a/src/libcamera/sensor/meson.build
+++ b/src/libcamera/sensor/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
'camera_sensor.cpp',
+ 'camera_sensor_legacy.cpp',
'camera_sensor_properties.cpp',
+ 'camera_sensor_raw.cpp',
])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..d9b61d37
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <libcamera/base/memfd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can also be checked using the
+ * bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together to share the memory allocated by a single SharedMem object where
+ * possible. This helps minimize the number of memfds created.
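+ *
+ * A minimal usage sketch (illustrative only, the name and size are arbitrary):
+ *
+ * \code
+ * SharedMem shm("my-shared-data", 4096);
+ * if (!shm)
+ *	return -ENOMEM;
+ *
+ * Span<uint8_t> data = shm.mem();
+ * data[0] = 42;
+ * \endcode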
+ */
+
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
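+	/*
+	 * Seal the memfd against shrinking and growing, so that the size of
+	 * the mapping remains valid for every process holding the file
+	 * descriptor.
+	 */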
+ UniqueFD memfd = MemFd::create(name.c_str(), size,
+ MemFd::Seal::Shrink | MemFd::Seal::Grow);
+ if (!memfd.isValid())
+ return;
+
+ fd_ = SharedFD(std::move(memfd));
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+ return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam T The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the object \a T
+ * size. The bool() operator should be used to check whether the allocation was
+ * successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
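+ *
+ * A minimal usage sketch (illustrative only, the Layout type is a hypothetical
+ * trivially-copyable struct):
+ *
+ * \code
+ * struct Layout {
+ *	uint32_t width;
+ *	uint32_t height;
+ * };
+ *
+ * SharedMemObject<Layout> layout("layout", Layout{ 640, 480 });
+ * if (!layout)
+ *	return -ENOMEM;
+ *
+ * layout->width = 1920;
+ * \endcode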
+ */
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified by its ~T() destructor. Depending on ~T(),
+ * accessing the object after destruction of the SharedMemObject causes
+ * undefined behaviour. It is the responsibility of the user of this class to
+ * synchronize with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..a50db668
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,208 @@
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretic problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
+
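+A rough sketch of that direction (names and sizes are hypothetical, not an
+agreed design): make the shared memory hold a small ring of stats buffers,
+write into the next slot on every frame, and pass the slot index through
+the signal.
+
+    static constexpr unsigned int kStatsBufferCount = 4;
+    SharedMemObject<std::array<SwIspStats, kStatsBufferCount>> sharedStats_;
+    unsigned int statsIndex_ = 0;
+    Signal<unsigned int> statsReady;
+
+    void SwStatsCpu::finishFrame()
+    {
+            (*sharedStats_)[statsIndex_] = stats_;
+            statsReady.emit(statsIndex_);
+            statsIndex_ = (statsIndex_ + 1) % kStatsBufferCount;
+    }
+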
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+But better, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal ?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't mark it as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now, I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me thing we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more than debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the let's wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+8. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+9. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+13. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building up on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..f0b83261
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kRGBLookupSize
+ * \brief Size of a color lookup table
+ */
+
+/**
+ * \typedef DebayerParams::ColorLookupTable
+ * \brief Type of the lookup tables for red, green, blue values
+ */
+
+/**
+ * \var DebayerParams::red
+ * \brief Lookup table for red color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::green
+ * \brief Lookup table for green color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::blue
+ * \brief Lookup table for blue color, mapping input values to output values
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ *
+ * \return 0 on success, a negative errno on failure
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats
+ * \param[in] inputFormat The input format
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats
+ * \param[in] inputFormat The input format
+ *
+ * \return All supported output formats or an empty vector if there are none
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config
+ */
+
+/**
+ * \fn void Debayer::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format
+ * \param[in] frame The frame number
+ * \param[in] input The input buffer
+ * \param[in] output The output buffer
+ * \param[in] params The parameters to be used in debayering
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input size
+ *
+ * \return The valid size ranges or an empty range if there are none
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..d7ca060d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..31ab96ab
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+
+#include <linux/dma-buf.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats))
+{
+ /*
+	 * Reading from uncached buffers may be very slow. In that case it is
+	 * better to copy the input buffer data to normal memory, while for
+	 * cached buffers the copy is unnecessary overhead.
+	 * enableInputMemcpy_ makes this behaviour configurable. At the moment
+	 * we always set it to true as the safer choice, but this should be
+	 * changed in the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize color lookup tables */
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++)
+ red_[i] = green_[i] = blue_[i] = i;
+}
+
+DebayerCpu::~DebayerCpu() = default;
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+	 * For the first pixel, getting a pixel from the previous column uses
+	 * x - 2 to skip the 5th byte, which holds the least-significant bits
+	 * for 4 pixels. The same applies to the last pixel (which uses x + 2)
+	 * when looking at the next column.
+ */
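+	/*
+	 * CSI2-packed 10-bit layout, 5 bytes per 4 pixels: bytes 0-3 hold
+	 * the 8 most-significant bits of pixels 0-3, and byte 4 holds the 2
+	 * least-significant bits of all 4 pixels.
+	 */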
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+		/* Odd pixel GBRG -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Set up the Debayer object according to the passed-in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 ||
+ outputFormat == formats::XBGR8888 || outputFormat == formats::ABGR8888) {
+ config.bpp = 32;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+ bool addAlphaByte = false;
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::XRGB8888:
+ case formats::ARGB8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::RGB888:
+ break;
+ case formats::XBGR8888:
+ case formats::ABGR8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer8_BGBG_BGR888<true> : &DebayerCpu::debayer8_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer8_GRGR_BGR888<true> : &DebayerCpu::debayer8_GRGR_BGR888<false>;
+ break;
+ case 10:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10_BGBG_BGR888<true> : &DebayerCpu::debayer10_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10_GRGR_BGR888<true> : &DebayerCpu::debayer10_GRGR_BGR888<false>;
+ break;
+ case 12:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer12_BGBG_BGR888<true> : &DebayerCpu::debayer12_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer12_GRGR_BGR888<true> : &DebayerCpu::debayer12_GRGR_BGR888<false>;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+			<< "Mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
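+	/*
+	 * Center the crop window on the input frame, aligning its start to
+	 * the Bayer pattern size so the window keeps the same CFA phase as
+	 * the full frame.
+	 */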
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+ /* Pad with patternSize.width on both the left and right sides */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+
+ if (enableInputMemcpy_) {
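+ /* One buffer per pattern line, plus one for the incoming next line */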
+ for (unsigned int i = 0; i <= inputConfig_.patternSize.height; i++)
+ lineBuffers_[i].resize(lineBufferLength_);
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
+
+/*
+ * Get the width and height at which the Bayer pattern repeats.
+ * Return the pattern size, or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+ /* Round up to a multiple of 8 bytes for 64-bit alignment */
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i].data(),
+ linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i].data() + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to the first unused line buffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_].data(),
+ linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_].data() + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as the previous line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
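+ /*
+ * For window_.y == 0 the loop above stopped 2 lines early (yEnd was
+ * reduced by 2). Process the remaining 2 lines here, reusing the
+ * previous line in place of the missing next line for the final row.
+ */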
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* The next line may point outside of src, use the previous line instead. */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+namespace {
+
+inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+} /* namespace */
+
+void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ std::vector<DmaSyncer> dmaSyncers;
+ for (const FrameBuffer::Plane &plane : input->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read);
+
+ for (const FrameBuffer::Plane &plane : output->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write);
+
+ green_ = params.green;
+ red_ = swapRedBlueGains_ ? params.blue : params.red;
+ blue_ = swapRedBlueGains_ ? params.red : params.blue;
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ dmaSyncers.clear();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ /*
+ * Buffer ids are currently not used, so pass zero as the bufferId parameter.
+ *
+ * \todo Pass real bufferId once stats buffer passing is changed.
+ */
+ stats_->finishFrame(frame, 0);
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+ * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..2c47e7c6
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor for the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer one line of Bayer input data into the output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+ * colors. To allow copying the src lines only once, 3 temporary buffers,
+ * each holding a single line, are used, re-using the oldest buffer for
+ * the next line, and the pointers are swizzled so that:
+ * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly, for Bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ template<bool addAlphaByte>
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ DebayerParams::ColorLookupTable red_;
+ DebayerParams::ColorLookupTable green_;
+ DebayerParams::ColorLookupTable blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ std::vector<uint8_t> lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames to let things stabilize, then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
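The sliding-window scheme described in the debayerFn documentation above can be
reduced to a short self-contained sketch. This is an illustration only, not part
of the patch; it shows a 2x2 pattern with three rotating line buffers, and
processRow() is a placeholder for the real per-line debayer function:

    #include <stdint.h>
    #include <string.h>
    #include <vector>

    /* Illustrative 3-line sliding window; each source line is copied once. */
    void slidingWindow(const uint8_t *src, unsigned int stride,
                       unsigned int height, unsigned int lineLength)
    {
            std::vector<uint8_t> bufs[3];
            for (auto &b : bufs)
                    b.resize(lineLength);

            const uint8_t *lines[3] = {};
            unsigned int next = 0;

            for (unsigned int y = 0; y < height; y++) {
                    /* Copy the incoming line once, into the oldest buffer. */
                    memcpy(bufs[next].data(), src + y * stride, lineLength);

                    /* Swizzle the pointers: previous <- current <- next. */
                    lines[0] = lines[1];
                    lines[1] = lines[2];
                    lines[2] = bufs[next].data();
                    next = (next + 1) % 3;

                    /* Once primed, lines[0..2] are prev/current/next of row y - 1. */
                    if (y >= 2) {
                            /* processRow(lines); */
                    }
            }
    }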
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..aac7eda7
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_internal_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..a64e6b2c
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <cmath>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Construct a SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ * \param[out] ipaControls The IPA controls to update
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
+ ControlInfoMap *ipaControls)
+ : dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+ /*
+ * debayerParams_ must be initialized because the initial value is used for
+ * the first two frames, i.e. until stats processing starts providing its
+ * own parameters.
+ *
+ * \todo This should be handled in the same place as the related
+ * operations, in the IPA module.
+ */
+ std::array<uint8_t, 256> gammaTable;
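+ /* Encoding gamma of 0.5 (display gamma 2.0), a rough sRGB approximation */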
+ for (unsigned int i = 0; i < 256; i++)
+ gammaTable[i] = UINT8_MAX * std::pow(i / 256.0, 0.5);
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ debayerParams_.red[i] = gammaTable[i];
+ debayerParams_.green[i] = gammaTable[i];
+ debayerParams_.blue[i] = gammaTable[i];
+ }
+
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaBufAllocator object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+ * The IPA tuning file name is derived from the sensor name. If the
+ * tuning file isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls(),
+ ipaControls);
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* Make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns success.
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(frame, bufferId, sensorControls);
+}
+
+/**
+ * \brief Check the validity of the SoftwareIsp object
+ * \return True if the SoftwareIsp object is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range, or an empty range if there is none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * \brief Get the output stride and the frame size in bytes for the given
+ * output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed-in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ipa::soft::IPAConfigInfo &configInfo)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(configInfo);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] stream Output stream exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+ /* Single output for now */
+ if (stream == nullptr)
+ return -EINVAL;
+
+ return dmaHeap_.exportBuffers(count, { debayer_->frameSize() }, buffers);
+}
+
+/**
+ * \brief Queue a request and process the control list from the application
+ * \param[in] frame The number of the frame which will be processed next
+ * \param[in] controls The controls for the \a frame
+ */
+void SoftwareIsp::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ ipa_->queueRequest(frame, controls);
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::queueBuffers(uint32_t frame, FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ /* We only support a single stream for now. */
+ if (outputs.size() != 1)
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+ }
+
+ queuedInputBuffers_.push_back(input);
+
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++) {
+ FrameBuffer *const buffer = iter->second;
+ queuedOutputBuffers_.push_back(buffer);
+ process(frame, input, buffer);
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Start the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stop the Software ISP streaming operation
+ *
+ * All pending buffers are returned as cancelled before this function
+ * returns.
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ Thread::current()->dispatchMessages(Message::Type::InvokeMessage, this);
+
+ ipa_->stop();
+
+ for (auto buffer : queuedOutputBuffers_) {
+ FrameMetadata &metadata = buffer->_d()->metadata();
+ metadata.status = FrameMetadata::FrameCancelled;
+ outputBufferReady.emit(buffer);
+ }
+ queuedOutputBuffers_.clear();
+
+ for (auto buffer : queuedInputBuffers_) {
+ FrameMetadata &metadata = buffer->_d()->metadata();
+ metadata.status = FrameMetadata::FrameCancelled;
+ inputBufferReady.emit(buffer);
+ }
+ queuedInputBuffers_.clear();
+}
+
+/**
+ * \brief Pass the input framebuffer to the ISP worker for processing
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output)
+{
+ ipa_->computeParams(frame);
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, frame, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady(uint32_t frame, uint32_t bufferId)
+{
+ ispStatsReady.emit(frame, bufferId);
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ ASSERT(queuedInputBuffers_.front() == input);
+ queuedInputBuffers_.pop_front();
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ ASSERT(queuedOutputBuffers_.front() == output);
+ queuedOutputBuffers_.pop_front();
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
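For orientation, here is a rough, hypothetical sketch of how a pipeline handler
can drive SoftwareIsp, using only the API introduced by this patch. Error
handling is condensed, and outputStream and rawBuffer are placeholder names:

    /* Hypothetical usage sketch, not part of the patch. */
    #include <errno.h>
    #include <functional>
    #include <memory>
    #include <vector>

    #include "libcamera/internal/software_isp/software_isp.h"

    using namespace libcamera;

    int runSoftIsp(PipelineHandler *pipe, const CameraSensor *sensor,
                   ControlInfoMap *ipaControls, const StreamConfiguration &inputCfg,
                   StreamConfiguration &outputCfg,
                   const ipa::soft::IPAConfigInfo &configInfo,
                   Stream *outputStream, FrameBuffer *rawBuffer)
    {
            SoftwareIsp isp(pipe, sensor, ipaControls);
            if (!isp.isValid())
                    return -ENODEV;

            int ret = isp.configure(inputCfg, { std::ref(outputCfg) }, configInfo);
            if (ret)
                    return ret;

            std::vector<std::unique_ptr<FrameBuffer>> buffers;
            isp.exportBuffers(outputStream, 4, &buffers);

            isp.start();

            /* Per request; completion arrives via inputBufferReady / outputBufferReady. */
            isp.queueBuffers(0, rawBuffer, { { outputStream, buffers[0].get() } });

            isp.stop();
            return 0;
    }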
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..c520c806
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function + functions to gather statistics on a
+ * line by line basis. This allows CPU based software debayering to interleave
+ * debayering and statistics gathering on a line by line basis while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Get whether the statistics object is valid
+ *
+ * \return True if the object is valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input formats, e.g. Bayer data, processing is done in blocks of
+ * multiple lines and/or columns at a time. This function returns the width
+ * and height at which the (Bayer) pattern repeats. Window values are rounded
+ * down to a multiple of this size, and the height also indicates whether
+ * processLine2() needs to be called.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes line 0 for input formats with
+ * patternSize height == 1.
+ * It will process lines 0 and 1 for input formats with patternSize height >= 2.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process line 2 and 3
+ * \param[in] y The y coordinate
+ * \param[in] src The input data
+ *
+ * This function processes lines 2 and 3 for input formats with
+ * patternSize height == 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for Bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
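+/*
+ * yVal is the luminance in 8.8 fixed point (Y * 256). Dividing by
+ * 256 * 256 * (div) scales the div-times-oversized input range down to
+ * 8 bits and maps it onto the kYHistogramSize histogram bins.
+ */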
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4: sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4: sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4: sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5: sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5: sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit(frame, bufferId);
+}
+
+/**
+ * \brief Set up the SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if \a order is a standard Bayer order and set up xShift_ and
+ * swapLines_ so that a single BGGR stats function can be used for all 4
+ * standard orders.
+ *
+ * \return 0 on success, a negative errno value for an unsupported order
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+ ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
+
+/**
+ * \brief Configure the statistics object for the passed-in input configuration
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+ /* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window object
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
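+ /* Round x/y down to a multiple of the pattern size, then apply xShift_ */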
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+ /* width_ - xShift_ to make sure the window fits */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..26a2f462
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame(uint32_t frame, uint32_t bufferId);
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<uint32_t, uint32_t> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
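The per-frame statistics flow, condensed into a hypothetical caller for
illustration; in the patch itself this sequence is driven by
DebayerCpu::process2()/process4() via SoftwareIsp:

    /* Hypothetical sketch of the per-frame statistics flow, not part of the patch. */
    #include <stdint.h>

    #include <libcamera/geometry.h>
    #include <libcamera/stream.h>

    #include "swstats_cpu.h"

    using namespace libcamera;

    void gatherFrameStats(SwStatsCpu &stats, const StreamConfiguration &inputCfg,
                          const Size &outputSize, unsigned int height,
                          const uint8_t *linePointers[])
    {
            stats.configure(inputCfg);              /* Selects the per-format stats function */
            stats.setWindow(Rectangle(outputSize)); /* Rounded down to the Bayer pattern */

            stats.startFrame();
            for (unsigned int y = 0; y < height; y += stats.patternSize().height) {
                    /* The caller maintains linePointers[] as a sliding window. */
                    stats.processLine0(y, linePointers);
                    if (stats.patternSize().height == 4)
                            stats.processLine2(y, linePointers);
            }

            stats.finishFrame(0, 0); /* Copies the stats to shared memory, emits statsReady */
    }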
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
index 19689585..1af5386a 100644
--- a/src/libcamera/source_paths.cpp
+++ b/src/libcamera/source_paths.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.cpp - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#include "libcamera/internal/source_paths.h"
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index 540a428e..978d7275 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,22 +2,22 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
#include <algorithm>
#include <array>
-#include <iomanip>
#include <limits.h>
-#include <sstream>
-
-#include <libcamera/request.h>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/request.h>
/**
* \file stream.h
@@ -392,7 +392,23 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
std::string StreamConfiguration::toString() const
{
- return size.toString() + "-" + pixelFormat.toString();
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a StreamConfiguration into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] cfg The StreamConfiguration
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg)
+{
+ out << cfg.size << "-" << cfg.pixelFormat;
+ return out;
}
/**
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
index 44c3331b..3d9885b0 100644
--- a/src/libcamera/sysfs.cpp
+++ b/src/libcamera/sysfs.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.cpp - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#include "libcamera/internal/sysfs.h"
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
index 0173b75a..90662d12 100644
--- a/src/libcamera/tracepoints.cpp
+++ b/src/libcamera/tracepoints.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * tracepoints.cpp - Tracepoints with lttng
+ * Tracepoints with lttng
*/
#define TRACEPOINT_CREATE_PROBES
#define TRACEPOINT_DEFINE
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
index fb2d55ac..9fe8b562 100644
--- a/src/libcamera/transform.cpp
+++ b/src/libcamera/transform.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.cpp - 2D plane transforms.
+ * 2D plane transforms.
*/
#include <libcamera/transform.h>
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 24d208ef..2f65a43a 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#include "libcamera/internal/v4l2_device.h"
#include <fcntl.h>
-#include <iomanip>
-#include <limits.h>
#include <map>
+#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
@@ -206,10 +205,29 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
if (info.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) {
ControlType type;
+ ControlValue &value = ctrl.second;
+ Span<uint8_t> data;
switch (info.type) {
case V4L2_CTRL_TYPE_U8:
type = ControlTypeByte;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u8 = data.data();
+ break;
+
+ case V4L2_CTRL_TYPE_U16:
+ type = ControlTypeUnsigned16;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ break;
+
+ case V4L2_CTRL_TYPE_U32:
+ type = ControlTypeUnsigned32;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
break;
default:
@@ -219,11 +237,6 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
return {};
}
- ControlValue &value = ctrl.second;
- value.reserve(type, true, info.elems);
- Span<uint8_t> data = value.data();
-
- v4l2Ctrl.p_u8 = data.data();
v4l2Ctrl.size = data.size();
}
}
@@ -301,6 +314,30 @@ int V4L2Device::setControls(ControlList *ctrls)
/* Set the v4l2_ext_control value for the write operation. */
ControlValue &value = ctrl->second;
switch (iter->first->type()) {
+ case ControlTypeUnsigned16: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint16_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeUnsigned32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint32_t>();
+ }
+
+ break;
+ }
+
case ControlTypeInteger32: {
if (value.isArray()) {
Span<uint8_t> data = value.data();
@@ -490,6 +527,12 @@ ControlType V4L2Device::v4l2CtrlType(uint32_t ctrlType)
case V4L2_CTRL_TYPE_BOOLEAN:
return ControlTypeBool;
+ case V4L2_CTRL_TYPE_U16:
+ return ControlTypeUnsigned16;
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlTypeUnsigned32;
+
case V4L2_CTRL_TYPE_INTEGER:
return ControlTypeInteger32;
@@ -522,7 +565,15 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &
const std::string name(static_cast<const char *>(ctrl.name), len);
const ControlType type = v4l2CtrlType(ctrl.type);
- return std::make_unique<ControlId>(ctrl.id, name, type);
+ ControlId::DirectionFlags flags;
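+ /*
+ * V4L2 read-only controls can only be reported (Direction::Out),
+ * write-only controls can only be set (Direction::In).
+ */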
+ if (ctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)
+ flags = ControlId::Direction::Out;
+ else if (ctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ flags = ControlId::Direction::In;
+ else
+ flags = ControlId::Direction::In | ControlId::Direction::Out;
+
+ return std::make_unique<ControlId>(ctrl.id, name, "v4l2", type, flags);
}
/**
@@ -538,6 +589,16 @@ std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl
static_cast<uint8_t>(ctrl.maximum),
static_cast<uint8_t>(ctrl.default_value));
+ case V4L2_CTRL_TYPE_U16:
+ return ControlInfo(static_cast<uint16_t>(ctrl.minimum),
+ static_cast<uint16_t>(ctrl.maximum),
+ static_cast<uint16_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlInfo(static_cast<uint32_t>(ctrl.minimum),
+ static_cast<uint32_t>(ctrl.maximum),
+ static_cast<uint32_t>(ctrl.default_value));
+
case V4L2_CTRL_TYPE_BOOLEAN:
return ControlInfo(static_cast<bool>(ctrl.minimum),
static_cast<bool>(ctrl.maximum),
@@ -624,6 +685,8 @@ void V4L2Device::listControls()
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
break;
/* \todo Support other control types. */
default:
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
index 731dc10f..e8b3eb9c 100644
--- a/src/libcamera/v4l2_pixelformat.cpp
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Google Inc.
* Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.cpp - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#include "libcamera/internal/v4l2_pixelformat.h"
@@ -71,6 +71,10 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
{ formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
/* YUV packed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
@@ -135,6 +139,8 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::R10_CSI2P, "10-bit Greyscale Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y12),
{ formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12P),
+ { formats::R12_CSI2P, "12-bit Greyscale Packed" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_Y16),
{ formats::R16, "16-bit Greyscale" } },
@@ -203,6 +209,16 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
{ formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
{ V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
{ formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
/* Compressed formats. */
{ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
@@ -357,6 +373,40 @@ V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
}
/**
+ * \brief Test if a V4L2PixelFormat is one of the line-based generic metadata
+ * formats
+ *
+ * A limited number of metadata formats, the ones that represent generic
+ * line-based metadata buffers, need to have their width, height and
+ * bytesperline set by userspace.
+ *
+ * This function tests if the current V4L2PixelFormat is one of those.
+ *
+ * Note: It would have been nicer to store this information in a
+ * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to
+ * applications, there are no PixelFormat or DRM fourcc codes associated with
+ * them.
+ *
+ * \return True if the V4L2PixelFormat is a generic line-based metadata format,
+ * false otherwise
+ */
+bool V4L2PixelFormat::isGenericLineBasedMetadata() const
+{
+ switch (fourcc_) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* \brief Insert a text representation of a V4L2PixelFormat into an output
* stream
* \param[in] out The output stream
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 1076b700..33279654 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,27 +2,32 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#include "libcamera/internal/v4l2_subdevice.h"
#include <fcntl.h>
-#include <iomanip>
-#include <regex>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
+#pragma GCC diagnostic push
+#if defined __SANITIZE_ADDRESS__ && defined __OPTIMIZE__
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+#include <regex>
+#pragma GCC diagnostic pop
+
#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>
-#include <libcamera/geometry.h>
-
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
+#include <libcamera/geometry.h>
+
#include "libcamera/internal/formats.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"
@@ -189,6 +194,20 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
.bitsPerPixel = 24,
.colourEncoding = PixelFormatInfo::ColourEncodingRGB,
} },
+ { MEDIA_BUS_FMT_RGB121212_1X36, {
+ .name = "RGB121212_1X36",
+ .code = MEDIA_BUS_FMT_RGB121212_1X36,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 36,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB202020_1X60, {
+ .name = "RGB202020_1X60",
+ .code = MEDIA_BUS_FMT_RGB202020_1X60,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 60,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
{ MEDIA_BUS_FMT_ARGB8888_1X32, {
.name = "ARGB8888_1X32",
.code = MEDIA_BUS_FMT_ARGB8888_1X32,
@@ -651,6 +670,62 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
.bitsPerPixel = 14,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
} },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, {
+ .name = "SBGGR20_1X20",
+ .code = MEDIA_BUS_FMT_SBGGR20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, {
+ .name = "SGBRG20_1X20",
+ .code = MEDIA_BUS_FMT_SGBRG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, {
+ .name = "SGRBG20_1X20",
+ .code = MEDIA_BUS_FMT_SGRBG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, {
+ .name = "SRGGB20_1X20",
+ .code = MEDIA_BUS_FMT_SRGGB20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
/* \todo Clarify colour encoding for HSV formats */
{ MEDIA_BUS_FMT_AHSV8888_1X32, {
.name = "AHSV8888_1X32",
@@ -673,6 +748,69 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
.bitsPerPixel = 0,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
} },
+ { MEDIA_BUS_FMT_META_8, {
+ .name = "META_8",
+ .code = MEDIA_BUS_FMT_META_8,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_10, {
+ .name = "META_10",
+ .code = MEDIA_BUS_FMT_META_10,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_12, {
+ .name = "META_12",
+ .code = MEDIA_BUS_FMT_META_12,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_14, {
+ .name = "META_14",
+ .code = MEDIA_BUS_FMT_META_14,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_16, {
+ .name = "META_16",
+ .code = MEDIA_BUS_FMT_META_16,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_20, {
+ .name = "META_20",
+ .code = MEDIA_BUS_FMT_META_20,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_24, {
+ .name = "META_24",
+ .code = MEDIA_BUS_FMT_META_24,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_CCS_EMBEDDED, {
+ .name = "CCS_EMBEDDED",
+ .code = MEDIA_BUS_FMT_CCS_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_OV2740_EMBEDDED, {
+ .name = "OV2740_EMBEDDED",
+ .code = MEDIA_BUS_FMT_OV2740_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
};
} /* namespace */
@@ -1338,8 +1476,62 @@ void routeToKernel(const V4L2Subdevice::Route &route,
kroute.flags = route.flags;
}
+/*
+ * Legacy routing support for pre-v6.10-rc1 kernels. Drop when v6.12-rc1 gets
+ * released.
+ */
+struct v4l2_subdev_routing_legacy {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
+#define VIDIOC_SUBDEV_G_ROUTING_LEGACY _IOWR('V', 38, struct v4l2_subdev_routing_legacy)
+#define VIDIOC_SUBDEV_S_ROUTING_LEGACY _IOWR('V', 39, struct v4l2_subdev_routing_legacy)
+
} /* namespace */
+int V4L2Subdevice::getRoutingLegacy(Routing *routing, Whence whence)
+{
+ struct v4l2_subdev_routing_legacy rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
/**
* \brief Retrieve the subdevice's internal routing table
* \param[out] routing The routing table
@@ -1360,19 +1552,25 @@ int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
rt.which = whence;
int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
- if (ret == 0 || ret == -ENOTTY)
- return ret;
+ if (ret == -ENOTTY)
+ return V4L2Subdevice::getRoutingLegacy(routing, whence);
- if (ret != -ENOSPC) {
+ if (ret) {
LOG(V4L2, Error)
<< "Failed to retrieve number of routes: "
<< strerror(-ret);
return ret;
}
+ if (!rt.num_routes)
+ return 0;
+
std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
if (ret) {
LOG(V4L2, Error)
@@ -1393,6 +1591,33 @@ int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
return 0;
}
+int V4L2Subdevice::setRoutingLegacy(Routing *routing, Whence whence)
+{
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing_legacy rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
/**
* \brief Set a routing table on the V4L2 subdevice
* \param[inout] routing The routing table
@@ -1419,16 +1644,43 @@ int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
struct v4l2_subdev_routing rt = {};
rt.which = whence;
+ rt.len_routes = routes.size();
rt.num_routes = routes.size();
rt.routes = reinterpret_cast<uintptr_t>(routes.data());
int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return setRoutingLegacy(routing, whence);
+
if (ret) {
LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
return ret;
}
- routes.resize(rt.num_routes);
+ /*
+ * The kernel may want to return more routes than we have space for. In
+ * that event, we must issue a VIDIOC_SUBDEV_G_ROUTING call to retrieve
+ * the additional routes.
+ */
+ if (rt.num_routes > routes.size()) {
+ routes.resize(rt.num_routes);
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
routing->resize(rt.num_routes);
for (const auto &[i, route] : utils::enumerate(routes))
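The sequence above relies on the len_routes protocol added to the routing ioctls: the caller advertises its buffer capacity in len_routes, and the kernel reports the actual number of routes back in num_routes, which may exceed the capacity when the driver expands the table. Below is a minimal sketch of the query half of that protocol in isolation, assuming an open subdevice file descriptor fd and a kernel with the updated VIDIOC_SUBDEV_G_ROUTING semantics; queryRoutes() is a hypothetical helper, not a libcamera API.

#include <stdint.h>
#include <sys/ioctl.h>
#include <vector>

#include <linux/v4l2-subdev.h>

std::vector<v4l2_subdev_route> queryRoutes(int fd)
{
	struct v4l2_subdev_routing rt = {};
	rt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	/* With len_routes == 0, the kernel only reports the route count. */
	if (ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &rt) < 0 || !rt.num_routes)
		return {};

	/* Advertise the buffer capacity and fetch the routes. */
	std::vector<v4l2_subdev_route> routes(rt.num_routes);
	rt.len_routes = routes.size();
	rt.num_routes = 0;
	rt.routes = reinterpret_cast<uintptr_t>(routes.data());

	if (ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &rt) < 0)
		return {};

	/* num_routes now holds the count actually filled in. */
	routes.resize(rt.num_routes);
	return routes;
}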
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index a72ef64d..e241eb47 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
#include "libcamera/internal/v4l2_videodevice.h"
@@ -10,7 +10,6 @@
#include <algorithm>
#include <array>
#include <fcntl.h>
-#include <iomanip>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
@@ -803,12 +802,19 @@ std::string V4L2VideoDevice::logPrefix() const
*/
int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return getFormatMeta(format);
- else if (caps_.isMultiplanar())
- return getFormatMultiplane(format);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return getFormatSingleplane(format);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return getFormatMultiplane(format);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return getFormatMeta(format);
+ default:
+ return -EINVAL;
+ }
}
/**
@@ -823,12 +829,19 @@ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return trySetFormatMeta(format, false);
- else if (caps_.isMultiplanar())
- return trySetFormatMultiplane(format, false);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return trySetFormatSingleplane(format, false);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return trySetFormatMultiplane(format, false);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return trySetFormatMeta(format, false);
+ default:
+ return -EINVAL;
+ }
}
/**
@@ -842,13 +855,25 @@ int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
{
- int ret = 0;
- if (caps_.isMeta())
- ret = trySetFormatMeta(format, true);
- else if (caps_.isMultiplanar())
- ret = trySetFormatMultiplane(format, true);
- else
+ int ret;
+
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
ret = trySetFormatSingleplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ ret = trySetFormatMultiplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ ret = trySetFormatMeta(format, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
/* Cache the set format on success. */
if (ret)
@@ -863,7 +888,7 @@ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
@@ -873,25 +898,42 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
return ret;
}
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
+ format->planes[0].size = meta->buffersize;
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
+
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
{
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
- pix->dataformat = format->fourcc;
- pix->buffersize = format->planes[0].size;
+ meta->dataformat = format->fourcc;
+ meta->buffersize = format->planes[0].size;
+ if (genericLineBased) {
+ meta->width = format->size.width;
+ meta->height = format->size.height;
+ meta->bytesperline = format->planes[0].bpl;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
LOG(V4L2, Error)
@@ -904,12 +946,18 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
* Return to caller the format actually applied on the video device,
* which might differ from the requested one.
*/
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+ format->planes[0].size = meta->buffersize;
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
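With the generic line-based metadata formats, the width, height and bytesperline fields of v4l2_meta_format carry real geometry, whereas opaque metadata formats only use buffersize. A rough usage sketch, assuming a kernel that exposes V4L2_META_FMT_GENERIC_8 and a metadata capture node at the hypothetical path /dev/video8:

V4L2VideoDevice meta("/dev/video8");
if (meta.open())
	return;

V4L2DeviceFormat format;
format.fourcc = V4L2PixelFormat(V4L2_META_FMT_GENERIC_8);
format.size = Size(512, 2);		/* 512 bytes per line, 2 lines */
format.planes[0].bpl = 512;

if (!meta.setFormat(&format)) {
	/*
	 * On success the driver reports the applied geometry back, and
	 * planes[0].size holds the total buffer size it computed.
	 */
}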
@@ -1190,6 +1238,38 @@ std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat)
}
/**
+ * \brief Get the selection rectangle for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2VideoDevice::getSelection(unsigned int target, Rectangle *rect)
+{
+ struct v4l2_selection sel = {};
+
+ sel.type = bufferType_;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error) << "Unable to get rectangle " << target
+ << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
+}
+
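A short usage sketch of the new getter, assuming video points to an open, configured V4L2VideoDevice:

Rectangle crop;
int ret = video->getSelection(V4L2_SEL_TGT_CROP, &crop);
if (ret)
	return ret;

LOG(V4L2, Debug) << "Active crop rectangle: " << crop.toString();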
+/**
* \brief Set a selection rectangle \a rect for \a target
* \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
* \param[inout] rect The selection rectangle to be applied
@@ -1815,7 +1895,7 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
* Detect kernel drivers which do not reset the sequence number to zero
* on stream start.
*/
- if (!firstFrame_) {
+ if (!firstFrame_.has_value()) {
if (buf.sequence)
LOG(V4L2, Info)
<< "Zero sequence expected for first frame (got "
@@ -2067,15 +2147,24 @@ V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelForma
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
+ * Memory-to-memory devices in the kernel that use the V4L2 M2M API can
+ * operate with multiple contexts, allowing parallel operations on a single
+ * device. Each instance of a V4L2M2MDevice represents a single context.
+ *
* The V4L2M2MDevice manages two V4L2VideoDevice instances on the same
* deviceNode which operate together using two queues to implement the V4L2
* Memory to Memory API.
*
- * The two devices should be opened by calling open() on the V4L2M2MDevice, and
- * can be closed by calling close on the V4L2M2MDevice.
+ * Users of this class should create a new instance of the V4L2M2MDevice for
+ * each desired execution context and then open it by calling open() on the
+ * V4L2M2MDevice and close it by calling close() on the V4L2M2MDevice.
*
* Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture
* or output V4L2VideoDevice is not permitted.
+ *
+ * Once the M2M device is open, users can operate on the output and capture
+ * queues represented by the V4L2VideoDevice returned by the output() and
+ * capture() functions.
*/
/**
diff --git a/src/libcamera/vector.cpp b/src/libcamera/vector.cpp
new file mode 100644
index 00000000..85ca2208
--- /dev/null
+++ b/src/libcamera/vector.cpp
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+
+#include "libcamera/internal/vector.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file vector.h
+ * \brief Vector class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Vector)
+
+/**
+ * \class Vector
+ * \brief Vector class
+ * \tparam T Type of numerical values to be stored in the vector
+ * \tparam Rows Number of dimensions of the vector (= number of elements)
+ */
+
+/**
+ * \fn Vector::Vector()
+ * \brief Construct an uninitialized vector
+ */
+
+/**
+ * \fn Vector::Vector(T scalar)
+ * \brief Construct a vector filled with a \a scalar value
+ * \param[in] scalar The scalar value
+ */
+
+/**
+ * \fn Vector::Vector(const std::array<T, Rows> &data)
+ * \brief Construct a vector from the supplied data
+ * \param[in] data The data from which to construct the vector
+ *
+ * The size of \a data must be equal to the dimension Rows of the vector.
+ */
+
+/**
+ * \fn T Vector::operator[](size_t i) const
+ * \brief Access an element of the vector by index
+ * \param[in] i Index of the element to retrieve
+ * \return The element at index \a i of the vector
+ */
+
+/**
+ * \fn T &Vector::operator[](size_t i)
+ * \copydoc Vector::operator[](size_t i) const
+ */
+
+/**
+ * \fn Vector::operator-() const
+ * \brief Negate a Vector by negating all of its elements
+ * \return The negated vector
+ */
+
+/**
+ * \fn Vector::operator+(Vector const &other) const
+ * \brief Calculate the sum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise sum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator+(T scalar) const
+ * \brief Calculate the sum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise sum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator-(Vector const &other) const
+ * \brief Calculate the difference of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise subtraction of \a other from this vector
+ */
+
+/**
+ * \fn Vector::operator-(T scalar) const
+ * \brief Calculate the difference of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise subtraction of \a scalar from this vector
+ */
+
+/**
+ * \fn Vector::operator*(const Vector &other) const
+ * \brief Calculate the product of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise product of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator*(T scalar) const
+ * \brief Calculate the product of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise product of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator/(const Vector &other) const
+ * \brief Calculate the quotient of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise division of this vector by \a other
+ */
+
+/**
+ * \fn Vector::operator/(T scalar) const
+ * \brief Calculate the quotient of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise division of this vector by \a scalar
+ */
+
+/**
+ * \fn Vector::operator+=(Vector const &other)
+ * \brief Add \a other element-wise to this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator+=(T scalar)
+ * \brief Add \a scalar element-wise to this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(Vector const &other)
+ * \brief Subtract \a other element-wise from this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(T scalar)
+ * \brief Subtract \a scalar element-wise from this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(const Vector &other)
+ * \brief Multiply this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(T scalar)
+ * \brief Multiply this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(const Vector &other)
+ * \brief Divide this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(T scalar)
+ * \brief Divide this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::min(const Vector &other) const
+ * \brief Calculate the minimum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise minimum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::min(T scalar) const
+ * \brief Calculate the minimum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise minimum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::max(const Vector &other) const
+ * \brief Calculate the maximum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise maximum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::max(T scalar) const
+ * \brief Calculate the maximum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise maximum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::dot(const Vector<T, Rows> &other) const
+ * \brief Compute the dot product
+ * \param[in] other The other vector
+ * \return The dot product of the two vectors
+ */
+
+/**
+ * \fn constexpr T &Vector::x()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::y()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::z()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::x() const
+ * \copydoc Vector::x()
+ */
+
+/**
+ * \fn constexpr const T &Vector::y() const
+ * \copydoc Vector::y()
+ */
+
+/**
+ * \fn constexpr const T &Vector::z() const
+ * \copydoc Vector::z()
+ */
+
+/**
+ * \fn constexpr T &Vector::r()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::g()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::b()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::r() const
+ * \copydoc Vector::r()
+ */
+
+/**
+ * \fn constexpr const T &Vector::g() const
+ * \copydoc Vector::g()
+ */
+
+/**
+ * \fn constexpr const T &Vector::b() const
+ * \copydoc Vector::b()
+ */
+
+/**
+ * \fn Vector::length2()
+ * \brief Get the squared length of the vector
+ * \return The squared length of the vector
+ */
+
+/**
+ * \fn Vector::length()
+ * \brief Get the length of the vector
+ * \return The length of the vector
+ */
+
+/**
+ * \fn Vector::sum() const
+ * \brief Calculate the sum of all the vector elements
+ * \tparam R The type of the sum
+ *
+ * The type R of the sum defaults to the type T of the elements, but can be set
+ * explicitly to a wider type if accumulating in T would risk overflow.
+ *
+ * \return The sum of all the vector elements
+ */
+
+/**
+ * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+ * \brief Multiply a matrix by a vector
+ * \tparam T Numerical type of the contents of the matrix and vector
+ * \tparam Rows The number of rows in the matrix
+ * \tparam Cols The number of columns in the matrix (= rows in the vector)
+ * \param[in] m The matrix
+ * \param[in] v The vector
+ * \return Product of matrix \a m and vector \a v
+ */
+
+/**
+ * \typedef RGB
+ * \brief A Vector of 3 elements representing an RGB pixel value
+ */
+
+/**
+ * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for equality
+ * \return True if the two vectors are equal, false otherwise
+ */
+
+/**
+ * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for inequality
+ * \return True if the two vectors are not equal, false otherwise
+ */
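A short sketch of the arithmetic the class provides, assuming the internal header is available to the caller:

#include <array>

#include "libcamera/internal/vector.h"

using namespace libcamera;

Vector<double, 3> gains(std::array<double, 3>{ 1.2, 1.0, 1.6 });
Vector<double, 3> pixel(std::array<double, 3>{ 0.5, 0.5, 0.5 });

Vector<double, 3> balanced = pixel * gains;	/* element-wise product */
double norm2 = balanced.dot(balanced);		/* same as length2() */
double red = balanced.r();			/* first element, 0.6 */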
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Vector, Error)
+ << "Wrong number of values in YAML vector: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
index bf21141e..a5e42461 100644
--- a/src/libcamera/yaml_parser.cpp
+++ b/src/libcamera/yaml_parser.cpp
@@ -2,15 +2,16 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml_parser.cpp - libcamera YAML parsing helper
+ * libcamera YAML parsing helper
*/
#include "libcamera/internal/yaml_parser.h"
-#include <cstdlib>
+#include <charconv>
#include <errno.h>
#include <functional>
#include <limits>
+#include <stdlib.h>
#include <libcamera/base/file.h>
#include <libcamera/base/log.h>
@@ -18,7 +19,7 @@
#include <yaml.h>
/**
- * \file libcamera/internal/yaml_parser.h
+ * \file yaml_parser.h
* \brief A YAML parser helper
*/
@@ -38,12 +39,12 @@ static const YamlObject empty;
* \brief A class representing the tree structure of the YAML content
*
* The YamlObject class represents the tree structure of YAML content. A
- * YamlObject can be a dictionary or list of YamlObjects or a value if a tree
- * leaf.
+ * YamlObject can be empty, a dictionary or list of YamlObjects, or, when it
+ * is a tree leaf, a value.
*/
YamlObject::YamlObject()
- : type_(Type::Value)
+ : type_(Type::Empty)
{
}
@@ -71,6 +72,20 @@ YamlObject::~YamlObject() = default;
*/
/**
+ * \fn YamlObject::isEmpty()
+ * \brief Return whether the YamlObject is empty
+ *
+ * \return True if the YamlObject is empty, false otherwise
+ */
+
+/**
+ * \fn YamlObject::operator bool()
+ * \brief Return whether the YamlObject is non-empty
+ *
+ * \return False if the YamlObject is empty, true otherwise
+ */
+
+/**
* \fn YamlObject::size()
* \brief Retrieve the number of elements in a dictionary or list YamlObject
*
@@ -104,7 +119,7 @@ std::size_t YamlObject::size() const
*/
/**
- * \fn template<typename T> YamlObject::get<T>(const T &defaultValue) const
+ * \fn template<typename T, typename U> YamlObject::get<T>(U &&defaultValue) const
* \brief Parse the YamlObject as a \a T value
* \param[in] defaultValue The default value when failing to parse
*
@@ -118,172 +133,74 @@ std::size_t YamlObject::size() const
#ifndef __DOXYGEN__
template<>
-std::optional<bool> YamlObject::get() const
+std::optional<bool>
+YamlObject::Getter<bool>::get(const YamlObject &obj) const
{
- if (type_ != Type::Value)
+ if (obj.type_ != Type::Value)
return std::nullopt;
- if (value_ == "true")
+ if (obj.value_ == "true")
return true;
- else if (value_ == "false")
+ else if (obj.value_ == "false")
return false;
return std::nullopt;
}
-namespace {
-
-bool parseSignedInteger(const std::string &str, long min, long max,
- long *result)
-{
- if (str == "")
- return false;
-
- char *end;
-
- errno = 0;
- long value = std::strtol(str.c_str(), &end, 10);
-
- if ('\0' != *end || errno == ERANGE || value < min || value > max)
- return false;
-
- *result = value;
- return true;
-}
-
-bool parseUnsignedInteger(const std::string &str, unsigned long max,
- unsigned long *result)
-{
- if (str == "")
- return false;
-
- /*
- * strtoul() accepts strings representing a negative number, in which
- * case it negates the converted value. We don't want to silently accept
- * negative values and return a large positive number, so check for a
- * minus sign (after optional whitespace) and return an error.
- */
- std::size_t found = str.find_first_not_of(" \t");
- if (found != std::string::npos && str[found] == '-')
- return false;
-
- char *end;
-
- errno = 0;
- unsigned long value = std::strtoul(str.c_str(), &end, 10);
-
- if ('\0' != *end || errno == ERANGE || value > max)
- return false;
-
- *result = value;
- return true;
-}
-
-} /* namespace */
-
-template<>
-std::optional<int8_t> YamlObject::get() const
-{
- if (type_ != Type::Value)
- return std::nullopt;
-
- long value;
-
- if (!parseSignedInteger(value_, std::numeric_limits<int8_t>::min(),
- std::numeric_limits<int8_t>::max(), &value))
- return std::nullopt;
-
- return value;
-}
-
-template<>
-std::optional<uint8_t> YamlObject::get() const
-{
- if (type_ != Type::Value)
- return std::nullopt;
-
- unsigned long value;
-
- if (!parseUnsignedInteger(value_, std::numeric_limits<uint8_t>::max(),
- &value))
- return std::nullopt;
-
- return value;
-}
-
-template<>
-std::optional<int16_t> YamlObject::get() const
-{
- if (type_ != Type::Value)
- return std::nullopt;
-
- long value;
-
- if (!parseSignedInteger(value_, std::numeric_limits<int16_t>::min(),
- std::numeric_limits<int16_t>::max(), &value))
- return std::nullopt;
-
- return value;
-}
-
-template<>
-std::optional<uint16_t> YamlObject::get() const
+template<typename T>
+struct YamlObject::Getter<T, std::enable_if_t<
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T>>>
{
- if (type_ != Type::Value)
- return std::nullopt;
-
- unsigned long value;
-
- if (!parseUnsignedInteger(value_, std::numeric_limits<uint16_t>::max(),
- &value))
- return std::nullopt;
+ std::optional<T> get(const YamlObject &obj) const
+ {
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
- return value;
-}
+ const std::string &str = obj.value_;
+ T value;
-template<>
-std::optional<int32_t> YamlObject::get() const
-{
- if (type_ != Type::Value)
- return std::nullopt;
+ auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
+ value);
+ if (ptr != str.data() + str.size() || ec != std::errc())
+ return std::nullopt;
- long value;
+ return value;
+ }
+};
- if (!parseSignedInteger(value_, std::numeric_limits<int32_t>::min(),
- std::numeric_limits<int32_t>::max(), &value))
- return std::nullopt;
-
- return value;
-}
+template struct YamlObject::Getter<int8_t>;
+template struct YamlObject::Getter<uint8_t>;
+template struct YamlObject::Getter<int16_t>;
+template struct YamlObject::Getter<uint16_t>;
+template struct YamlObject::Getter<int32_t>;
+template struct YamlObject::Getter<uint32_t>;
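The specialisation above leans on std::from_chars consuming the entire string, which makes the parser strict by construction: leading whitespace, trailing characters and out-of-range values all yield std::nullopt. The same idiom in a stand-alone sketch, with parseStrict() as a hypothetical helper:

#include <charconv>
#include <cstdint>
#include <optional>
#include <string>

template<typename T>
std::optional<T> parseStrict(const std::string &str)
{
	T value;

	auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
					 value);
	if (ptr != str.data() + str.size() || ec != std::errc())
		return std::nullopt;

	return value;
}

/*
 * parseStrict<uint8_t>("255") -> 255
 * parseStrict<uint8_t>("256") -> nullopt (out of range)
 * parseStrict<int32_t>(" 42") -> nullopt (leading whitespace)
 * parseStrict<int32_t>("42x") -> nullopt (trailing characters)
 */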
template<>
-std::optional<uint32_t> YamlObject::get() const
+std::optional<float>
+YamlObject::Getter<float>::get(const YamlObject &obj) const
{
- if (type_ != Type::Value)
- return std::nullopt;
-
- unsigned long value;
-
- if (!parseUnsignedInteger(value_, std::numeric_limits<uint32_t>::max(),
- &value))
- return std::nullopt;
-
- return value;
+ return obj.get<double>();
}
template<>
-std::optional<double> YamlObject::get() const
+std::optional<double>
+YamlObject::Getter<double>::get(const YamlObject &obj) const
{
- if (type_ != Type::Value)
+ if (obj.type_ != Type::Value)
return std::nullopt;
- if (value_ == "")
+ if (obj.value_.empty())
return std::nullopt;
char *end;
errno = 0;
- double value = utils::strtod(value_.c_str(), &end);
+ double value = utils::strtod(obj.value_.c_str(), &end);
if ('\0' != *end || errno == ERANGE)
return std::nullopt;
@@ -292,28 +209,30 @@ std::optional<double> YamlObject::get() const
}
template<>
-std::optional<std::string> YamlObject::get() const
+std::optional<std::string>
+YamlObject::Getter<std::string>::get(const YamlObject &obj) const
{
- if (type_ != Type::Value)
+ if (obj.type_ != Type::Value)
return std::nullopt;
- return value_;
+ return obj.value_;
}
template<>
-std::optional<Size> YamlObject::get() const
+std::optional<Size>
+YamlObject::Getter<Size>::get(const YamlObject &obj) const
{
- if (type_ != Type::List)
+ if (obj.type_ != Type::List)
return std::nullopt;
- if (list_.size() != 2)
+ if (obj.list_.size() != 2)
return std::nullopt;
- auto width = list_[0].value->get<uint32_t>();
+ auto width = obj.list_[0].value->get<uint32_t>();
if (!width)
return std::nullopt;
- auto height = list_[1].value->get<uint32_t>();
+ auto height = obj.list_[1].value->get<uint32_t>();
if (!height)
return std::nullopt;
@@ -339,6 +258,7 @@ std::optional<Size> YamlObject::get() const
template<typename T,
std::enable_if_t<
std::is_same_v<bool, T> ||
+ std::is_same_v<float, T> ||
std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> ||
@@ -367,6 +287,7 @@ std::optional<std::vector<T>> YamlObject::getList() const
}
template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<float>> YamlObject::getList<float>() const;
template std::optional<std::vector<double>> YamlObject::getList<double>() const;
template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
@@ -424,7 +345,8 @@ template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
*
* This function retrieves an element of the YamlObject. Only YamlObject
 * instances of List type associate elements with an index; calling this function
- * on other types of instances is invalid and results in undefined behaviour.
+ * on other types of instances or with an invalid index results in an empty
+ * object.
*
* \return The YamlObject as an element of the list
*/
@@ -447,31 +369,31 @@ const YamlObject &YamlObject::operator[](std::size_t index) const
*
* \return True if an element exists, false otherwise
*/
-bool YamlObject::contains(const std::string &key) const
+bool YamlObject::contains(std::string_view key) const
{
- if (dictionary_.find(std::ref(key)) == dictionary_.end())
- return false;
-
- return true;
+ return dictionary_.find(key) != dictionary_.end();
}
/**
- * \fn YamlObject::operator[](const std::string &key) const
+ * \fn YamlObject::operator[](std::string_view key) const
* \brief Retrieve a member by name from the dictionary
*
 * This function retrieves a member of a YamlObject by name. Only YamlObject
 * instances of Dictionary type associate elements with names; calling this
- * function on other types of instances is invalid and results in undefined
- * behaviour.
+ * function on other types of instances or with a nonexistent key results in an
+ * empty object.
*
* \return The YamlObject corresponding to the \a key member
*/
-const YamlObject &YamlObject::operator[](const std::string &key) const
+const YamlObject &YamlObject::operator[](std::string_view key) const
{
- if (type_ != Type::Dictionary || !contains(key))
+ if (type_ != Type::Dictionary)
return empty;
auto iter = dictionary_.find(key);
+ if (iter == dictionary_.end())
+ return empty;
+
return *iter->second;
}
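Because both lookup operators now degrade to the static empty object instead of undefined behaviour, lookups can be chained without intermediate checks. A sketch, assuming root is a YamlObject parsed from a configuration file:

uint32_t width = root["sensor"]["output"]["width"].get<uint32_t>(1920);

if (!root["sensor"])
	LOG(YamlParser, Warning) << "Missing 'sensor' section, using defaults";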
@@ -587,8 +509,17 @@ YamlParserContext::EventPtr YamlParserContext::nextEvent()
EventPtr event(new yaml_event_t);
/* yaml_parser_parse returns 1 when it succeeds */
- if (!yaml_parser_parse(&parser_, event.get()))
+ if (!yaml_parser_parse(&parser_, event.get())) {
+ File *file = static_cast<File *>(parser_.read_handler_data);
+
+ LOG(YamlParser, Error) << file->fileName() << ":"
+ << parser_.problem_mark.line << ":"
+ << parser_.problem_mark.column << " "
+ << parser_.problem << " "
+ << parser_.context;
+
return nullptr;
+ }
return event;
}
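As a hypothetical example of the new diagnostic, a malformed mapping in a file named tuning.yaml might now be reported as "tuning.yaml:12:4 did not find expected key while parsing a block mapping", where the message and context strings come directly from libyaml.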
diff --git a/src/meson.build b/src/meson.build
index 165a77bb..8eb8f05b 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -27,6 +27,37 @@ else
ipa_sign_module = false
endif
+# libyuv, used by the Android adaptation layer and the virtual pipeline handler.
+# Fall back to a subproject if libyuv isn't found, as it's typically not provided
+# by distributions. Where libyuv is provided by a distribution, it may not
+# always supply a pkg-config implementation, requiring cxx.find_library() to
+# search for it.
+if not get_option('force_fallback_for').contains('libyuv')
+ libyuv_dep = dependency('libyuv', required : false)
+ if not libyuv_dep.found()
+ libyuv_dep = cxx.find_library('yuv', has_headers : 'libyuv.h',
+ required : false)
+ endif
+else
+ libyuv_dep = dependency('', required : false)
+endif
+
+if (pipelines.contains('virtual') or get_option('android').allowed()) and \
+ not libyuv_dep.found()
+ cmake = import('cmake')
+
+ libyuv_vars = cmake.subproject_options()
+ libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+ libyuv_vars.set_override_option('cpp_std', 'c++17')
+ libyuv_vars.append_compile_args('cpp',
+ '-Wno-sign-compare',
+ '-Wno-unused-variable',
+ '-Wno-unused-parameter')
+ libyuv_vars.append_link_args('-ljpeg')
+ libyuv = cmake.subproject('libyuv', options : libyuv_vars)
+ libyuv_dep = libyuv.dependency('yuv')
+endif
+
# libcamera must be built first as a dependency to the other components.
subdir('libcamera')
diff --git a/src/py/cam/cam_qt.py b/src/py/cam/cam_qt.py
index c1723b44..22d8c4da 100644
--- a/src/py/cam/cam_qt.py
+++ b/src/py/cam/cam_qt.py
@@ -2,7 +2,7 @@
# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
from helpers import mfb_to_rgb
-from PyQt5 import QtCore, QtGui, QtWidgets
+from PyQt6 import QtCore, QtGui, QtWidgets
import libcamera as libcam
import libcamera.utils
import sys
@@ -63,10 +63,10 @@ class QtRenderer:
self.buf_mmap_map = buf_mmap_map
def run(self):
- camnotif = QtCore.QSocketNotifier(self.cm.event_fd, QtCore.QSocketNotifier.Read)
+ camnotif = QtCore.QSocketNotifier(self.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
camnotif.activated.connect(lambda _: self.readcam())
- keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read)
+ keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
keynotif.activated.connect(lambda _: self.readkey())
print('Capturing...')
diff --git a/src/py/cam/cam_qtgl.py b/src/py/cam/cam_qtgl.py
index 6cfbd347..35b4b06b 100644
--- a/src/py/cam/cam_qtgl.py
+++ b/src/py/cam/cam_qtgl.py
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
-from PyQt5 import QtCore, QtWidgets
-from PyQt5.QtCore import Qt
+from PyQt6 import QtCore, QtWidgets
+from PyQt6.QtCore import Qt
import math
import os
@@ -142,10 +142,10 @@ class QtRenderer:
self.window = window
def run(self):
- camnotif = QtCore.QSocketNotifier(self.state.cm.event_fd, QtCore.QSocketNotifier.Read)
+ camnotif = QtCore.QSocketNotifier(self.state.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
camnotif.activated.connect(lambda _: self.readcam())
- keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read)
+ keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
keynotif.activated.connect(lambda _: self.readkey())
print('Capturing...')
@@ -175,8 +175,8 @@ class MainWindow(QtWidgets.QWidget):
def __init__(self, state):
super().__init__()
- self.setAttribute(Qt.WA_PaintOnScreen)
- self.setAttribute(Qt.WA_NativeWindow)
+ self.setAttribute(Qt.WidgetAttribute.WA_PaintOnScreen)
+ self.setAttribute(Qt.WidgetAttribute.WA_NativeWindow)
self.state = state
diff --git a/src/py/libcamera/gen-py-controls.py b/src/py/libcamera/gen-py-controls.py
index 8efbf95b..d43a7c1c 100755
--- a/src/py/libcamera/gen-py-controls.py
+++ b/src/py/libcamera/gen-py-controls.py
@@ -4,10 +4,12 @@
# Generate Python bindings controls from YAML
import argparse
-import string
+import jinja2
import sys
import yaml
+from controls import Control
+
def find_common_prefix(strings):
prefix = strings[0]
@@ -21,70 +23,39 @@ def find_common_prefix(strings):
return prefix
-def generate_py(controls, mode):
- out = ''
-
- vendors_class_def = []
- vendor_defs = []
- vendors = []
- for vendor, ctrl_list in controls.items():
- for ctrls in ctrl_list:
- name, ctrl = ctrls.popitem()
-
- if vendor not in vendors and vendor != 'libcamera':
- vendor_mode_str = f'{vendor.capitalize()}{mode.capitalize()}'
- vendors_class_def.append('class Py{}\n{{\n}};\n'.format(vendor_mode_str))
- vendor_defs.append('\tauto {} = py::class_<Py{}>(controls, \"{}\");'.format(vendor, vendor_mode_str, vendor))
- vendors.append(vendor)
-
- if vendor != 'libcamera':
- ns = 'libcamera::{}::{}::'.format(mode, vendor)
- container = vendor
- else:
- ns = 'libcamera::{}::'.format(mode)
- container = 'controls'
-
- out += f'\t{container}.def_readonly_static("{name}", static_cast<const libcamera::ControlId *>(&{ns}{name}));\n\n'
-
- enum = ctrl.get('enum')
- if not enum:
- continue
-
- cpp_enum = name + 'Enum'
-
- out += '\tpy::enum_<{}{}>({}, \"{}\")\n'.format(ns, cpp_enum, container, cpp_enum)
-
- if mode == 'controls':
- # Adjustments for controls
- if name == 'LensShadingMapMode':
- prefix = 'LensShadingMapMode'
- else:
- prefix = find_common_prefix([e['name'] for e in enum])
- else:
- # Adjustments for properties
- prefix = find_common_prefix([e['name'] for e in enum])
-
- for entry in enum:
- cpp_enum = entry['name']
- py_enum = entry['name'][len(prefix):]
-
- out += '\t\t.value(\"{}\", {}{})\n'.format(py_enum, ns, cpp_enum)
-
- out += '\t;\n\n'
-
- return {'controls': out,
- 'vendors_class_def': '\n'.join(vendors_class_def),
- 'vendors_defs': '\n'.join(vendor_defs)}
+def extend_control(ctrl, mode):
+ if ctrl.vendor != 'libcamera':
+ ctrl.klass = ctrl.vendor
+ ctrl.namespace = f'{ctrl.vendor}::'
+ else:
+ ctrl.klass = mode
+ ctrl.namespace = ''
+
+ if not ctrl.is_enum:
+ return ctrl
+
+ if mode == 'controls':
+ # Adjustments for controls
+ if ctrl.name == 'LensShadingMapMode':
+ prefix = 'LensShadingMapMode'
+ else:
+ prefix = find_common_prefix([e.name for e in ctrl.enum_values])
+ else:
+ # Adjustments for properties
+ prefix = find_common_prefix([e.name for e in ctrl.enum_values])
+ for enum in ctrl.enum_values:
+ enum.py_name = enum.name[len(prefix):]
-def fill_template(template, data):
- template = open(template, 'rb').read()
- template = template.decode('utf-8')
- template = string.Template(template)
- return template.substitute(data)
+ return ctrl
def main(argv):
+ headers = {
+ 'controls': 'control_ids.h',
+ 'properties': 'property_ids.h',
+ }
+
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-m', type=str, required=True,
@@ -97,26 +68,41 @@ def main(argv):
help='Input file name.')
args = parser.parse_args(argv[1:])
- if args.mode not in ['controls', 'properties']:
+ if not headers.get(args.mode):
print(f'Invalid mode option "{args.mode}"', file=sys.stderr)
return -1
- controls = {}
+ controls = []
+ vendors = []
+
for input in args.input:
- data = open(input, 'rb').read()
- vendor = yaml.safe_load(data)['vendor']
- controls[vendor] = yaml.safe_load(data)['controls']
+ data = yaml.safe_load(open(input, 'rb').read())
+
+ vendor = data['vendor']
+ if vendor != 'libcamera':
+ vendors.append(vendor)
+
+ for ctrl in data['controls']:
+ ctrl = Control(*ctrl.popitem(), vendor, args.mode)
+ controls.append(extend_control(ctrl, args.mode))
- data = generate_py(controls, args.mode)
+ data = {
+ 'mode': args.mode,
+ 'header': headers[args.mode],
+ 'vendors': vendors,
+ 'controls': controls,
+ }
- data = fill_template(args.template, data)
+ env = jinja2.Environment()
+ template = env.from_string(open(args.template, 'r', encoding='utf-8').read())
+ string = template.render(data)
if args.output:
- output = open(args.output, 'wb')
- output.write(data.encode('utf-8'))
+ output = open(args.output, 'w', encoding='utf-8')
+ output.write(string)
output.close()
else:
- sys.stdout.write(data)
+ sys.stdout.write(string)
return 0
diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build
index 4807ca7d..596a203c 100644
--- a/src/py/libcamera/meson.build
+++ b/src/py/libcamera/meson.build
@@ -26,37 +26,24 @@ pycamera_sources = files([
'py_transform.cpp',
])
-# Generate controls
+# Generate controls and properties
-gen_py_controls_input_files = []
gen_py_controls_template = files('py_controls_generated.cpp.in')
-
gen_py_controls = files('gen-py-controls.py')
-foreach file : controls_files
- gen_py_controls_input_files += files('../../libcamera/' + file)
-endforeach
-
pycamera_sources += custom_target('py_gen_controls',
- input : gen_py_controls_input_files,
+ input : controls_files,
output : ['py_controls_generated.cpp'],
command : [gen_py_controls, '--mode', 'controls', '-o', '@OUTPUT@',
- '-t', gen_py_controls_template, '@INPUT@'])
-
-# Generate properties
-
-gen_py_property_enums_input_files = []
-gen_py_properties_template = files('py_properties_generated.cpp.in')
-
-foreach file : properties_files
- gen_py_property_enums_input_files += files('../../libcamera/' + file)
-endforeach
+ '-t', gen_py_controls_template, '@INPUT@'],
+ env : py_build_env)
pycamera_sources += custom_target('py_gen_properties',
- input : gen_py_property_enums_input_files,
+ input : properties_files,
output : ['py_properties_generated.cpp'],
command : [gen_py_controls, '--mode', 'properties', '-o', '@OUTPUT@',
- '-t', gen_py_properties_template, '@INPUT@'])
+ '-t', gen_py_controls_template, '@INPUT@'],
+ env : py_build_env)
# Generate formats
diff --git a/src/py/libcamera/py_color_space.cpp b/src/py/libcamera/py_color_space.cpp
index 5201121a..fd5a5dab 100644
--- a/src/py/libcamera/py_color_space.cpp
+++ b/src/py/libcamera/py_color_space.cpp
@@ -12,6 +12,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/py/libcamera/py_controls_generated.cpp.in b/src/py/libcamera/py_controls_generated.cpp.in
index 8d282ce5..22a132d1 100644
--- a/src/py/libcamera/py_controls_generated.cpp.in
+++ b/src/py/libcamera/py_controls_generated.cpp.in
@@ -2,27 +2,46 @@
/*
* Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
*
- * Python bindings - Auto-generated controls
+ * Python bindings - Auto-generated {{mode}}
*
* This file is auto-generated. Do not edit.
*/
-#include <libcamera/control_ids.h>
+#include <libcamera/{{header}}>
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
-class PyControls
+class Py{{mode|capitalize}}
{
};
-${vendors_class_def}
+{% for vendor in vendors -%}
+class Py{{vendor|capitalize}}{{mode|capitalize}}
+{
+};
-void init_py_controls_generated(py::module& m)
+{% endfor -%}
+
+void init_py_{{mode}}_generated(py::module& m)
{
- auto controls = py::class_<PyControls>(m, "controls");
-${vendors_defs}
+ auto {{mode}} = py::class_<Py{{mode|capitalize}}>(m, "{{mode}}");
+{%- for vendor in vendors %}
+ auto {{vendor}} = py::class_<Py{{vendor|capitalize}}{{mode|capitalize}}>({{mode}}, "{{vendor}}");
+{%- endfor %}
+
+{% for ctrl in controls %}
+ {{ctrl.klass}}.def_readonly_static("{{ctrl.name}}", static_cast<const libcamera::ControlId *>(&libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}));
+{%- if ctrl.is_enum %}
-${controls}
+ py::enum_<libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}Enum>({{ctrl.klass}}, "{{ctrl.name}}Enum")
+{%- for enum in ctrl.enum_values %}
+ .value("{{enum.py_name}}", libcamera::{{mode}}::{{ctrl.namespace}}{{enum.name}})
+{%- endfor %}
+ ;
+{%- endif %}
+{% endfor -%}
}
diff --git a/src/py/libcamera/py_enums.cpp b/src/py/libcamera/py_enums.cpp
index e25689c6..9e75ec1a 100644
--- a/src/py/libcamera/py_enums.cpp
+++ b/src/py/libcamera/py_enums.cpp
@@ -9,6 +9,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
@@ -30,7 +32,8 @@ void init_py_enums(py::module &m)
.value("Float", ControlType::ControlTypeFloat)
.value("String", ControlType::ControlTypeString)
.value("Rectangle", ControlType::ControlTypeRectangle)
- .value("Size", ControlType::ControlTypeSize);
+ .value("Size", ControlType::ControlTypeSize)
+ .value("Point", ControlType::ControlTypePoint);
py::enum_<Orientation>(m, "Orientation")
.value("Rotate0", Orientation::Rotate0)
diff --git a/src/py/libcamera/py_formats_generated.cpp.in b/src/py/libcamera/py_formats_generated.cpp.in
index a3f7f94d..c5fb9063 100644
--- a/src/py/libcamera/py_formats_generated.cpp.in
+++ b/src/py/libcamera/py_formats_generated.cpp.in
@@ -11,6 +11,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
class PyFormats
diff --git a/src/py/libcamera/py_geometry.cpp b/src/py/libcamera/py_geometry.cpp
index 5c2aeac4..c7e30360 100644
--- a/src/py/libcamera/py_geometry.cpp
+++ b/src/py/libcamera/py_geometry.cpp
@@ -14,6 +14,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/py/libcamera/py_helpers.cpp b/src/py/libcamera/py_helpers.cpp
index 79891ab6..1ad1d4c1 100644
--- a/src/py/libcamera/py_helpers.cpp
+++ b/src/py/libcamera/py_helpers.cpp
@@ -34,6 +34,8 @@ static py::object valueOrTuple(const ControlValue &cv)
py::object controlValueToPy(const ControlValue &cv)
{
switch (cv.type()) {
+ case ControlTypeNone:
+ return py::none();
case ControlTypeBool:
return valueOrTuple<bool>(cv);
case ControlTypeByte:
@@ -46,14 +48,14 @@ py::object controlValueToPy(const ControlValue &cv)
return valueOrTuple<float>(cv);
case ControlTypeString:
return py::cast(cv.get<std::string>());
- case ControlTypeRectangle:
- return valueOrTuple<Rectangle>(cv);
case ControlTypeSize: {
const Size *v = reinterpret_cast<const Size *>(cv.data().data());
return py::cast(v);
}
- case ControlTypeNone:
- return py::none();
+ case ControlTypeRectangle:
+ return valueOrTuple<Rectangle>(cv);
+ case ControlTypePoint:
+ return valueOrTuple<Point>(cv);
default:
throw std::runtime_error("Unsupported ControlValue type");
}
@@ -73,6 +75,8 @@ static ControlValue controlValueMaybeArray(const py::object &ob)
ControlValue pyToControlValue(const py::object &ob, ControlType type)
{
switch (type) {
+ case ControlTypeNone:
+ return ControlValue();
case ControlTypeBool:
return ControlValue(ob.cast<bool>());
case ControlTypeByte:
@@ -89,8 +93,8 @@ ControlValue pyToControlValue(const py::object &ob, ControlType type)
return controlValueMaybeArray<Rectangle>(ob);
case ControlTypeSize:
return ControlValue(ob.cast<Size>());
- case ControlTypeNone:
- return ControlValue();
+ case ControlTypePoint:
+ return controlValueMaybeArray<Point>(ob);
default:
throw std::runtime_error("Control type not implemented");
}
diff --git a/src/py/libcamera/py_main.cpp b/src/py/libcamera/py_main.cpp
index bce08218..441a70ab 100644
--- a/src/py/libcamera/py_main.cpp
+++ b/src/py/libcamera/py_main.cpp
@@ -7,6 +7,7 @@
#include "py_main.h"
+#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
@@ -85,14 +86,6 @@ PYBIND11_DECLARE_HOLDER_TYPE(T, PyCameraSmartPtr<T>)
*/
static std::weak_ptr<PyCameraManager> gCameraManager;
-void init_py_color_space(py::module &m);
-void init_py_controls_generated(py::module &m);
-void init_py_enums(py::module &m);
-void init_py_formats_generated(py::module &m);
-void init_py_geometry(py::module &m);
-void init_py_properties_generated(py::module &m);
-void init_py_transform(py::module &m);
-
PYBIND11_MODULE(_libcamera, m)
{
init_py_enums(m);
@@ -407,12 +400,26 @@ PYBIND11_MODULE(_libcamera, m)
pyControlId
.def_property_readonly("id", &ControlId::id)
.def_property_readonly("name", &ControlId::name)
+ .def_property_readonly("vendor", &ControlId::vendor)
.def_property_readonly("type", &ControlId::type)
+ .def_property_readonly("isArray", &ControlId::isArray)
+ .def_property_readonly("size", &ControlId::size)
.def("__str__", [](const ControlId &self) { return self.name(); })
.def("__repr__", [](const ControlId &self) {
- return py::str("libcamera.ControlId({}, {}, {})")
- .format(self.id(), self.name(), self.type());
- });
+ std::string sizeStr = "";
+ if (self.isArray()) {
+ sizeStr = "[";
+ size_t size = self.size();
+ if (size == std::numeric_limits<size_t>::max())
+ sizeStr += "n";
+ else
+ sizeStr += std::to_string(size);
+ sizeStr += "]";
+ }
+ return py::str("libcamera.ControlId({}, {}.{}{}, {})")
+ .format(self.id(), self.vendor(), self.name(), sizeStr, self.type());
+ })
+ .def("enumerators", &ControlId::enumerators);
pyControlInfo
.def_property_readonly("min", [](const ControlInfo &self) {
diff --git a/src/py/libcamera/py_main.h b/src/py/libcamera/py_main.h
index 5bb5f2d1..4d594326 100644
--- a/src/py/libcamera/py_main.h
+++ b/src/py/libcamera/py_main.h
@@ -7,8 +7,18 @@
#include <libcamera/base/log.h>
+#include <pybind11/pybind11.h>
+
namespace libcamera {
LOG_DECLARE_CATEGORY(Python)
}
+
+void init_py_color_space(pybind11::module &m);
+void init_py_controls_generated(pybind11::module &m);
+void init_py_enums(pybind11::module &m);
+void init_py_formats_generated(pybind11::module &m);
+void init_py_geometry(pybind11::module &m);
+void init_py_properties_generated(pybind11::module &m);
+void init_py_transform(pybind11::module &m);
diff --git a/src/py/libcamera/py_properties_generated.cpp.in b/src/py/libcamera/py_properties_generated.cpp.in
deleted file mode 100644
index e3802b81..00000000
--- a/src/py/libcamera/py_properties_generated.cpp.in
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
- *
- * Python bindings - Auto-generated properties
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-#include <pybind11/pybind11.h>
-
-namespace py = pybind11;
-
-class PyProperties
-{
-};
-
-${vendors_class_def}
-
-void init_py_properties_generated(py::module& m)
-{
- auto controls = py::class_<PyProperties>(m, "properties");
-${vendors_defs}
-
-${controls}
-}
diff --git a/src/py/libcamera/py_transform.cpp b/src/py/libcamera/py_transform.cpp
index f3a0bfaf..768260ff 100644
--- a/src/py/libcamera/py_transform.cpp
+++ b/src/py/libcamera/py_transform.cpp
@@ -12,6 +12,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/v4l2/meson.build b/src/v4l2/meson.build
index 58f53bf3..2c040414 100644
--- a/src/v4l2/meson.build
+++ b/src/v4l2/meson.build
@@ -1,12 +1,11 @@
# SPDX-License-Identifier: CC0-1.0
-if not get_option('v4l2')
- v4l2_enabled = false
+v4l2_enabled = get_option('v4l2').allowed()
+
+if not v4l2_enabled
subdir_done()
endif
-v4l2_enabled = true
-
v4l2_compat_sources = files([
'v4l2_camera.cpp',
'v4l2_camera_file.cpp',
diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
index 7b97c2d5..94d138cd 100644
--- a/src/v4l2/v4l2_camera.cpp
+++ b/src/v4l2/v4l2_camera.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.cpp - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
#include "v4l2_camera.h"
@@ -12,13 +12,15 @@
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
using namespace libcamera;
LOG_DECLARE_CATEGORY(V4L2Compat)
V4L2Camera::V4L2Camera(std::shared_ptr<Camera> camera)
- : camera_(camera), isRunning_(false), bufferAllocator_(nullptr),
- efd_(-1), bufferAvailableCount_(0)
+ : camera_(camera), controls_(controls::controls), isRunning_(false),
+ bufferAllocator_(nullptr), efd_(-1), bufferAvailableCount_(0)
{
camera_->requestCompleted.connect(this, &V4L2Camera::requestComplete);
}
@@ -202,10 +204,12 @@ int V4L2Camera::streamOn()
if (isRunning_)
return 0;
- int ret = camera_->start();
+ int ret = camera_->start(&controls_);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
+ controls_.clear();
+
isRunning_ = true;
for (Request *req : pendingRequests_) {
@@ -265,6 +269,8 @@ int V4L2Camera::qbuf(unsigned int index)
return 0;
}
+ request->controls().merge(std::move(controls_));
+
ret = camera_->queueRequest(request);
if (ret < 0) {
LOG(V4L2Compat, Error) << "Can't queue request";
diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
index d3483444..9bd161b9 100644
--- a/src/v4l2/v4l2_camera.h
+++ b/src/v4l2/v4l2_camera.h
@@ -2,19 +2,21 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.h - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
#pragma once
#include <deque>
-#include <utility>
+#include <memory>
+#include <vector>
#include <libcamera/base/mutex.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/camera.h>
+#include <libcamera/controls.h>
#include <libcamera/framebuffer.h>
#include <libcamera/framebuffer_allocator.h>
@@ -49,6 +51,9 @@ public:
const libcamera::Size &size,
libcamera::StreamConfiguration *streamConfigOut);
+ libcamera::ControlList &controls() { return controls_; }
+ const libcamera::ControlInfoMap &controlInfo() { return camera_->controls(); }
+
int allocBuffers(unsigned int count);
void freeBuffers();
int getBufferFd(unsigned int index);
@@ -70,6 +75,8 @@ private:
std::shared_ptr<libcamera::Camera> camera_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
+ libcamera::ControlList controls_;
+
bool isRunning_;
libcamera::Mutex bufferLock_;
diff --git a/src/v4l2/v4l2_camera_file.cpp b/src/v4l2/v4l2_camera_file.cpp
index 0a41587c..d8fe854b 100644
--- a/src/v4l2/v4l2_camera_file.cpp
+++ b/src/v4l2/v4l2_camera_file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * v4l2_camera_file.h - V4L2 compatibility camera file information
+ * V4L2 compatibility camera file information
*/
#include "v4l2_camera_file.h"
diff --git a/src/v4l2/v4l2_camera_file.h b/src/v4l2/v4l2_camera_file.h
index 1a7b6a63..1212989e 100644
--- a/src/v4l2/v4l2_camera_file.h
+++ b/src/v4l2/v4l2_camera_file.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * v4l2_camera_file.h - V4L2 compatibility camera file information
+ * V4L2 compatibility camera file information
*/
#pragma once
diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
index 341f7902..559ffc61 100644
--- a/src/v4l2/v4l2_camera_proxy.cpp
+++ b/src/v4l2/v4l2_camera_proxy.cpp
@@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.cpp - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
#include "v4l2_camera_proxy.h"
#include <algorithm>
-#include <array>
#include <errno.h>
#include <numeric>
#include <set>
@@ -23,9 +22,11 @@
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
#include <libcamera/formats.h>
-#include "libcamera/internal/formats.h"
+#include "libcamera/internal/v4l2_pixelformat.h"
#include "v4l2_camera.h"
#include "v4l2_camera_file.h"
@@ -34,6 +35,7 @@
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
using namespace libcamera;
+using namespace std::literals::chrono_literals;
LOG_DECLARE_CATEGORY(V4L2Compat)
@@ -193,6 +195,29 @@ void V4L2CameraProxy::setFmtFromConfig(const StreamConfiguration &streamConfig)
v4l2PixFormat_.xfer_func = V4L2_XFER_FUNC_DEFAULT;
sizeimage_ = streamConfig.frameSize;
+
+ const ControlInfoMap &controls = vcam_->controlInfo();
+ const auto &it = controls.find(&controls::FrameDurationLimits);
+
+ if (it != controls.end()) {
+ const int64_t duration = it->second.def().get<int64_t>();
+
+ v4l2TimePerFrame_.numerator = duration;
+ v4l2TimePerFrame_.denominator = 1000000;
+ } else {
+ /*
+ * Default to 30fps if the camera doesn't expose the
+ * FrameDurationLimits control.
+ *
+ * \todo Remove this once all pipeline handlers implement the
+ * control
+ */
+ LOG(V4L2Compat, Warning)
+ << "Camera does not support FrameDurationLimits";
+
+ v4l2TimePerFrame_.numerator = 333333;
+ v4l2TimePerFrame_.denominator = 10000000;
+ }
}
void V4L2CameraProxy::querycap(std::shared_ptr<Camera> camera)
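
The v4l2_fract arithmetic above is worth spelling out: timeperframe expresses seconds per frame as numerator / denominator, so a FrameDurationLimits default reported in microseconds maps directly onto duration / 1000000, and the 30 fps fallback is written as 333333 / 10000000 ≈ 1/30 s. A short sketch of the forward conversion (durationToTimePerFrame is a hypothetical helper, not part of the patch):

#include <linux/videodev2.h>
#include <stdint.h>

static struct v4l2_fract durationToTimePerFrame(int64_t durationUs)
{
	struct v4l2_fract tpf;

	/* Seconds per frame: e.g. 33333 us -> 33333/1000000 = 1/30 s. */
	tpf.numerator = static_cast<uint32_t>(durationUs);
	tpf.denominator = 1000000;

	return tpf;
}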
@@ -756,6 +781,55 @@ int V4L2CameraProxy::vidioc_streamoff(V4L2CameraFile *file, int *arg)
return ret;
}
+int V4L2CameraProxy::vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!validateBufferType(arg->type))
+ return -EINVAL;
+
+ memset(&arg->parm, 0, sizeof(arg->parm));
+
+ arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!validateBufferType(arg->type))
+ return -EINVAL;
+
+ /*
+ * Store the frame duration if it is valid, otherwise keep the current
+ * value.
+ *
+ * \todo The provided value should be adjusted based on the camera
+ * capabilities.
+ */
+ if (arg->parm.capture.timeperframe.numerator &&
+ arg->parm.capture.timeperframe.denominator)
+ v4l2TimePerFrame_ = arg->parm.capture.timeperframe;
+
+ memset(&arg->parm, 0, sizeof(arg->parm));
+
+ arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+ /* Apply the frame duration. */
+ utils::Duration frameDuration = 1.0s * v4l2TimePerFrame_.numerator
+ / v4l2TimePerFrame_.denominator;
+ int64_t uDuration = frameDuration.get<std::micro>();
+ vcam_->controls().set(controls::FrameDurationLimits, { uDuration, uDuration });
+
+ return 0;
+}
+
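
vidioc_s_parm() then performs the inverse conversion before staging the FrameDurationLimits control. A sketch of the same computation using std::chrono in place of libcamera's utils::Duration (an assumption made only to keep the example self-contained):

#include <chrono>
#include <stdint.h>

static int64_t timePerFrameToUs(uint32_t numerator, uint32_t denominator)
{
	using namespace std::chrono;

	/* Seconds per frame = numerator / denominator. */
	duration<double> frameDuration =
		duration<double>(1.0) * numerator / denominator;

	/* FrameDurationLimits expects microseconds. */
	return duration_cast<microseconds>(frameDuration).count();
}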
const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
VIDIOC_QUERYCAP,
VIDIOC_ENUM_FRAMESIZES,
@@ -776,6 +850,8 @@ const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
VIDIOC_EXPBUF,
VIDIOC_STREAMON,
VIDIOC_STREAMOFF,
+ VIDIOC_G_PARM,
+ VIDIOC_S_PARM,
};
int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void *arg)
@@ -863,6 +939,12 @@ int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void
case VIDIOC_STREAMOFF:
ret = vidioc_streamoff(file, static_cast<int *>(arg));
break;
+ case VIDIOC_G_PARM:
+ ret = vidioc_g_parm(file, static_cast<struct v4l2_streamparm *>(arg));
+ break;
+ case VIDIOC_S_PARM:
+ ret = vidioc_s_parm(file, static_cast<struct v4l2_streamparm *>(arg));
+ break;
default:
ret = -ENOTTY;
break;
diff --git a/src/v4l2/v4l2_camera_proxy.h b/src/v4l2/v4l2_camera_proxy.h
index 8a0195e1..5aa352c3 100644
--- a/src/v4l2/v4l2_camera_proxy.h
+++ b/src/v4l2/v4l2_camera_proxy.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.h - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
#pragma once
@@ -67,6 +67,8 @@ private:
int vidioc_expbuf(V4L2CameraFile *file, struct v4l2_exportbuffer *arg);
int vidioc_streamon(V4L2CameraFile *file, int *arg);
int vidioc_streamoff(V4L2CameraFile *file, int *arg);
+ int vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
+ int vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
bool hasOwnership(V4L2CameraFile *file);
int acquire(V4L2CameraFile *file);
@@ -84,6 +86,7 @@ private:
struct v4l2_capability capabilities_;
struct v4l2_pix_format v4l2PixFormat_;
+ struct v4l2_fract v4l2TimePerFrame_;
std::vector<struct v4l2_buffer> buffers_;
std::map<void *, unsigned int> mmaps_;
diff --git a/src/v4l2/v4l2_compat.cpp b/src/v4l2/v4l2_compat.cpp
index 1765fb5d..ff833f57 100644
--- a/src/v4l2/v4l2_compat.cpp
+++ b/src/v4l2/v4l2_compat.cpp
@@ -2,17 +2,19 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat.cpp - V4L2 compatibility layer
+ * V4L2 compatibility layer
*/
#include "v4l2_compat_manager.h"
-#include <errno.h>
+#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
#include <libcamera/base/utils.h>
@@ -28,71 +30,97 @@ using namespace libcamera;
va_end(ap); \
}
+namespace {
+
+/*
+ * Determine if the flags require a mode argument that needs to be
+ * parsed from va_args.
+ */
+bool needs_mode(int flags)
+{
+ return (flags & O_CREAT) || ((flags & O_TMPFILE) == O_TMPFILE);
+}
+
+} /* namespace */
+
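
The full-mask comparison in needs_mode() matters because, on Linux, O_TMPFILE is a multi-bit constant that includes O_DIRECTORY. The previous `oflag & O_TMPFILE` test would therefore also fire for plain O_DIRECTORY opens and read a garbage mode from the variadic arguments. A standalone check of that subtlety (Linux/glibc only, as O_TMPFILE is a GNU extension):

#include <cassert>
#include <fcntl.h>

int main()
{
	/* O_TMPFILE carries the O_DIRECTORY bit... */
	assert((O_TMPFILE & O_DIRECTORY) == O_DIRECTORY);

	/* ...so the naive bit test misfires on a plain O_DIRECTORY open, */
	assert((O_DIRECTORY & O_TMPFILE) != 0);

	/* while the full-mask comparison stays false, as it should. */
	assert((O_DIRECTORY & O_TMPFILE) != O_TMPFILE);

	return 0;
}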
extern "C" {
LIBCAMERA_PUBLIC int open(const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
oflag, mode);
}
-/* _FORTIFY_SOURCE redirects open to __open_2 */
-LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
-{
- return open(path, oflag);
-}
-
#ifndef open64
LIBCAMERA_PUBLIC int open64(const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
oflag | O_LARGEFILE, mode);
}
-
-LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
-{
- return open(path, oflag);
-}
#endif
LIBCAMERA_PUBLIC int openat(int dirfd, const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(dirfd, path, oflag, mode);
}
-LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
-{
- return openat(dirfd, path, oflag);
-}
-
#ifndef openat64
LIBCAMERA_PUBLIC int openat64(int dirfd, const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(dirfd, path,
oflag | O_LARGEFILE, mode);
}
+#endif
-LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
+/*
+ * _FORTIFY_SOURCE redirects open* to __open*_2. Disable the
+ * -Wmissing-declarations warning, as the functions won't be declared if
+ * _FORTIFY_SOURCE is not in use.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-declarations"
+
+LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return open(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
{
+ assert(!needs_mode(oflag));
+ return open64(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
return openat(dirfd, path, oflag);
}
-#endif
+
+LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return openat64(dirfd, path, oflag);
+}
+
+#pragma GCC diagnostic pop
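
For context, the reason the __open*_2 symbols must be exported at all: with _FORTIFY_SOURCE enabled, glibc's headers lower open() calls whose flags are known at compile time to __open_2(), so a fortified application would bypass the interposed open() unless the compat layer provides those entry points too. A rough illustration of the redirection's shape (this is not glibc's actual header code):

#include <fcntl.h>

extern "C" int __open_2(const char *path, int oflag);

int fortifiedOpen(const char *path)
{
	/* What a fortified open(path, O_RDONLY) call lowers to. */
	return __open_2(path, O_RDONLY);
}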
LIBCAMERA_PUBLIC int dup(int oldfd)
{
@@ -125,7 +153,11 @@ LIBCAMERA_PUBLIC int munmap(void *addr, size_t length)
return V4L2CompatManager::instance()->munmap(addr, length);
}
+#if HAVE_POSIX_IOCTL
+LIBCAMERA_PUBLIC int ioctl(int fd, int request, ...)
+#else
LIBCAMERA_PUBLIC int ioctl(int fd, unsigned long request, ...)
+#endif
{
void *arg;
extract_va_arg(void *, arg, request);
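
The HAVE_POSIX_IOCTL conditional accounts for C libraries such as musl that declare ioctl() with the POSIX prototype, taking an int request, where glibc uses unsigned long; the interposer has to match the libc declaration exactly or the build fails. HAVE_POSIX_IOCTL is presumably set by a configure-time compile probe along these lines (hypothetical, shown for illustration):

/*
 * This translation unit compiles only where ioctl() uses the POSIX
 * prototype; on glibc the redeclaration below conflicts with the
 * header and the probe fails, leaving HAVE_POSIX_IOCTL unset.
 */
#include <sys/ioctl.h>

extern "C" int ioctl(int fd, int request, ...);

int main()
{
	return 0;
}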
diff --git a/src/v4l2/v4l2_compat_manager.cpp b/src/v4l2/v4l2_compat_manager.cpp
index 5e8cdb4f..f53fb300 100644
--- a/src/v4l2/v4l2_compat_manager.cpp
+++ b/src/v4l2/v4l2_compat_manager.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.cpp - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
#include "v4l2_compat_manager.h"
@@ -10,7 +10,6 @@
#include <dlfcn.h>
#include <fcntl.h>
#include <map>
-#include <stdarg.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
diff --git a/src/v4l2/v4l2_compat_manager.h b/src/v4l2/v4l2_compat_manager.h
index 64af9a8c..f7c6f122 100644
--- a/src/v4l2/v4l2_compat_manager.h
+++ b/src/v4l2/v4l2_compat_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.h - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
#pragma once
diff --git a/test/bayer-format.cpp b/test/bayer-format.cpp
index 54f03487..f8d19804 100644
--- a/test/bayer-format.cpp
+++ b/test/bayer-format.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Sebastian Fricke
*
- * bayer_format.cpp - BayerFormat class tests
+ * BayerFormat class tests
*/
#include <iostream>
diff --git a/test/byte-stream-buffer.cpp b/test/byte-stream-buffer.cpp
index 04ff0571..04aab3d2 100644
--- a/test/byte-stream-buffer.cpp
+++ b/test/byte-stream-buffer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * byte_stream_buffer.cpp - ByteStreamBuffer tests
+ * ByteStreamBuffer tests
*/
#include <array>
diff --git a/test/camera-sensor.cpp b/test/camera-sensor.cpp
index 9503d775..869c7889 100644
--- a/test/camera-sensor.cpp
+++ b/test/camera-sensor.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera-sensor.cpp - Camera sensor tests
+ * Camera sensor tests
*/
#include <algorithm>
@@ -52,8 +52,8 @@ protected:
return TestFail;
}
- sensor_ = new CameraSensor(entity);
- if (sensor_->init() < 0) {
+ sensor_ = CameraSensorFactoryBase::create(entity);
+ if (!sensor_) {
cerr << "Unable to initialise camera sensor" << endl;
return TestFail;
}
@@ -118,13 +118,12 @@ protected:
void cleanup()
{
- delete sensor_;
}
private:
std::unique_ptr<DeviceEnumerator> enumerator_;
std::shared_ptr<MediaDevice> media_;
- CameraSensor *sensor_;
+ std::unique_ptr<CameraSensor> sensor_;
CameraLens *lens_;
};
diff --git a/test/camera/buffer_import.cpp b/test/camera/buffer_import.cpp
index 92884004..815d1cae 100644
--- a/test/camera/buffer_import.cpp
+++ b/test/camera/buffer_import.cpp
@@ -63,6 +63,8 @@ protected:
request->reuse();
request->addBuffer(stream, buffer);
camera_->queueRequest(request);
+
+ dispatcher_->interrupt();
}
int init() override
@@ -76,6 +78,8 @@ protected:
return TestFail;
}
+ dispatcher_ = Thread::current()->eventDispatcher();
+
return TestPass;
}
@@ -133,17 +137,20 @@ protected:
}
}
- EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
+ const unsigned int nFrames = cfg.bufferCount * 2;
Timer timer;
- timer.start(1000ms);
- while (timer.isRunning())
- dispatcher->processEvents();
+ timer.start(500ms * nFrames);
+ while (timer.isRunning()) {
+ dispatcher_->processEvents();
+ if (completeRequestsCount_ > nFrames)
+ break;
+ }
- if (completeRequestsCount_ < cfg.bufferCount * 2) {
+ if (completeRequestsCount_ < nFrames) {
std::cout << "Failed to capture enough frames (got "
<< completeRequestsCount_ << " expected at least "
- << cfg.bufferCount * 2 << ")" << std::endl;
+ << nFrames << ")" << std::endl;
return TestFail;
}
@@ -161,6 +168,8 @@ protected:
}
private:
+ EventDispatcher *dispatcher_;
+
std::vector<std::unique_ptr<Request>> requests_;
unsigned int completeBuffersCount_;
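
The reworked capture loop replaces the fixed one-second window with a deadline proportional to the expected frame count, and requestComplete() now interrupts the dispatcher so the exit condition is re-evaluated as soon as a request finishes rather than when the timer fires. The pattern, reduced to a sketch (the helper name and completion counter are assumed):

#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>

using namespace libcamera;
using namespace std::chrono_literals;

bool waitForFrames(unsigned int nFrames, const unsigned int &completed)
{
	EventDispatcher *dispatcher = Thread::current()->eventDispatcher();

	/* Allow a 500 ms budget per expected frame. */
	Timer timer;
	timer.start(500ms * nFrames);

	while (timer.isRunning()) {
		/* Returns early when the completion handler interrupts. */
		dispatcher->processEvents();
		if (completed >= nFrames)
			break;
	}

	return completed >= nFrames;
}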
diff --git a/test/camera/capture.cpp b/test/camera/capture.cpp
index de824083..8766fb19 100644
--- a/test/camera/capture.cpp
+++ b/test/camera/capture.cpp
@@ -59,6 +59,8 @@ protected:
request->reuse();
request->addBuffer(stream, buffer);
camera_->queueRequest(request);
+
+ dispatcher_->interrupt();
}
int init() override
@@ -73,6 +75,7 @@ protected:
}
allocator_ = new FrameBufferAllocator(camera_);
+ dispatcher_ = Thread::current()->eventDispatcher();
return TestPass;
}
@@ -135,19 +138,20 @@ protected:
}
}
- EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
+ unsigned int nFrames = allocator_->buffers(stream).size() * 2;
Timer timer;
- timer.start(1000ms);
- while (timer.isRunning())
- dispatcher->processEvents();
-
- unsigned int nbuffers = allocator_->buffers(stream).size();
+ timer.start(500ms * nFrames);
+ while (timer.isRunning()) {
+ dispatcher_->processEvents();
+ if (completeRequestsCount_ > nFrames)
+ break;
+ }
- if (completeRequestsCount_ < nbuffers * 2) {
+ if (completeRequestsCount_ < nFrames) {
cout << "Failed to capture enough frames (got "
<< completeRequestsCount_ << " expected at least "
- << nbuffers * 2 << ")" << endl;
+ << nFrames << ")" << endl;
return TestFail;
}
@@ -164,6 +168,8 @@ protected:
return TestPass;
}
+ EventDispatcher *dispatcher_;
+
std::vector<std::unique_ptr<Request>> requests_;
std::unique_ptr<CameraConfiguration> config_;
diff --git a/test/controls/control_info.cpp b/test/controls/control_info.cpp
index 1176a502..e1bb43f0 100644
--- a/test/controls/control_info.cpp
+++ b/test/controls/control_info.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_info.cpp - ControlInfo tests
+ * ControlInfo tests
*/
#include <iostream>
diff --git a/test/controls/control_info_map.cpp b/test/controls/control_info_map.cpp
index 29b33515..b0be14b5 100644
--- a/test/controls/control_info_map.cpp
+++ b/test/controls/control_info_map.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_info.cpp - ControlInfoMap tests
+ * ControlInfoMap tests
*/
#include <iostream>
diff --git a/test/controls/control_list.cpp b/test/controls/control_list.cpp
index bb35aab7..e27325c3 100644
--- a/test/controls/control_list.cpp
+++ b/test/controls/control_list.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_list.cpp - ControlList tests
+ * ControlList tests
*/
#include <iostream>
diff --git a/test/controls/control_value.cpp b/test/controls/control_value.cpp
index ad8e05d0..5084fd0c 100644
--- a/test/controls/control_value.cpp
+++ b/test/controls/control_value.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_value.cpp - ControlValue tests
+ * ControlValue tests
*/
#include <algorithm>
@@ -110,6 +110,86 @@ protected:
}
/*
+ * Unsigned Integer16 type.
+ */
+ value.set(static_cast<uint16_t>(42));
+ if (value.isNone() || value.isArray() ||
+ value.type() != ControlTypeUnsigned16) {
+ cerr << "Control type mismatch after setting to uint16_t" << endl;
+ return TestFail;
+ }
+
+ if (value.get<uint16_t>() != 42) {
+ cerr << "Control value mismatch after setting to uint16_t" << endl;
+ return TestFail;
+ }
+
+ if (value.toString() != "42") {
+ cerr << "Control string mismatch after setting to uint16_t" << endl;
+ return TestFail;
+ }
+
+ std::array<uint16_t, 4> uint16s{ 3, 14, 15, 9 };
+ value.set(Span<uint16_t>(uint16s));
+ if (value.isNone() || !value.isArray() ||
+ value.type() != ControlTypeUnsigned16) {
+ cerr << "Control type mismatch after setting to uint16_t array" << endl;
+ return TestFail;
+ }
+
+ Span<const uint16_t> uint16sResult = value.get<Span<const uint16_t>>();
+ if (uint16s.size() != uint16sResult.size() ||
+ !std::equal(uint16s.begin(), uint16s.end(), uint16sResult.begin())) {
+ cerr << "Control value mismatch after setting to uint16_t array" << endl;
+ return TestFail;
+ }
+
+ if (value.toString() != "[ 3, 14, 15, 9 ]") {
+ cerr << "Control string mismatch after setting to uint16_t array" << endl;
+ return TestFail;
+ }
+
+ /*
+ * Unsigned Integer32 type.
+ */
+ value.set(static_cast<uint32_t>(42));
+ if (value.isNone() || value.isArray() ||
+ value.type() != ControlTypeUnsigned32) {
+ cerr << "Control type mismatch after setting to uint32_t" << endl;
+ return TestFail;
+ }
+
+ if (value.get<uint32_t>() != 42) {
+ cerr << "Control value mismatch after setting to uint32_t" << endl;
+ return TestFail;
+ }
+
+ if (value.toString() != "42") {
+ cerr << "Control string mismatch after setting to uint32_t" << endl;
+ return TestFail;
+ }
+
+ std::array<uint32_t, 4> uint32s{ 3, 14, 15, 9 };
+ value.set(Span<uint32_t>(uint32s));
+ if (value.isNone() || !value.isArray() ||
+ value.type() != ControlTypeUnsigned32) {
+ cerr << "Control type mismatch after setting to uint32_t array" << endl;
+ return TestFail;
+ }
+
+ Span<const uint32_t> uint32sResult = value.get<Span<const uint32_t>>();
+ if (uint32s.size() != uint32sResult.size() ||
+ !std::equal(uint32s.begin(), uint32s.end(), uint32sResult.begin())) {
+ cerr << "Control value mismatch after setting to uint32_t array" << endl;
+ return TestFail;
+ }
+
+ if (value.toString() != "[ 3, 14, 15, 9 ]") {
+ cerr << "Control string mismatch after setting to uint32_t array" << endl;
+ return TestFail;
+ }
+
+ /*
* Integer32 type.
*/
value.set(0x42000000);
diff --git a/test/delayed_controls.cpp b/test/delayed_controls.cpp
index a8ce9828..7bd30e7a 100644
--- a/test/delayed_controls.cpp
+++ b/test/delayed_controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * delayed_controls.cpp - libcamera delayed controls test
+ * libcamera delayed controls test
*/
#include <iostream>
diff --git a/test/event-dispatcher.cpp b/test/event-dispatcher.cpp
index 9b07ab2b..f71c1c0d 100644
--- a/test/event-dispatcher.cpp
+++ b/test/event-dispatcher.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event-dispatcher.cpp - Event dispatcher test
+ * Event dispatcher test
*/
#include <chrono>
diff --git a/test/event-thread.cpp b/test/event-thread.cpp
index d6e5d27a..5499bbf8 100644
--- a/test/event-thread.cpp
+++ b/test/event-thread.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event-thread.cpp - Threaded event test
+ * Threaded event test
*/
#include <chrono>
diff --git a/test/event.cpp b/test/event.cpp
index 19dceae1..9f7b1ed4 100644
--- a/test/event.cpp
+++ b/test/event.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event.cpp - Event test
+ * Event test
*/
#include <iostream>
diff --git a/test/fence.cpp b/test/fence.cpp
index 1e38bc2f..8095b228 100644
--- a/test/fence.cpp
+++ b/test/fence.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * fence.cpp - Fence test
+ * Fence test
*/
#include <iostream>
@@ -43,7 +43,6 @@ private:
void signalFence();
- std::unique_ptr<Fence> fence_;
EventDispatcher *dispatcher_;
UniqueFD eventFd_;
UniqueFD eventFd2_;
@@ -58,8 +57,11 @@ private:
bool expectedCompletionResult_ = true;
bool setFence_ = true;
- unsigned int completedRequest_;
-
+ /*
+ * Request IDs track the number of requests that have completed. They
+ * are one-based, and don't wrap.
+ */
+ unsigned int completedRequestId_;
unsigned int signalledRequestId_;
unsigned int expiredRequestId_;
unsigned int nbuffers_;
@@ -127,8 +129,19 @@ int FenceTest::init()
return TestFail;
}
- signalledRequestId_ = nbuffers_ - 2;
- expiredRequestId_ = nbuffers_ - 1;
+ completedRequestId_ = 0;
+
+ /*
+ * All but two requests are queued without a fence. Request
+ * expiredRequestId_ will be queued with a fence that we won't signal
+ * (which is then expected to expire), and request signalledRequestId_
+ * will be queued with a fence that gets signalled. Select nbuffers_
+ * and nbuffers_ * 2 for those two requests, to space them by a few
+ * frames while still not requiring a long time for the test to
+ * complete.
+ */
+ expiredRequestId_ = nbuffers_;
+ signalledRequestId_ = nbuffers_ * 2;
return TestPass;
}
@@ -190,16 +203,16 @@ void FenceTest::requestRequeue(Request *request)
const Request::BufferMap &buffers = request->buffers();
const Stream *stream = buffers.begin()->first;
FrameBuffer *buffer = buffers.begin()->second;
- uint64_t cookie = request->cookie();
request->reuse();
- if (cookie == signalledRequestId_ && setFence_) {
+ if (completedRequestId_ == signalledRequestId_ - nbuffers_ && setFence_) {
/*
- * The second time this request is queued add a fence to it.
- *
- * The main loop signals it by using a timer to write to the
- * efd2_ file descriptor before the fence expires.
+ * This is the request that will be used to test fence
+ * signalling when it completes next time. Add a fence to it,
+ * using efd2_. The main loop will signal the fence by using a
+ * timer to write to the efd2_ file descriptor before the fence
+ * expires.
*/
std::unique_ptr<Fence> fence =
std::make_unique<Fence>(std::move(eventFd2_));
@@ -214,16 +227,15 @@ void FenceTest::requestRequeue(Request *request)
void FenceTest::requestComplete(Request *request)
{
- uint64_t cookie = request->cookie();
- completedRequest_ = cookie;
+ completedRequestId_++;
/*
- * The last request is expected to fail as its fence has not been
- * signaled.
+ * Request expiredRequestId_ is expected to fail as its fence has not
+ * been signalled.
*
* Validate the fence status but do not re-queue it.
*/
- if (cookie == expiredRequestId_) {
+ if (completedRequestId_ == expiredRequestId_) {
if (validateExpiredRequest(request) != TestPass)
expectedCompletionResult_ = false;
@@ -231,7 +243,7 @@ void FenceTest::requestComplete(Request *request)
return;
}
- /* Validate all requests but the last. */
+ /* Validate all other requests. */
if (validateRequest(request) != TestPass) {
expectedCompletionResult_ = false;
@@ -272,15 +284,16 @@ int FenceTest::run()
}
int ret;
- if (i == expiredRequestId_) {
+ if (i == expiredRequestId_ - 1) {
/* This request will have a fence, and it will expire. */
- fence_ = std::make_unique<Fence>(std::move(eventFd_));
- if (!fence_->isValid()) {
+ std::unique_ptr<Fence> fence =
+ std::make_unique<Fence>(std::move(eventFd_));
+ if (!fence->isValid()) {
cerr << "Fence should be valid" << endl;
return TestFail;
}
- ret = request->addBuffer(stream_, buffer.get(), std::move(fence_));
+ ret = request->addBuffer(stream_, buffer.get(), std::move(fence));
} else {
/* All other requests will have no Fence. */
ret = request->addBuffer(stream_, buffer.get());
@@ -314,15 +327,21 @@ int FenceTest::run()
Timer fenceTimer;
fenceTimer.timeout.connect(this, &FenceTest::signalFence);
- /* Loop for one second. */
+ /*
+ * Loop long enough for all requests to complete, allowing 500ms per
+ * request.
+ */
Timer timer;
- timer.start(1000ms);
- while (timer.isRunning() && expectedCompletionResult_) {
- if (completedRequest_ == signalledRequestId_ && setFence_)
+ timer.start(500ms * (signalledRequestId_ + 1));
+ while (timer.isRunning() && expectedCompletionResult_ &&
+ completedRequestId_ <= signalledRequestId_ + 1) {
+ if (completedRequestId_ == signalledRequestId_ - 1 && setFence_)
/*
- * signalledRequestId_ has just completed and it has
- * been re-queued with a fence. Start the timer to
- * signal the fence in 10 msec.
+ * The request just before signalledRequestId_ has just
+ * completed. Request signalledRequestId_ has been
+ * queued with a fence, and libcamera is likely already
+ * waiting on the fence, or will soon. Start the timer
+ * to signal the fence in 10 msec.
*/
fenceTimer.start(10ms);
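
For reference, a sketch of the eventfd-backed fence mechanics the test exercises, assuming the standard libcamera Fence API: the Fence takes ownership of one descriptor, a duplicate is kept on the test side, and writing to the eventfd before libcamera's wait times out completes the fence; leaving it unwritten makes the fence expire and fails the request:

#include <stdint.h>
#include <memory>

#include <sys/eventfd.h>
#include <unistd.h>

#include <libcamera/base/unique_fd.h>
#include <libcamera/fence.h>

using namespace libcamera;

std::unique_ptr<Fence> makeEventFence(UniqueFD &signalFd)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	if (efd < 0)
		return nullptr;

	/* Keep a duplicate so the fence can be signalled later. */
	signalFd = UniqueFD(dup(efd));

	return std::make_unique<Fence>(UniqueFD(efd));
}

void signalEventFence(const UniqueFD &signalFd)
{
	uint64_t value = 1;
	if (write(signalFd.get(), &value, sizeof(value)) < 0)
		return; /* Signalling failed; the fence will expire. */
}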
diff --git a/test/file.cpp b/test/file.cpp
index 5c978ebf..170e6ccd 100644
--- a/test/file.cpp
+++ b/test/file.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.cpp - File I/O operations tests
+ * File I/O operations tests
*/
#include <fstream>
diff --git a/test/flags.cpp b/test/flags.cpp
index 2177e247..85c34788 100644
--- a/test/flags.cpp
+++ b/test/flags.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.cpp - Flags tests
+ * Flags tests
*/
#include <iostream>
diff --git a/test/geometry.cpp b/test/geometry.cpp
index 008d51ea..11df043b 100644
--- a/test/geometry.cpp
+++ b/test/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry classes tests
+ * Geometry classes tests
*/
#include <iostream>
@@ -481,6 +481,31 @@ protected:
return TestFail;
}
+ Point topLeft(3, 3);
+ Point bottomRight(30, 30);
+ Point topRight(30, 3);
+ Point bottomLeft(3, 30);
+ Rectangle rect1(topLeft, bottomRight);
+ Rectangle rect2(topRight, bottomLeft);
+ Rectangle rect3(bottomRight, topLeft);
+ Rectangle rect4(bottomLeft, topRight);
+
+ if (rect1 != rect2 || rect1 != rect3 || rect1 != rect4) {
+ cout << "Point-to-point construction failed" << endl;
+ return TestFail;
+ }
+
+ Rectangle f1 = Rectangle(100, 200, 3000, 2000);
+ Rectangle f2 = Rectangle(200, 300, 1500, 1000);
+ /* Bottom right quarter of the corresponding frames. */
+ Rectangle r1 = Rectangle(100 + 1500, 200 + 1000, 1500, 1000);
+ Rectangle r2 = Rectangle(200 + 750, 300 + 500, 750, 500);
+ if (r1.transformedBetween(f1, f2) != r2 ||
+ r2.transformedBetween(f2, f1) != r1) {
+ cout << "Rectangle::transformedBetween() test failed" << endl;
+ return TestFail;
+ }
+
return TestPass;
}
};
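
The expected values in the transformedBetween() test follow from mapping the rectangle out of the source frame, scaling by the ratio of the frame sizes, and mapping into the destination frame. A plain-integer sketch of that assumed arithmetic, checked against the test's numbers:

#include <cassert>

struct Rect {
	int x, y, w, h;
};

static Rect transformedBetween(const Rect &r, const Rect &src, const Rect &dst)
{
	Rect out;

	/* Translate into src, scale by the size ratio, translate into dst. */
	out.x = dst.x + (r.x - src.x) * dst.w / src.w;
	out.y = dst.y + (r.y - src.y) * dst.h / src.h;
	out.w = r.w * dst.w / src.w;
	out.h = r.h * dst.h / src.h;

	return out;
}

int main()
{
	Rect f1{ 100, 200, 3000, 2000 };
	Rect f2{ 200, 300, 1500, 1000 };
	Rect r1{ 1600, 1200, 1500, 1000 };

	Rect r2 = transformedBetween(r1, f1, f2);
	assert(r2.x == 950 && r2.y == 800 && r2.w == 750 && r2.h == 500);

	return 0;
}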
diff --git a/test/gstreamer/gstreamer_device_provider_test.cpp b/test/gstreamer/gstreamer_device_provider_test.cpp
index 237af8cd..521c60b8 100644
--- a/test/gstreamer/gstreamer_device_provider_test.cpp
+++ b/test/gstreamer/gstreamer_device_provider_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Umang Jain <umang.jain@ideasonboard.com>
*
- * gstreamer_single_stream_test.cpp - GStreamer single stream capture test
+ * GStreamer single stream capture test
*/
#include <vector>
@@ -52,19 +52,12 @@ protected:
for (l = devices; l != NULL; l = g_list_next(l)) {
GstDevice *device = GST_DEVICE(l->data);
g_autofree gchar *gst_name;
- bool matched = false;
g_autoptr(GstElement) element = gst_device_create_element(device, NULL);
g_object_get(element, "camera-name", &gst_name, NULL);
- for (auto name : cameraNames) {
- if (strcmp(name.c_str(), gst_name) == 0) {
- matched = true;
- break;
- }
- }
-
- if (!matched)
+ if (std::find(cameraNames.begin(), cameraNames.end(), gst_name) ==
+ cameraNames.end())
return TestFail;
}
diff --git a/test/gstreamer/gstreamer_memory_lifetime_test.cpp b/test/gstreamer/gstreamer_memory_lifetime_test.cpp
new file mode 100644
index 00000000..1738cf56
--- /dev/null
+++ b/test/gstreamer/gstreamer_memory_lifetime_test.cpp
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2024, Nicolas Dufresne
+ *
+ * GStreamer memory lifetime test
+ */
+
+#include <iostream>
+#include <unistd.h>
+
+#include <gst/app/app.h>
+#include <gst/gst.h>
+
+#include "gstreamer_test.h"
+#include "test.h"
+
+using namespace std;
+
+class GstreamerMemoryLifetimeTest : public GstreamerTest, public Test
+{
+public:
+ GstreamerMemoryLifetimeTest()
+ : GstreamerTest()
+ {
+ }
+
+protected:
+ int init() override
+ {
+ if (status_ != TestPass)
+ return status_;
+
+ appsink_ = gst_element_factory_make("appsink", nullptr);
+ if (!appsink_) {
+ g_printerr("Your installation is missing 'appsink'\n");
+ return TestFail;
+ }
+ g_object_ref_sink(appsink_);
+
+ return createPipeline();
+ }
+
+ int run() override
+ {
+ /* Build the pipeline */
+ gst_bin_add_many(GST_BIN(pipeline_), libcameraSrc_, appsink_, nullptr);
+ if (gst_element_link(libcameraSrc_, appsink_) != TRUE) {
+ g_printerr("Elements could not be linked.\n");
+ return TestFail;
+ }
+
+ if (startPipeline() != TestPass)
+ return TestFail;
+
+ sample_ = gst_app_sink_try_pull_sample(GST_APP_SINK(appsink_), GST_SECOND * 5);
+ if (!sample_) {
+ /* Failed to obtain a sample. Abort the test */
+ gst_element_set_state(pipeline_, GST_STATE_NULL);
+ return TestFail;
+ }
+
+ /*
+ * Keep the sample referenced and set the pipeline state to
+ * NULL. This causes the libcamerasrc element to synchronously
+ * release resources it holds. The sample will be released
+ * later in cleanup().
+ *
+ * The test case verifies that libcamerasrc keeps all the
+ * resources backing the frames alive until the memory
+ * allocated for them gets freed. We return TestPass at this
+ * stage, and any use-after-free will be caught by the test
+ * crashing in cleanup().
+ */
+ gst_element_set_state(pipeline_, GST_STATE_NULL);
+
+ return TestPass;
+ }
+
+ void cleanup() override
+ {
+ g_clear_pointer(&sample_, gst_sample_unref);
+ g_clear_object(&appsink_);
+ }
+
+private:
+ GstElement *appsink_;
+ GstSample *sample_;
+};
+
+TEST_REGISTER(GstreamerMemoryLifetimeTest)
diff --git a/test/gstreamer/gstreamer_multi_stream_test.cpp b/test/gstreamer/gstreamer_multi_stream_test.cpp
index cd669308..263d1e86 100644
--- a/test/gstreamer/gstreamer_multi_stream_test.cpp
+++ b/test/gstreamer/gstreamer_multi_stream_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Vedant Paranjape
*
- * gstreamer_multi_stream_test.cpp - GStreamer multi stream capture test
+ * GStreamer multi stream capture test
*/
#include <iostream>
diff --git a/test/gstreamer/gstreamer_single_stream_test.cpp b/test/gstreamer/gstreamer_single_stream_test.cpp
index 301e4a93..3ef2d323 100644
--- a/test/gstreamer/gstreamer_single_stream_test.cpp
+++ b/test/gstreamer/gstreamer_single_stream_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Vedant Paranjape
*
- * gstreamer_single_stream_test.cpp - GStreamer single stream capture test
+ * GStreamer single stream capture test
*/
#include <iostream>
@@ -29,30 +29,21 @@ protected:
if (status_ != TestPass)
return status_;
- const gchar *streamDescription = "fakesink";
- g_autoptr(GError) error0 = NULL;
- stream0_ = gst_parse_bin_from_description_full(streamDescription, TRUE,
- NULL,
- GST_PARSE_FLAG_FATAL_ERRORS,
- &error0);
-
- if (!stream0_) {
- g_printerr("Bin could not be created (%s)\n", error0->message);
+ fakesink_ = gst_element_factory_make("fakesink", nullptr);
+ if (!fakesink_) {
+ g_printerr("Your installation is missing 'fakesink'\n");
return TestFail;
}
- g_object_ref_sink(stream0_);
-
- if (createPipeline() != TestPass)
- return TestFail;
+ g_object_ref_sink(fakesink_);
- return TestPass;
+ return createPipeline();
}
int run() override
{
/* Build the pipeline */
- gst_bin_add_many(GST_BIN(pipeline_), libcameraSrc_, stream0_, NULL);
- if (gst_element_link(libcameraSrc_, stream0_) != TRUE) {
+ gst_bin_add_many(GST_BIN(pipeline_), libcameraSrc_, fakesink_, nullptr);
+ if (!gst_element_link(libcameraSrc_, fakesink_)) {
g_printerr("Elements could not be linked.\n");
return TestFail;
}
@@ -68,11 +59,11 @@ protected:
void cleanup() override
{
- g_clear_object(&stream0_);
+ g_clear_object(&fakesink_);
}
private:
- GstElement *stream0_;
+ GstElement *fakesink_;
};
TEST_REGISTER(GstreamerSingleStreamTest)
diff --git a/test/gstreamer/gstreamer_test.cpp b/test/gstreamer/gstreamer_test.cpp
index e8119b85..a15fef0e 100644
--- a/test/gstreamer/gstreamer_test.cpp
+++ b/test/gstreamer/gstreamer_test.cpp
@@ -9,12 +9,17 @@
#include <libcamera/base/utils.h>
+#if HAVE_ASAN
+#include <sanitizer/asan_interface.h>
+#endif
+
#include "gstreamer_test.h"
#include "test.h"
using namespace std;
+#if HAVE_ASAN
extern "C" {
const char *__asan_default_options()
{
@@ -26,6 +31,7 @@ const char *__asan_default_options()
return "detect_leaks=false";
}
}
+#endif
GstreamerTest::GstreamerTest(unsigned int numStreams)
: pipeline_(nullptr), libcameraSrc_(nullptr)
diff --git a/test/gstreamer/gstreamer_test.h b/test/gstreamer/gstreamer_test.h
index aa2261e2..abb37c1b 100644
--- a/test/gstreamer/gstreamer_test.h
+++ b/test/gstreamer/gstreamer_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Vedant Paranjape
*
- * gstreamer_test.cpp - GStreamer test base class
+ * GStreamer test base class
*/
#pragma once
diff --git a/test/gstreamer/meson.build b/test/gstreamer/meson.build
index f3ba5a23..e066c582 100644
--- a/test/gstreamer/meson.build
+++ b/test/gstreamer/meson.build
@@ -8,14 +8,24 @@ gstreamer_tests = [
{'name': 'single_stream_test', 'sources': ['gstreamer_single_stream_test.cpp']},
{'name': 'multi_stream_test', 'sources': ['gstreamer_multi_stream_test.cpp']},
{'name': 'device_provider_test', 'sources': ['gstreamer_device_provider_test.cpp']},
+ {'name': 'memory_lifetime_test', 'sources': ['gstreamer_memory_lifetime_test.cpp']},
]
gstreamer_dep = dependency('gstreamer-1.0', required : true)
+gstapp_dep = dependency('gstreamer-app-1.0', required : true)
+
+gstreamer_test_args = []
+
+if asan_enabled
+ gstreamer_test_args += ['-D', 'HAVE_ASAN=1']
+endif
foreach test : gstreamer_tests
exe = executable(test['name'], test['sources'], 'gstreamer_test.cpp',
- dependencies : [libcamera_private, gstreamer_dep],
+ cpp_args : gstreamer_test_args,
+ dependencies : [libcamera_private, gstreamer_dep, gstapp_dep],
link_with : test_libraries,
include_directories : test_includes_internal)
- test(test['name'], exe, suite : 'gstreamer', is_parallel : false, env : gst_env)
+ test(test['name'], exe, suite : 'gstreamer', is_parallel : false,
+ env : gst_env, should_fail : test.get('should_fail', false))
endforeach
diff --git a/test/hotplug-cameras.cpp b/test/hotplug-cameras.cpp
index 5d9260a2..530e9a31 100644
--- a/test/hotplug-cameras.cpp
+++ b/test/hotplug-cameras.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Umang Jain <email@uajain.com>
*
- * hotplug-cameras.cpp - Test cameraAdded/cameraRemoved signals in CameraManager
+ * Test cameraAdded/cameraRemoved signals in CameraManager
*/
#include <dirent.h>
diff --git a/test/ipa/ipa_interface_test.cpp b/test/ipa/ipa_interface_test.cpp
index 56f3cd6d..b8178366 100644
--- a/test/ipa/ipa_interface_test.cpp
+++ b/test/ipa/ipa_interface_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface_test.cpp - Test the IPA interface
+ * Test the IPA interface
*/
#include <fcntl.h>
@@ -20,11 +20,11 @@
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
+#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/ipa_module.h"
#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/process.h"
#include "test.h"
@@ -44,20 +44,20 @@ public:
{
delete notifier_;
ipa_.reset();
- ipaManager_.reset();
+ cameraManager_.reset();
}
protected:
int init() override
{
- ipaManager_ = make_unique<IPAManager>();
+ cameraManager_ = make_unique<CameraManager>();
/* Create a pipeline handler for vimc. */
const std::vector<PipelineHandlerFactoryBase *> &factories =
PipelineHandlerFactoryBase::factories();
for (const PipelineHandlerFactoryBase *factory : factories) {
- if (factory->name() == "PipelineHandlerVimc") {
- pipe_ = factory->create(nullptr);
+ if (factory->name() == "vimc") {
+ pipe_ = factory->create(cameraManager_.get());
break;
}
}
@@ -171,11 +171,9 @@ private:
}
}
- ProcessManager processManager_;
-
std::shared_ptr<PipelineHandler> pipe_;
std::unique_ptr<ipa::vimc::IPAProxyVimc> ipa_;
- std::unique_ptr<IPAManager> ipaManager_;
+ std::unique_ptr<CameraManager> cameraManager_;
enum ipa::vimc::IPAOperationCode trace_;
EventNotifier *notifier_;
int fd_;
diff --git a/test/ipa/ipa_module_test.cpp b/test/ipa/ipa_module_test.cpp
index bd5e0e4c..1c97da32 100644
--- a/test/ipa/ipa_module_test.cpp
+++ b/test/ipa/ipa_module_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module_test.cpp - Test loading of the VIMC IPA module and verify its info
+ * Test loading of the VIMC IPA module and verify its info
*/
#include <iostream>
@@ -57,7 +57,7 @@ protected:
const struct IPAModuleInfo testInfo = {
IPA_MODULE_API_VERSION,
0,
- "PipelineHandlerVimc",
+ "vimc",
"vimc",
};
diff --git a/test/ipa/libipa/fixedpoint.cpp b/test/ipa/libipa/fixedpoint.cpp
new file mode 100644
index 00000000..99eb662d
--- /dev/null
+++ b/test/ipa/libipa/fixedpoint.cpp
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / Floating point utility tests
+ */
+
+#include <cmath>
+#include <iostream>
+#include <map>
+#include <stdint.h>
+
+#include "../src/ipa/libipa/fixedpoint.h"
+
+#include "test.h"
+
+using namespace std;
+using namespace libcamera;
+using namespace ipa;
+
+class FixedPointUtilsTest : public Test
+{
+protected:
+ /* R for real, I for integer */
+ template<unsigned int IntPrec, unsigned int FracPrec, typename I, typename R>
+ int testFixedToFloat(I input, R expected)
+ {
+ R out = fixedToFloatingPoint<IntPrec, FracPrec, R>(input);
+ R prec = 1.0 / (1 << FracPrec);
+ if (std::abs(out - expected) > prec) {
+ cerr << "Reverse conversion expected " << input
+ << " to convert to " << expected
+ << ", got " << out << std::endl;
+ return TestFail;
+ }
+
+ return TestPass;
+ }
+
+ template<unsigned int IntPrec, unsigned int FracPrec, typename T>
+ int testSingleFixedPoint(double input, T expected)
+ {
+ T ret = floatingToFixedPoint<IntPrec, FracPrec, T>(input);
+ if (ret != expected) {
+ cerr << "Expected " << input << " to convert to "
+ << expected << ", got " << ret << std::endl;
+ return TestFail;
+ }
+
+ /*
+ * The precision check is fairly arbitrary but is based on what
+ * the rkisp1 is capable of in the crosstalk module.
+ */
+ double f = fixedToFloatingPoint<IntPrec, FracPrec, double>(ret);
+ if (std::abs(f - input) > 0.005) {
+ cerr << "Reverse conversion expected " << ret
+ << " to convert to " << input
+ << ", got " << f << std::endl;
+ return TestFail;
+ }
+
+ return TestPass;
+ }
+
+ int testFixedPoint()
+ {
+ /*
+ * The second 7.992 test verifies that unused bits don't
+ * affect the result.
+ */
+ std::map<double, uint16_t> testCases = {
+ { 7.992, 0x3ff },
+ { 0.2, 0x01a },
+ { -0.2, 0x7e6 },
+ { -0.8, 0x79a },
+ { -0.4, 0x7cd },
+ { -1.4, 0x74d },
+ { -8, 0x400 },
+ { 0, 0 },
+ };
+
+ int ret;
+ for (const auto &testCase : testCases) {
+ ret = testSingleFixedPoint<4, 7, uint16_t>(testCase.first,
+ testCase.second);
+ if (ret != TestPass)
+ return ret;
+ }
+
+ /* Special case with a superfluous one in the unused bits */
+ ret = testFixedToFloat<4, 7, uint16_t, double>(0xbff, 7.992);
+ if (ret != TestPass)
+ return ret;
+
+ return TestPass;
+ }
+
+ int run()
+ {
+ /* fixed point conversion test */
+ if (testFixedPoint() != TestPass)
+ return TestFail;
+
+ return TestPass;
+ }
+};
+
+TEST_REGISTER(FixedPointUtilsTest)
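
The expected values in testFixedPoint() are consistent with a Q4.7 two's-complement encoding held in 11 bits (an assumption about floatingToFixedPoint()'s semantics): scale by 2^7, round to nearest, and mask to 11 bits, so -0.2 * 128 = -25.6 rounds to -26 and encodes as 2048 - 26 = 0x7e6. A hand-rolled check of that encoding against the table above:

#include <cassert>
#include <cmath>
#include <stdint.h>

static uint16_t toQ4_7(double x)
{
	/* Scale by 2^7 and round to the nearest integer... */
	int32_t raw = static_cast<int32_t>(std::lround(x * (1 << 7)));

	/* ...then keep the low 11 bits as two's complement. */
	return static_cast<uint16_t>(raw & ((1 << 11) - 1));
}

int main()
{
	assert(toQ4_7(0.2) == 0x01a);   /* 25.6 -> 26 */
	assert(toQ4_7(-0.2) == 0x7e6);  /* -25.6 -> -26 -> 2048 - 26 */
	assert(toQ4_7(-8.0) == 0x400);  /* -1024, the sign bit alone */
	assert(toQ4_7(7.992) == 0x3ff); /* 1022.976 -> 1023, the maximum */

	return 0;
}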
diff --git a/test/ipa/libipa/interpolator.cpp b/test/ipa/libipa/interpolator.cpp
new file mode 100644
index 00000000..6abb7760
--- /dev/null
+++ b/test/ipa/libipa/interpolator.cpp
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Interpolator tests
+ */
+
+#include "../src/ipa/libipa/interpolator.h"
+
+#include <cmath>
+#include <iostream>
+#include <map>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "test.h"
+
+using namespace std;
+using namespace libcamera;
+using namespace ipa;
+
+#define ASSERT_EQ(a, b) \
+ if ((a) != (b)) { \
+ printf(#a " != " #b "\n"); \
+ return TestFail; \
+ }
+
+class InterpolatorTest : public Test
+{
+protected:
+ int run()
+ {
+ Interpolator<int> interpolator;
+ interpolator.setData({ { 10, 100 }, { 20, 200 }, { 30, 300 } });
+
+ ASSERT_EQ(interpolator.getInterpolated(0), 100);
+ ASSERT_EQ(interpolator.getInterpolated(10), 100);
+ ASSERT_EQ(interpolator.getInterpolated(20), 200);
+ ASSERT_EQ(interpolator.getInterpolated(25), 250);
+ ASSERT_EQ(interpolator.getInterpolated(30), 300);
+ ASSERT_EQ(interpolator.getInterpolated(40), 300);
+
+ interpolator.setQuantization(10);
+ unsigned int q = 0;
+ ASSERT_EQ(interpolator.getInterpolated(25, &q), 300);
+ ASSERT_EQ(q, 30);
+ ASSERT_EQ(interpolator.getInterpolated(24, &q), 200);
+ ASSERT_EQ(q, 20);
+
+ return TestPass;
+ }
+};
+
+TEST_REGISTER(InterpolatorTest)
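
The assertions pin down the expected Interpolator semantics: clamp below the first and above the last key, interpolate linearly in between, and, with setQuantization(10), round the lookup key to the nearest multiple of 10 before the lookup (25 -> 30, 24 -> 20), reporting the quantized key through the out-parameter. A minimal re-implementation of the assumed lookup:

#include <cassert>
#include <cmath>
#include <iterator>
#include <map>

static int interpolate(const std::map<unsigned int, int> &data, unsigned int key)
{
	auto upper = data.lower_bound(key);
	if (upper == data.begin())
		return upper->second;            /* Clamp below the first key. */
	if (upper == data.end())
		return std::prev(upper)->second; /* Clamp above the last key. */
	if (upper->first == key)
		return upper->second;            /* Exact hit. */

	auto lower = std::prev(upper);
	double t = double(key - lower->first) / (upper->first - lower->first);

	return std::lround(lower->second + t * (upper->second - lower->second));
}

int main()
{
	std::map<unsigned int, int> data{ { 10, 100 }, { 20, 200 }, { 30, 300 } };

	assert(interpolate(data, 0) == 100);
	assert(interpolate(data, 25) == 250);
	assert(interpolate(data, 40) == 300);

	/* Quantization 10: 25 rounds up to key 30 before the lookup. */
	unsigned int q = (25 + 10 / 2) / 10 * 10;
	assert(q == 30 && interpolate(data, q) == 300);

	return 0;
}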
diff --git a/test/ipa/libipa/meson.build b/test/ipa/libipa/meson.build
new file mode 100644
index 00000000..eaf4b49a
--- /dev/null
+++ b/test/ipa/libipa/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libipa_test = [
+ {'name': 'fixedpoint', 'sources': ['fixedpoint.cpp']},
+ {'name': 'interpolator', 'sources': ['interpolator.cpp']},
+]
+
+foreach test : libipa_test
+ exe = executable(test['name'], test['sources'],
+ dependencies : [libcamera_private, libipa_dep],
+ implicit_include_directories : false,
+ link_with : [test_libraries],
+ include_directories : [test_includes_internal,
+ '../../../src/ipa/libipa/'])
+
+ test(test['name'], exe, suite : 'ipa')
+endforeach
diff --git a/test/ipa/meson.build b/test/ipa/meson.build
index 180b0da0..ceed15ba 100644
--- a/test/ipa/meson.build
+++ b/test/ipa/meson.build
@@ -1,15 +1,17 @@
# SPDX-License-Identifier: CC0-1.0
+subdir('libipa')
+
ipa_test = [
{'name': 'ipa_module_test', 'sources': ['ipa_module_test.cpp']},
{'name': 'ipa_interface_test', 'sources': ['ipa_interface_test.cpp']},
]
foreach test : ipa_test
- exe = executable(test['name'], test['sources'], libcamera_generated_ipa_headers,
- dependencies : libcamera_private,
- link_with : [libipa, test_libraries],
- include_directories : [libipa_includes, test_includes_internal])
+ exe = executable(test['name'], test['sources'],
+ dependencies : [libcamera_private, libipa_dep],
+ link_with : [test_libraries],
+ include_directories : [test_includes_internal])
test(test['name'], exe, suite : 'ipa')
endforeach
diff --git a/test/ipc/unixsocket.cpp b/test/ipc/unixsocket.cpp
index 1d4df287..f39bd986 100644
--- a/test/ipc/unixsocket.cpp
+++ b/test/ipc/unixsocket.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * unixsocket.cpp - Unix socket IPC test
+ * Unix socket IPC test
*/
#include <algorithm>
@@ -15,6 +15,7 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
+#include <vector>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/thread.h>
@@ -34,6 +35,8 @@ using namespace libcamera;
using namespace std;
using namespace std::chrono_literals;
+namespace {
+
int calculateLength(int fd)
{
lseek(fd, 0, 0);
@@ -43,6 +46,8 @@ int calculateLength(int fd)
return size;
}
+} /* namespace */
+
class UnixSocketTestSlave
{
public:
@@ -336,14 +341,14 @@ protected:
for (unsigned int i = 0; i < std::size(strings); i++) {
unsigned int len = strlen(strings[i]);
- char buf[len];
+ std::vector<char> buf(len);
close(fds[i]);
- if (read(response.fds[0], &buf, len) <= 0)
+ if (read(response.fds[0], buf.data(), len) <= 0)
return TestFail;
- if (memcmp(buf, strings[i], len))
+ if (memcmp(buf.data(), strings[i], len))
return TestFail;
}
diff --git a/test/ipc/unixsocket_ipc.cpp b/test/ipc/unixsocket_ipc.cpp
index 3ee6017e..df7d9c2b 100644
--- a/test/ipc/unixsocket_ipc.cpp
+++ b/test/ipc/unixsocket_ipc.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * unixsocket_ipc.cpp - Unix socket IPC test
+ * Unix socket IPC test
*/
#include <algorithm>
diff --git a/test/libtest/buffer_source.h b/test/libtest/buffer_source.h
index 0cc71aa5..495da8a9 100644
--- a/test/libtest/buffer_source.h
+++ b/test/libtest/buffer_source.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * buffer_source.h - libcamera camera test helper to create FrameBuffers
+ * libcamera camera test helper to create FrameBuffers
*/
#pragma once
diff --git a/test/libtest/camera_test.h b/test/libtest/camera_test.h
index 0b178bc2..713b503f 100644
--- a/test/libtest/camera_test.h
+++ b/test/libtest/camera_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_test.h - libcamera camera test base class
+ * libcamera camera test base class
*/
#pragma once
diff --git a/test/libtest/test.cpp b/test/libtest/test.cpp
index af37b4dd..4e03def9 100644
--- a/test/libtest/test.cpp
+++ b/test/libtest/test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * test.cpp - libcamera test base class
+ * libcamera test base class
*/
#include <stdlib.h>
diff --git a/test/libtest/test.h b/test/libtest/test.h
index 23b07743..3a90885d 100644
--- a/test/libtest/test.h
+++ b/test/libtest/test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * test.h - libcamera test base class
+ * libcamera test base class
*/
#pragma once
diff --git a/test/log/log_api.cpp b/test/log/log_api.cpp
index 53118960..0b999738 100644
--- a/test/log/log_api.cpp
+++ b/test/log/log_api.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * log.cpp - log API test
+ * log API test
*/
#include <algorithm>
diff --git a/test/log/log_process.cpp b/test/log/log_process.cpp
index 1926c560..9609e23d 100644
--- a/test/log/log_process.cpp
+++ b/test/log/log_process.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * log_process.cpp - Logging in isolated child process test
+ * Logging in isolated child process test
*/
#include <fcntl.h>
diff --git a/test/media_device/media_device_link_test.cpp b/test/media_device/media_device_link_test.cpp
index e11f6b78..31528000 100644
--- a/test/media_device/media_device_link_test.cpp
+++ b/test/media_device/media_device_link_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * media_device_link_test.cpp - Tests link handling on VIMC media device
+ * Tests link handling on VIMC media device
*/
#include <iostream>
diff --git a/test/media_device/media_device_print_test.cpp b/test/media_device/media_device_print_test.cpp
index cdec5b8d..63aeed48 100644
--- a/test/media_device/media_device_print_test.cpp
+++ b/test/media_device/media_device_print_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * media_device_print_test.cpp - Print out media devices
+ * Print out media devices
*/
#include <iostream>
diff --git a/test/media_device/media_device_test.cpp b/test/media_device/media_device_test.cpp
index 1397d123..3e41d0f0 100644
--- a/test/media_device/media_device_test.cpp
+++ b/test/media_device/media_device_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * media_device_test.cpp - libcamera media device test base class
+ * libcamera media device test base class
*/
#include <iostream>
diff --git a/test/media_device/media_device_test.h b/test/media_device/media_device_test.h
index 9b226f1a..5223b760 100644
--- a/test/media_device/media_device_test.h
+++ b/test/media_device/media_device_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * media_device_test.h - libcamera media device test base class
+ * libcamera media device test base class
*/
#pragma once
diff --git a/test/meson.build b/test/meson.build
index 8b6057d4..40956649 100644
--- a/test/meson.build
+++ b/test/meson.build
@@ -73,6 +73,7 @@ internal_tests = [
{'name': 'timer-thread', 'sources': ['timer-thread.cpp']},
{'name': 'unique-fd', 'sources': ['unique-fd.cpp']},
{'name': 'utils', 'sources': ['utils.cpp']},
+ {'name': 'vector', 'sources': ['vector.cpp']},
{'name': 'yaml-parser', 'sources': ['yaml-parser.cpp']},
]
@@ -89,6 +90,7 @@ foreach test : public_tests
exe = executable(test['name'], test['sources'],
dependencies : deps,
+ implicit_include_directories : false,
link_with : test_libraries,
include_directories : test_includes_public)
@@ -103,6 +105,7 @@ foreach test : internal_tests
exe = executable(test['name'], test['sources'],
dependencies : deps,
+ implicit_include_directories : false,
link_with : test_libraries,
include_directories : test_includes_internal)
@@ -117,6 +120,7 @@ foreach test : internal_non_parallel_tests
exe = executable(test['name'], test['sources'],
dependencies : deps,
+ implicit_include_directories : false,
link_with : test_libraries,
include_directories : test_includes_internal)
diff --git a/test/message.cpp b/test/message.cpp
index 2f9f281c..19e6646d 100644
--- a/test/message.cpp
+++ b/test/message.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.cpp - Messages test
+ * Messages test
*/
#include <chrono>
diff --git a/test/object-delete.cpp b/test/object-delete.cpp
index 80b7dc41..676c3970 100644
--- a/test/object-delete.cpp
+++ b/test/object-delete.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * object.cpp - Object deletion tests
+ * Object deletion tests
*/
#include <iostream>
diff --git a/test/object-invoke.cpp b/test/object-invoke.cpp
index b1c0f473..def1e61e 100644
--- a/test/object-invoke.cpp
+++ b/test/object-invoke.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object-invoke.cpp - Cross-thread Object method invocation test
+ * Cross-thread Object method invocation test
*/
#include <iostream>
diff --git a/test/object.cpp b/test/object.cpp
index cbd0d3ec..95dc1ef3 100644
--- a/test/object.cpp
+++ b/test/object.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.cpp - Object tests
+ * Object tests
*/
#include <iostream>
diff --git a/test/process/process_test.cpp b/test/process/process_test.cpp
index cb6940c6..e9f5e7e9 100644
--- a/test/process/process_test.cpp
+++ b/test/process/process_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process_test.cpp - Process test
+ * Process test
*/
#include <iostream>
diff --git a/test/public-api.cpp b/test/public-api.cpp
index a1cebcf9..b1336f75 100644
--- a/test/public-api.cpp
+++ b/test/public-api.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * public-api.cpp - Public API validation
+ * Public API validation
*/
#include <libcamera/libcamera.h>
diff --git a/test/py/meson.build b/test/py/meson.build
index 0b679d31..b922e857 100644
--- a/test/py/meson.build
+++ b/test/py/meson.build
@@ -13,15 +13,25 @@ if asan_runtime_missing
subdir_done()
endif
+py_env = environment()
+
pymod = import('python')
py3 = pymod.find_installation('python3')
pypathdir = meson.project_build_root() / 'src' / 'py'
-py_env = ['PYTHONPATH=' + pypathdir]
+py_env.append('PYTHONPATH', pypathdir)
if asan_enabled
+ py_env.append('LD_PRELOAD', asan_runtime)
+
+ # Preload the C++ standard library to work around a bug in ASan when
+ # dynamically loading C++ .so modules.
+ stdlib = run_command(cxx, '-print-file-name=' + cxx_stdlib + '.so',
+ check : true).stdout().strip()
+ py_env.append('LD_PRELOAD', stdlib)
+
# Disable leak detection as the Python interpreter is full of leaks.
- py_env += ['LD_PRELOAD=' + asan_runtime, 'ASAN_OPTIONS=detect_leaks=0']
+ py_env.append('ASAN_OPTIONS', 'detect_leaks=0')
endif
test('pyunittests',
diff --git a/test/py/unittests.py b/test/py/unittests.py
index 1caea98e..8cb850d4 100755
--- a/test/py/unittests.py
+++ b/test/py/unittests.py
@@ -66,7 +66,7 @@ class SimpleTestMethods(BaseTestCase):
libcam.log_set_level('Camera', 'FATAL')
with self.assertRaises(RuntimeError):
cam.acquire()
- libcam.log_set_level('Camera', 'ERROR')
+ libcam.log_set_level('Camera', 'INFO')
cam.release()
diff --git a/test/serialization/control_serialization.cpp b/test/serialization/control_serialization.cpp
index a507d98a..06c572b7 100644
--- a/test/serialization/control_serialization.cpp
+++ b/test/serialization/control_serialization.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serialization.cpp - Serialize and deserialize controls
+ * Serialize and deserialize controls
*/
#include <iostream>
diff --git a/test/serialization/generated_serializer/generated_serializer_test.cpp b/test/serialization/generated_serializer/generated_serializer_test.cpp
index 4670fe46..dd696885 100644
--- a/test/serialization/generated_serializer/generated_serializer_test.cpp
+++ b/test/serialization/generated_serializer/generated_serializer_test.cpp
@@ -2,10 +2,11 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * generated_serializer_test.cpp - Test generated serializer
+ * Test generated serializer
*/
#include <algorithm>
+#include <iostream>
#include <tuple>
#include <vector>
diff --git a/test/serialization/generated_serializer/include/libcamera/ipa/meson.build b/test/serialization/generated_serializer/include/libcamera/ipa/meson.build
index 6f8794c1..ae08e9be 100644
--- a/test/serialization/generated_serializer/include/libcamera/ipa/meson.build
+++ b/test/serialization/generated_serializer/include/libcamera/ipa/meson.build
@@ -9,7 +9,8 @@ mojom = custom_target('test_mojom_module',
'--output-root', meson.project_build_root(),
'--input-root', meson.project_source_root(),
'--mojoms', '@INPUT@'
- ])
+ ],
+ env : py_build_env)
# test_ipa_interface.h
generated_test_header = custom_target('test_ipa_interface_h',
@@ -23,7 +24,8 @@ generated_test_header = custom_target('test_ipa_interface_h',
'--libcamera_generate_header',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
# test_ipa_serializer.h
generated_test_serializer = custom_target('test_ipa_serializer_h',
@@ -37,4 +39,5 @@ generated_test_serializer = custom_target('test_ipa_serializer_h',
'--libcamera_generate_serializer',
'--libcamera_output_path=@OUTPUT@',
'./' +'@INPUT@'
- ])
+ ],
+ env : py_build_env)
diff --git a/test/serialization/ipa_data_serializer_test.cpp b/test/serialization/ipa_data_serializer_test.cpp
index 377ecdb0..afea93a6 100644
--- a/test/serialization/ipa_data_serializer_test.cpp
+++ b/test/serialization/ipa_data_serializer_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer_test.cpp - Test serializing/deserializing with IPADataSerializer
+ * Test serializing/deserializing with IPADataSerializer
*/
#include <algorithm>
@@ -29,7 +29,7 @@ using namespace std;
using namespace libcamera;
static const ControlInfoMap Controls = ControlInfoMap({
- { &controls::AeEnable, ControlInfo(false, true) },
+ { &controls::DebugMetadataEnable, ControlInfo(false, true) },
{ &controls::ExposureTime, ControlInfo(0, 999999) },
{ &controls::AnalogueGain, ControlInfo(1.0f, 32.0f) },
{ &controls::ColourGains, ControlInfo(0.0f, 32.0f) },
diff --git a/test/serialization/serialization_test.cpp b/test/serialization/serialization_test.cpp
index 11d0f0f3..af9969fd 100644
--- a/test/serialization/serialization_test.cpp
+++ b/test/serialization/serialization_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * serialization_test.cpp - Base class for serialization tests
+ * Base class for serialization tests
*/
#include "serialization_test.h"
diff --git a/test/serialization/serialization_test.h b/test/serialization/serialization_test.h
index 609f9fdf..760e3721 100644
--- a/test/serialization/serialization_test.h
+++ b/test/serialization/serialization_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * serialization_test.h - Base class for serialization tests
+ * Base class for serialization tests
*/
#pragma once
diff --git a/test/shared-fd.cpp b/test/shared-fd.cpp
index 997d7be1..57199dfe 100644
--- a/test/shared-fd.cpp
+++ b/test/shared-fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.cpp - SharedFD test
+ * SharedFD test
*/
#include <fcntl.h>
diff --git a/test/signal-threads.cpp b/test/signal-threads.cpp
index 8c212b6f..c4789c83 100644
--- a/test/signal-threads.cpp
+++ b/test/signal-threads.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal-threads.cpp - Cross-thread signal delivery test
+ * Cross-thread signal delivery test
*/
#include <chrono>
diff --git a/test/signal.cpp b/test/signal.cpp
index 5c6b304d..3f596b22 100644
--- a/test/signal.cpp
+++ b/test/signal.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.cpp - Signal test
+ * Signal test
*/
#include <iostream>
diff --git a/test/span.cpp b/test/span.cpp
index abf3a5d6..4b9f3279 100644
--- a/test/span.cpp
+++ b/test/span.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * span.cpp - Span tests
+ * Span tests
*/
/*
@@ -143,9 +143,9 @@ protected:
Span<const int>{ v };
/* Span<float>{ v }; */
- Span<const int>{ v };
- /* Span<int>{ v }; */
- /* Span<const float>{ v }; */
+ Span<const int>{ cv };
+ /* Span<int>{ cv }; */
+ /* Span<const float>{ cv }; */
Span<int> dynamicSpan{ i };
Span<int>{ dynamicSpan };
diff --git a/test/stream/stream_colorspace.cpp b/test/stream/stream_colorspace.cpp
index 1b7afe65..4c904c4c 100644
--- a/test/stream/stream_colorspace.cpp
+++ b/test/stream/stream_colorspace.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Ideas on Board Oy.
*
- * stream_colorspace.cpp - Stream colorspace adjustment test
+ * Stream colorspace adjustment test
*/
#include <iostream>
diff --git a/test/stream/stream_formats.cpp b/test/stream/stream_formats.cpp
index 99fa0385..553b59aa 100644
--- a/test/stream/stream_formats.cpp
+++ b/test/stream/stream_formats.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream_formats.cpp - StreamFormats test
+ * StreamFormats test
*/
#include <iostream>
diff --git a/test/threads.cpp b/test/threads.cpp
index 8f366c9d..8d6ee151 100644
--- a/test/threads.cpp
+++ b/test/threads.cpp
@@ -2,16 +2,18 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * threads.cpp - Threads test
+ * Threads test
*/
#include <chrono>
#include <iostream>
#include <memory>
#include <pthread.h>
+#include <sched.h>
#include <thread>
#include <time.h>
+#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>
#include "test.h"
@@ -66,6 +68,27 @@ private:
bool &cancelled_;
};
+class CpuSetTester : public Object
+{
+public:
+ CpuSetTester(unsigned int cpuset)
+ : cpuset_(cpuset) {}
+
+ bool testCpuSet()
+ {
+ int ret = sched_getcpu();
+ if (static_cast<int>(cpuset_) != ret) {
+ cout << "Invalid cpuset: " << ret << ", expecting: " << cpuset_ << endl;
+ return false;
+ }
+
+ return true;
+ }
+
+private:
+ const unsigned int cpuset_;
+};
+
class ThreadTest : public Test
{
protected:
@@ -165,6 +188,23 @@ protected:
return TestFail;
}
+ const unsigned int numCpus = std::thread::hardware_concurrency();
+ for (unsigned int i = 0; i < numCpus; ++i) {
+ thread = std::make_unique<Thread>();
+ const std::array<const unsigned int, 1> cpus{ i };
+ thread->setThreadAffinity(cpus);
+ thread->start();
+
+ CpuSetTester tester(i);
+ tester.moveToThread(thread.get());
+
+ if (!tester.invokeMethod(&CpuSetTester::testCpuSet, ConnectionTypeBlocking))
+ return TestFail;
+
+ thread->exit(0);
+ thread->wait();
+ }
+
return TestPass;
}
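
The new affinity test above follows a three-step pattern: pin a Thread to a single CPU before starting it, move an Object onto that thread, then use a blocking cross-thread invocation so the check runs on the pinned CPU. A minimal standalone sketch of the same pattern, using only the Thread/Object API visible in the hunk (the Pinned and checkCpu names are illustrative, not part of the tree):

#include <array>
#include <sched.h>

#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>

using namespace libcamera;

class Pinned : public Object
{
public:
	/* Runs on whichever thread this object has been moved to. */
	bool checkCpu(unsigned int expected)
	{
		return static_cast<unsigned int>(sched_getcpu()) == expected;
	}
};

int main()
{
	Thread thread;
	const std::array<const unsigned int, 1> cpus{ 0 };
	thread.setThreadAffinity(cpus); /* Set before start(), as in the test. */
	thread.start();

	Pinned obj;
	obj.moveToThread(&thread);

	/* A blocking invocation executes checkCpu() on the pinned thread. */
	bool ok = obj.invokeMethod(&Pinned::checkCpu, ConnectionTypeBlocking, 0u);

	thread.exit(0);
	thread.wait();

	return ok ? 0 : 1;
}
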
diff --git a/test/timer-fail.cpp b/test/timer-fail.cpp
index 82854b89..0ced6441 100644
--- a/test/timer-fail.cpp
+++ b/test/timer-fail.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024, Ideas on Board Oy
*
- * timer-fail.cpp - Threaded timer failure test
+ * Threaded timer failure test
*/
#include <chrono>
diff --git a/test/timer-thread.cpp b/test/timer-thread.cpp
index 8675e248..55e5cfdf 100644
--- a/test/timer-thread.cpp
+++ b/test/timer-thread.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer-thread.cpp - Threaded timer test
+ * Threaded timer test
*/
#include <chrono>
diff --git a/test/timer.cpp b/test/timer.cpp
index 0f01c3cb..2eacc059 100644
--- a/test/timer.cpp
+++ b/test/timer.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.cpp - Timer test
+ * Timer test
*/
#include <chrono>
diff --git a/test/transform.cpp b/test/transform.cpp
index fbc0308c..4ec7a1eb 100644
--- a/test/transform.cpp
+++ b/test/transform.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2023, Ideas On Board Oy
*
- * transform.cpp - Transform and Orientation tests
+ * Transform and Orientation tests
*/
#include <iostream>
diff --git a/test/unique-fd.cpp b/test/unique-fd.cpp
index eb3b591f..e556439e 100644
--- a/test/unique-fd.cpp
+++ b/test/unique-fd.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique-fd.cpp - UniqueFD test
+ * UniqueFD test
*/
#include <fcntl.h>
diff --git a/test/utils.cpp b/test/utils.cpp
index fc56e14e..d25475cb 100644
--- a/test/utils.cpp
+++ b/test/utils.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * utils.cpp - Miscellaneous utility tests
+ * Miscellaneous utility tests
*/
#include <iostream>
@@ -176,6 +176,14 @@ protected:
std::ostringstream os;
std::string ref;
+ os << utils::hex(static_cast<int8_t>(0x42)) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<uint8_t>(0x42)) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<int16_t>(0x42)) << " ";
+ ref += "0x0042 ";
+ os << utils::hex(static_cast<uint16_t>(0x42)) << " ";
+ ref += "0x0042 ";
os << utils::hex(static_cast<int32_t>(0x42)) << " ";
ref += "0x00000042 ";
os << utils::hex(static_cast<uint32_t>(0x42)) << " ";
@@ -184,6 +192,15 @@ protected:
ref += "0x0000000000000042 ";
os << utils::hex(static_cast<uint64_t>(0x42)) << " ";
ref += "0x0000000000000042 ";
+
+ os << utils::hex(static_cast<int8_t>(0x42), 6) << " ";
+ ref += "0x000042 ";
+ os << utils::hex(static_cast<uint8_t>(0x42), 1) << " ";
+ ref += "0x42 ";
+ os << utils::hex(static_cast<int16_t>(0x42), 6) << " ";
+ ref += "0x000042 ";
+ os << utils::hex(static_cast<uint16_t>(0x42), 1) << " ";
+ ref += "0x42 ";
os << utils::hex(static_cast<int32_t>(0x42), 4) << " ";
ref += "0x0042 ";
os << utils::hex(static_cast<uint32_t>(0x42), 1) << " ";
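
The added assertions pin down the default field width of utils::hex(): two hex digits per byte of the integer type (0x42 for 8-bit values, 0x0042 for 16-bit), with an explicit width argument overriding that default. A condensed sketch of the behaviour the test asserts:

#include <cstdint>
#include <iostream>

#include <libcamera/base/utils.h>

using namespace libcamera;

int main()
{
	/* Default width: 2 * sizeof(T) hex digits. */
	std::cout << utils::hex(static_cast<uint8_t>(0x42)) << '\n';	/* 0x42 */
	std::cout << utils::hex(static_cast<uint16_t>(0x42)) << '\n';	/* 0x0042 */

	/* An explicit width widens or narrows the zero padding. */
	std::cout << utils::hex(static_cast<uint16_t>(0x42), 6) << '\n';	/* 0x000042 */
	std::cout << utils::hex(static_cast<uint16_t>(0x42), 1) << '\n';	/* 0x42 */

	return 0;
}
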
diff --git a/test/v4l2_compat/v4l2_compat_test.py b/test/v4l2_compat/v4l2_compat_test.py
index bd89d496..443babc2 100755
--- a/test/v4l2_compat/v4l2_compat_test.py
+++ b/test/v4l2_compat/v4l2_compat_test.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# v4l2_compat_test.py - Test the V4L2 compatibility layer
+# Test the V4L2 compatibility layer
import argparse
import glob
diff --git a/test/v4l2_subdevice/v4l2_subdevice_test.cpp b/test/v4l2_subdevice/v4l2_subdevice_test.cpp
index d8fbfd9f..c349c9e3 100644
--- a/test/v4l2_subdevice/v4l2_subdevice_test.cpp
+++ b/test/v4l2_subdevice/v4l2_subdevice_test.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice_test.cpp - VIMC-based V4L2 subdevice test
+ * VIMC-based V4L2 subdevice test
*/
#include <iostream>
diff --git a/test/v4l2_subdevice/v4l2_subdevice_test.h b/test/v4l2_subdevice/v4l2_subdevice_test.h
index e73c583b..89b78302 100644
--- a/test/v4l2_subdevice/v4l2_subdevice_test.h
+++ b/test/v4l2_subdevice/v4l2_subdevice_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice_test.h - VIMC-based V4L2 subdevice test
+ * VIMC-based V4L2 subdevice test
*/
#pragma once
diff --git a/test/v4l2_videodevice/capture_async.cpp b/test/v4l2_videodevice/capture_async.cpp
index 42e1e671..67366461 100644
--- a/test/v4l2_videodevice/capture_async.cpp
+++ b/test/v4l2_videodevice/capture_async.cpp
@@ -61,10 +61,12 @@ protected:
if (ret)
return TestFail;
- timeout.start(10000ms);
+ const unsigned int nFrames = 30;
+
+ timeout.start(500ms * nFrames);
while (timeout.isRunning()) {
dispatcher->processEvents();
- if (frames > 30)
+ if (frames > nFrames)
break;
}
@@ -73,8 +75,9 @@ protected:
return TestFail;
}
- if (frames < 30) {
- std::cout << "Failed to capture 30 frames within timeout." << std::endl;
+ if (frames < nFrames) {
+ std::cout << "Failed to capture " << nFrames
+ << " frames within timeout." << std::endl;
return TestFail;
}
diff --git a/test/v4l2_videodevice/controls.cpp b/test/v4l2_videodevice/controls.cpp
index 0f603b85..b0130295 100644
--- a/test/v4l2_videodevice/controls.cpp
+++ b/test/v4l2_videodevice/controls.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - V4L2 device controls handling test
+ * V4L2 device controls handling test
*/
#include <algorithm>
diff --git a/test/v4l2_videodevice/v4l2_videodevice_test.cpp b/test/v4l2_videodevice/v4l2_videodevice_test.cpp
index 1113cf5b..9fbd24cc 100644
--- a/test/v4l2_videodevice/v4l2_videodevice_test.cpp
+++ b/test/v4l2_videodevice/v4l2_videodevice_test.cpp
@@ -64,8 +64,8 @@ int V4L2VideoDeviceTest::init()
format.size.height = 480;
if (driver_ == "vimc") {
- sensor_ = new CameraSensor(media_->getEntityByName("Sensor A"));
- if (sensor_->init())
+ sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor A"));
+ if (!sensor_)
return TestFail;
debayer_ = new V4L2Subdevice(media_->getEntityByName("Debayer A"));
@@ -98,6 +98,5 @@ void V4L2VideoDeviceTest::cleanup()
capture_->close();
delete debayer_;
- delete sensor_;
delete capture_;
}
diff --git a/test/v4l2_videodevice/v4l2_videodevice_test.h b/test/v4l2_videodevice/v4l2_videodevice_test.h
index d2de1a6d..7c9003ec 100644
--- a/test/v4l2_videodevice/v4l2_videodevice_test.h
+++ b/test/v4l2_videodevice/v4l2_videodevice_test.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vl42device_test.h - libcamera v4l2device test base class
+ * libcamera v4l2device test base class
*/
#pragma once
@@ -36,7 +36,7 @@ protected:
std::string entity_;
std::unique_ptr<libcamera::DeviceEnumerator> enumerator_;
std::shared_ptr<libcamera::MediaDevice> media_;
- libcamera::CameraSensor *sensor_;
+ std::unique_ptr<libcamera::CameraSensor> sensor_;
libcamera::V4L2Subdevice *debayer_;
libcamera::V4L2VideoDevice *capture_;
std::vector<std::unique_ptr<libcamera::FrameBuffer>> buffers_;
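
The sensor handling above replaces a raw pointer plus a separate init() call with a factory that returns a std::unique_ptr, so failure collapses into a single null check and the manual delete in cleanup() disappears. A hedged sketch of the new ownership shape (makeSensor is an illustrative helper, and the header locations are assumptions based on the test's usage):

#include <memory>

#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"

using namespace libcamera;

/* Create a sensor for a named media entity, or return nullptr. */
std::unique_ptr<CameraSensor> makeSensor(MediaDevice *media, const char *name)
{
	/* The factory reports failure as a null pointer; there is no
	 * separate init() step and no manual delete afterwards. */
	return CameraSensorFactoryBase::create(media->getEntityByName(name));
}
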
diff --git a/test/vector.cpp b/test/vector.cpp
new file mode 100644
index 00000000..4fae960d
--- /dev/null
+++ b/test/vector.cpp
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Vector tests
+ */
+
+#include "libcamera/internal/vector.h"
+
+#include <cmath>
+#include <iostream>
+
+#include "test.h"
+
+using namespace libcamera;
+
+#define ASSERT_EQ(a, b) \
+if ((a) != (b)) { \
+ std::cout << #a " != " #b << " (line " << __LINE__ << ")" \
+ << std::endl; \
+ return TestFail; \
+}
+
+class VectorTest : public Test
+{
+protected:
+ int run()
+ {
+ Vector<double, 3> v1{ 0.0 };
+
+ ASSERT_EQ(v1[0], 0.0);
+ ASSERT_EQ(v1[1], 0.0);
+ ASSERT_EQ(v1[2], 0.0);
+
+ ASSERT_EQ(v1.length(), 0.0);
+ ASSERT_EQ(v1.length2(), 0.0);
+
+ Vector<double, 3> v2{{ 1.0, 4.0, 8.0 }};
+
+ ASSERT_EQ(v2[0], 1.0);
+ ASSERT_EQ(v2[1], 4.0);
+ ASSERT_EQ(v2[2], 8.0);
+
+ ASSERT_EQ(v2.x(), 1.0);
+ ASSERT_EQ(v2.y(), 4.0);
+ ASSERT_EQ(v2.z(), 8.0);
+
+ ASSERT_EQ(v2.r(), 1.0);
+ ASSERT_EQ(v2.g(), 4.0);
+ ASSERT_EQ(v2.b(), 8.0);
+
+ ASSERT_EQ(v2.length2(), 81.0);
+ ASSERT_EQ(v2.length(), 9.0);
+ ASSERT_EQ(v2.sum(), 13.0);
+
+ Vector<double, 3> v3{ v2 };
+
+ ASSERT_EQ(v2, v3);
+
+ v3 = Vector<double, 3>{{ 4.0, 4.0, 4.0 }};
+
+ ASSERT_EQ(v2 + v3, (Vector<double, 3>{{ 5.0, 8.0, 12.0 }}));
+ ASSERT_EQ(v2 + 4.0, (Vector<double, 3>{{ 5.0, 8.0, 12.0 }}));
+ ASSERT_EQ(v2 - v3, (Vector<double, 3>{{ -3.0, 0.0, 4.0 }}));
+ ASSERT_EQ(v2 - 4.0, (Vector<double, 3>{{ -3.0, 0.0, 4.0 }}));
+ ASSERT_EQ(v2 * v3, (Vector<double, 3>{{ 4.0, 16.0, 32.0 }}));
+ ASSERT_EQ(v2 * 4.0, (Vector<double, 3>{{ 4.0, 16.0, 32.0 }}));
+ ASSERT_EQ(v2 / v3, (Vector<double, 3>{{ 0.25, 1.0, 2.0 }}));
+ ASSERT_EQ(v2 / 4.0, (Vector<double, 3>{{ 0.25, 1.0, 2.0 }}));
+
+ ASSERT_EQ(v2.min(v3), (Vector<double, 3>{{ 1.0, 4.0, 4.0 }}));
+ ASSERT_EQ(v2.min(4.0), (Vector<double, 3>{{ 1.0, 4.0, 4.0 }}));
+ ASSERT_EQ(v2.max(v3), (Vector<double, 3>{{ 4.0, 4.0, 8.0 }}));
+ ASSERT_EQ(v2.max(4.0), (Vector<double, 3>{{ 4.0, 4.0, 8.0 }}));
+
+ ASSERT_EQ(v2.dot(v3), 52.0);
+
+ v2 += v3;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 5.0, 8.0, 12.0 }}));
+ v2 -= v3;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 1.0, 4.0, 8.0 }}));
+ v2 *= v3;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 4.0, 16.0, 32.0 }}));
+ v2 /= v3;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 1.0, 4.0, 8.0 }}));
+
+ v2 += 4.0;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 5.0, 8.0, 12.0 }}));
+ v2 -= 4.0;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 1.0, 4.0, 8.0 }}));
+ v2 *= 4.0;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 4.0, 16.0, 32.0 }}));
+ v2 /= 4.0;
+ ASSERT_EQ(v2, (Vector<double, 3>{{ 1.0, 4.0, 8.0 }}));
+
+ return TestPass;
+ }
+};
+
+TEST_REGISTER(VectorTest)
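
At a glance, the new test pins down brace initialisation, the x/y/z and r/g/b accessor aliases, element-wise arithmetic against both vectors and scalars, and the norm helpers. A condensed usage sketch drawn from the assertions above:

#include <cassert>

#include "libcamera/internal/vector.h"

using namespace libcamera;

int main()
{
	Vector<double, 3> v{{ 1.0, 4.0, 8.0 }};

	/* x/y/z and r/g/b alias elements 0, 1 and 2. */
	assert(v.x() == 1.0 && v.r() == 1.0);

	/* Arithmetic is element-wise, against vectors or scalars. */
	Vector<double, 3> w = v * 2.0;
	assert(w[0] == 2.0 && w[1] == 8.0 && w[2] == 16.0);

	/* length2() is the squared Euclidean norm, i.e. dot(self). */
	assert(v.length2() == 81.0);
	assert(v.length() == 9.0);
	assert(v.dot(v) == v.length2());

	return 0;
}
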
diff --git a/test/yaml-parser.cpp b/test/yaml-parser.cpp
index 2d92463a..1b22c87b 100644
--- a/test/yaml-parser.cpp
+++ b/test/yaml-parser.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml-parser.cpp - YAML parser operations tests
+ * YAML parser operations tests
*/
#include <array>
@@ -34,10 +34,12 @@ static const string testYaml =
"list:\n"
" - James\n"
" - Mary\n"
+ " - \n"
"dictionary:\n"
" a: 1\n"
" c: 3\n"
" b: 2\n"
+ " empty:\n"
"level1:\n"
" level2:\n"
" - [1, 2]\n"
@@ -430,9 +432,10 @@ protected:
if (testObjectType(listObj, "list", Type::List) != TestPass)
return TestFail;
- static constexpr std::array<const char *, 2> listValues{
+ static constexpr std::array<const char *, 3> listValues{
"James",
"Mary",
+ "",
};
if (listObj.size() != listValues.size()) {
@@ -465,16 +468,23 @@ protected:
i++;
}
+ /* Ensure that empty objects get parsed as empty strings. */
+ if (!listObj[2].isValue()) {
+ cerr << "Empty object is not a value" << std::endl;
+ return TestFail;
+ }
+
/* Test dictionary object */
auto &dictObj = (*root)["dictionary"];
if (testObjectType(dictObj, "dictionary", Type::Dictionary) != TestPass)
return TestFail;
- static constexpr std::array<std::pair<const char *, int>, 3> dictValues{ {
+ static constexpr std::array<std::pair<const char *, int>, 4> dictValues{ {
{ "a", 1 },
{ "c", 3 },
{ "b", 2 },
+ { "empty", -100 },
} };
size_t dictSize = dictValues.size();
@@ -505,7 +515,7 @@ protected:
return TestFail;
}
- if (elem.get<int32_t>(0) != item.second) {
+ if (elem.get<int32_t>(-100) != item.second) {
std::cerr << "Dictionary element " << i << " has wrong value"
<< std::endl;
return TestFail;
@@ -514,6 +524,42 @@ protected:
i++;
}
+ /* Ensure that empty objects get parsed as empty strings. */
+ if (!dictObj["empty"].isValue()) {
+ cerr << "Empty object is not of type value" << std::endl;
+ return TestFail;
+ }
+
+ /* Ensure that keys without values are added to a dict. */
+ if (!dictObj.contains("empty")) {
+ cerr << "Empty element is missing in dict" << std::endl;
+ return TestFail;
+ }
+
+ /* Test access to nonexistent member. */
+ if (dictObj["nonexistent"].get<std::string>("default") != "default") {
+ cerr << "Accessing nonexistent dict entry fails to return default" << std::endl;
+ return TestFail;
+ }
+
+	/* Test that a nonexistent object has an empty value type. */
+ if (!dictObj["nonexistent"].isEmpty()) {
+ cerr << "Accessing nonexistent object returns non-empty object" << std::endl;
+ return TestFail;
+ }
+
+ /* Test explicit cast to bool on an empty object returns true. */
+ if (!!dictObj["empty"] != true) {
+ cerr << "Casting empty entry to bool returns false" << std::endl;
+ return TestFail;
+ }
+
+ /* Test explicit cast to bool on nonexistent object returns false. */
+ if (!!dictObj["nonexistent"] != false) {
+ cerr << "Casting nonexistent dict entry to bool returns true" << std::endl;
+ return TestFail;
+ }
+
/* Make sure utils::map_keys() works on the adapter. */
(void)utils::map_keys(dictObj.asDict());
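
Taken together, the new assertions define the empty-value contract: a key that is present but has no value parses as a value-type node holding an empty string (and casts to true), while a key that does not exist yields an empty node that casts to false and makes get() fall back to its default. A condensed sketch, assuming root was obtained from YamlParser::parse() on the test document as in the test:

#include <cassert>
#include <string>

#include "libcamera/internal/yaml_parser.h"

using namespace libcamera;

void checkEmptySemantics(const YamlObject &root)
{
	const YamlObject &dict = root["dictionary"];

	/* A key without a value is a value node holding an empty string. */
	assert(dict.contains("empty"));
	assert(dict["empty"].isValue());
	assert(!!dict["empty"]);	/* The entry exists: bool cast is true. */

	/* A nonexistent key yields an empty node... */
	assert(dict["nonexistent"].isEmpty());
	assert(!dict["nonexistent"]);

	/* ...whose get() returns the supplied default. */
	assert(dict["nonexistent"].get<std::string>("default") == "default");
}
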
diff --git a/utils/abi-compat.sh b/utils/abi-compat.sh
index c936ac05..31f61e32 100755
--- a/utils/abi-compat.sh
+++ b/utils/abi-compat.sh
@@ -156,15 +156,16 @@ create_abi_dump() {
# Generate a minimal libcamera build. "lib" and "prefix" are
# defined explicitly to avoid system default ambiguities.
meson setup "$build" "$worktree" \
- -Dlibdir=lib \
- -Dprefix=/usr/local/ \
- -Ddocumentation=disabled \
-Dcam=disabled \
- -Dqcam=disabled \
+ -Ddocumentation=disabled \
-Dgstreamer=disabled \
-Dlc-compliance=disabled \
- -Dtracing=disabled \
- -Dpipelines=
+ -Dlibdir=lib \
+ -Dpipelines= \
+ -Dprefix=/usr/local/ \
+ -Dpycamera=disabled \
+ -Dqcam=disabled \
+ -Dtracing=disabled
ninja -C "$build"
DESTDIR="$install" ninja -C "$build" install
diff --git a/utils/checkstyle.py b/utils/checkstyle.py
index 84f44a42..f6229bbd 100755
--- a/utils/checkstyle.py
+++ b/utils/checkstyle.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# checkstyle.py - A patch style checker script based on clang-format
+# A patch style checker script based on clang-format
#
# TODO:
#
@@ -23,7 +23,6 @@ import subprocess
import sys
dependencies = {
- 'clang-format': True,
'git': True,
}
@@ -211,36 +210,66 @@ class CommitFile:
class Commit:
def __init__(self, commit):
- self.commit = commit
+ self._commit = commit
+ self._author = None
self._trailers = []
self._parse()
- def _parse_trailers(self, lines):
- for index in range(1, len(lines)):
- line = lines[index]
- if not line:
- break
+ def _parse_commit(self):
+ # Get and parse the commit message.
+ ret = subprocess.run(['git', 'show', '--format=%H%n%an <%ae>%n%s%n%b',
+ '--no-patch', self.commit],
+ stdout=subprocess.PIPE).stdout.decode('utf-8')
+ lines = ret.splitlines()
- self._trailers.append(line)
+ self._commit = lines[0]
+ self._author = lines[1]
+ self._title = lines[2]
+ self._body = lines[3:]
- return index
+ # Parse the trailers. Feed git-interpret-trailers with a full commit
+ # message that includes both the title and the body, as it otherwise
+ # fails to find trailers when the body contains trailers only.
+ message = self._title + '\n\n' + '\n'.join(self._body)
+ trailers = subprocess.run(['git', 'interpret-trailers', '--parse'],
+ input=message.encode('utf-8'),
+ stdout=subprocess.PIPE).stdout.decode('utf-8')
+
+ self._trailers = trailers.splitlines()
def _parse(self):
- # Get the commit title and list of files.
- ret = subprocess.run(['git', 'show', '--format=%s%n%(trailers:only,unfold)', '--name-status',
+ self._parse_commit()
+
+ # Get the list of files. Use an empty format specifier to suppress the
+ # commit message completely.
+ ret = subprocess.run(['git', 'show', '--format=', '--name-status',
self.commit],
stdout=subprocess.PIPE).stdout.decode('utf-8')
- lines = ret.splitlines()
-
- self._title = lines[0]
+ self._files = [CommitFile(f) for f in ret.splitlines()]
- index = self._parse_trailers(lines)
- self._files = [CommitFile(f) for f in lines[index:] if f]
+ def __repr__(self):
+ return '\n'.join([
+ f'commit {self.commit}',
+ f'Author: {self.author}',
+            '',
+ f' {self.title}',
+ '',
+ '\n'.join([line and f' {line}' or '' for line in self._body]),
+ 'Trailers:',
+ ] + self.trailers)
def files(self, filter='AMR'):
return [f.filename for f in self._files if f.status in filter]
@property
+ def author(self):
+ return self._author
+
+ @property
+ def commit(self):
+ return self._commit
+
+ @property
def title(self):
return self._title
@@ -278,20 +307,14 @@ class StagedChanges(Commit):
class Amendment(Commit):
def __init__(self):
- Commit.__init__(self, '')
+ Commit.__init__(self, 'HEAD')
def _parse(self):
- # Create a title using HEAD commit and parse the trailers.
- ret = subprocess.run(['git', 'show', '--format=%H %s%n%(trailers:only,unfold)',
- '--no-patch'],
- stdout=subprocess.PIPE).stdout.decode('utf-8')
- lines = ret.splitlines()
-
- self._title = 'Amendment of ' + lines[0].strip()
+ self._parse_commit()
- self._parse_trailers(lines)
+ self._title = f'Amendment of "{self.title}"'
- # Extract the list of modified files
+ # Extract the list of modified files.
ret = subprocess.run(['git', 'diff', '--staged', '--name-status', 'HEAD~'],
stdout=subprocess.PIPE).stdout.decode('utf-8')
self._files = [CommitFile(f) for f in ret.splitlines()]
@@ -310,40 +333,83 @@ class Amendment(Commit):
class ClassRegistry(type):
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
- if bases:
- bases[0].subclasses.append(newclass)
- bases[0].subclasses.sort(key=lambda x: getattr(x, 'priority', 0),
- reverse=True)
+ if bases and bases[0] != CheckerBase:
+ base = bases[0]
+
+ if not hasattr(base, 'subclasses'):
+ base.subclasses = []
+ base.subclasses.append(newclass)
+ base.subclasses.sort(key=lambda x: getattr(x, 'priority', 0),
+ reverse=True)
return newclass
-# ------------------------------------------------------------------------------
-# Commit Checkers
-#
+class CheckerBase(metaclass=ClassRegistry):
+ @classmethod
+ def instances(cls, obj, names):
+ for instance in cls.subclasses:
+ if names and instance.__name__ not in names:
+ continue
+ if instance.supports(obj):
+ yield instance
-class CommitChecker(metaclass=ClassRegistry):
- subclasses = []
+ @classmethod
+ def supports(cls, obj):
+ if hasattr(cls, 'commit_types'):
+ return type(obj) in cls.commit_types
- def __init__(self):
- pass
+ if hasattr(cls, 'patterns'):
+ for pattern in cls.patterns:
+ if fnmatch.fnmatch(os.path.basename(obj), pattern):
+ return True
+
+ return False
- #
- # Class methods
- #
@classmethod
- def checkers(cls, names):
- for checker in cls.subclasses:
- if names and checker.__name__ not in names:
- continue
- yield checker
+ def all_patterns(cls):
+ patterns = set()
+ for instance in cls.subclasses:
+ if hasattr(instance, 'patterns'):
+ patterns.update(instance.patterns)
+
+ return patterns
+
+ @classmethod
+ def check_dependencies(cls):
+ if not hasattr(cls, 'dependencies'):
+ return []
+
+ issues = []
+
+ for command in cls.dependencies:
+ if command not in dependencies:
+ dependencies[command] = shutil.which(command)
+
+ if not dependencies[command]:
+ issues.append(CommitIssue(f'Missing {command} to run {cls.__name__}'))
+
+ return issues
+
+
+# ------------------------------------------------------------------------------
+# Commit Checkers
+#
+
+class CommitChecker(CheckerBase):
+ pass
class CommitIssue(object):
def __init__(self, msg):
self.msg = msg
+ def __str__(self):
+ return f'{Colours.fg(Colours.Yellow)}{self.msg}{Colours.reset()}'
+
class HeaderAddChecker(CommitChecker):
+ commit_types = (Commit, StagedChanges, Amendment)
+
@classmethod
def check(cls, commit, top_level):
issues = []
@@ -388,6 +454,8 @@ class HeaderAddChecker(CommitChecker):
class TitleChecker(CommitChecker):
+ commit_types = (Commit,)
+
prefix_regex = re.compile(r'^([a-zA-Z0-9_.-]+: )+')
release_regex = re.compile(r'libcamera v[0-9]+\.[0-9]+\.[0-9]+')
@@ -395,11 +463,6 @@ class TitleChecker(CommitChecker):
def check(cls, commit, top_level):
title = commit.title
- # Skip the check when validating staged changes (as done through a
- # pre-commit hook) as there is no title to check in that case.
- if isinstance(commit, StagedChanges):
- return []
-
# Ignore release commits, they don't need a prefix.
if TitleChecker.release_regex.fullmatch(title):
return []
@@ -455,6 +518,8 @@ class TitleChecker(CommitChecker):
class TrailersChecker(CommitChecker):
+ commit_types = (Commit,)
+
commit_regex = re.compile(r'[0-9a-f]{12}[0-9a-f]* \(".*"\)')
coverity_regex = re.compile(r'Coverity CID=.*')
@@ -493,6 +558,8 @@ class TrailersChecker(CommitChecker):
def check(cls, commit, top_level):
issues = []
+ sob_found = False
+
for trailer in commit.trailers:
match = TrailersChecker.trailer_regex.fullmatch(trailer)
if not match:
@@ -515,6 +582,13 @@ class TrailersChecker(CommitChecker):
issues.append(CommitIssue(f"Malformed value '{value}' for commit trailer '{key}'"))
continue
+ if key == 'Signed-off-by':
+ if value == commit.author:
+ sob_found = True
+
+ if not sob_found:
+ issues.append(CommitIssue(f"No 'Signed-off-by' trailer matching author '{commit.author}', see Documentation/contributing.rst"))
+
return issues
@@ -522,64 +596,76 @@ class TrailersChecker(CommitChecker):
# Style Checkers
#
-class StyleChecker(metaclass=ClassRegistry):
- subclasses = []
+class StyleChecker(CheckerBase):
+ pass
- def __init__(self):
- pass
- #
- # Class methods
- #
- @classmethod
- def checkers(cls, filename, names):
- for checker in cls.subclasses:
- if names and checker.__name__ not in names:
- continue
- if checker.supports(filename):
- yield checker
+class StyleIssue(object):
+ def __init__(self, line_number, position, line, msg):
+ self.line_number = line_number
+ self.position = position
+ self.line = line
+ self.msg = msg
- @classmethod
- def supports(cls, filename):
- for pattern in cls.patterns:
- if fnmatch.fnmatch(os.path.basename(filename), pattern):
- return True
- return False
+ def __str__(self):
+ s = []
+ s.append(f'{Colours.fg(Colours.Yellow)}#{self.line_number}: {self.msg}{Colours.reset()}')
+ if self.line is not None:
+ s.append(f'{Colours.fg(Colours.Yellow)}+{self.line.rstrip()}{Colours.reset()}')
+
+ if self.position is not None:
+ # Align the position marker by using the original line with
+ # all characters except for tabs replaced with spaces. This
+ # ensures proper alignment regardless of how the code is
+ # indented.
+ start = self.position[0]
+ prefix = ''.join([c if c == '\t' else ' ' for c in self.line[:start]])
+ length = self.position[1] - start - 1
+ s.append(f' {prefix}^{"~" * length}')
+
+ return '\n'.join(s)
+
+
+class HexValueChecker(StyleChecker):
+ patterns = ('*.c', '*.cpp', '*.h')
+
+ regex = re.compile(r'\b0[xX][0-9a-fA-F]+\b')
@classmethod
- def all_patterns(cls):
- patterns = set()
- for checker in cls.subclasses:
- patterns.update(checker.patterns)
+ def check(cls, content, line_numbers):
+ issues = []
- return patterns
+ for line_number in line_numbers:
+ line = content[line_number - 1]
+ match = HexValueChecker.regex.search(line)
+ if not match:
+ continue
+ value = match.group(0)
+ if value == value.lower():
+ continue
-class StyleIssue(object):
- def __init__(self, line_number, line, msg):
- self.line_number = line_number
- self.line = line
- self.msg = msg
+ issues.append(StyleIssue(line_number, match.span(0), line,
+ f'Use lowercase hex constant {value.lower()}'))
+
+ return issues
class IncludeChecker(StyleChecker):
patterns = ('*.cpp', '*.h')
- headers = ('assert', 'ctype', 'errno', 'fenv', 'float', 'inttypes',
- 'limits', 'locale', 'setjmp', 'signal', 'stdarg', 'stddef',
- 'stdint', 'stdio', 'stdlib', 'string', 'time', 'uchar', 'wchar',
- 'wctype')
- include_regex = re.compile('^#include <c([a-z]*)>')
-
- def __init__(self, content):
- super().__init__()
- self.__content = content
+ headers = ('cassert', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes',
+ 'climits', 'clocale', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cuchar',
+ 'cwchar', 'cwctype', 'math.h')
+ include_regex = re.compile(r'^#include <([a-z.]*)>')
- def check(self, line_numbers):
+ @classmethod
+    def check(cls, content, line_numbers):
issues = []
for line_number in line_numbers:
- line = self.__content[line_number - 1]
+ line = content[line_number - 1]
match = IncludeChecker.include_regex.match(line)
if not match:
continue
@@ -588,8 +674,15 @@ class IncludeChecker(StyleChecker):
if header not in IncludeChecker.headers:
continue
- issues.append(StyleIssue(line_number, line,
- 'C compatibility header <%s.h> is preferred' % header))
+ if header.endswith('.h'):
+ header_type = 'C++'
+ header = 'c' + header[:-2]
+ else:
+ header_type = 'C compatibility'
+ header = header[1:] + '.h'
+
+ issues.append(StyleIssue(line_number, match.span(1), line,
+ f'{header_type} header <{header}> is preferred'))
return issues
@@ -598,18 +691,17 @@ class LogCategoryChecker(StyleChecker):
log_regex = re.compile(r'\bLOG\((Debug|Info|Warning|Error|Fatal)\)')
patterns = ('*.cpp',)
- def __init__(self, content):
- super().__init__()
- self.__content = content
-
- def check(self, line_numbers):
+ @classmethod
+ def check(cls, content, line_numbers):
issues = []
for line_number in line_numbers:
- line = self.__content[line_number-1]
- if not LogCategoryChecker.log_regex.search(line):
+ line = content[line_number - 1]
+ match = LogCategoryChecker.log_regex.search(line)
+ if not match:
continue
- issues.append(StyleIssue(line_number, line, 'LOG() should use categories'))
+ issues.append(StyleIssue(line_number, match.span(1), line,
+ 'LOG() should use categories'))
return issues
@@ -617,70 +709,30 @@ class LogCategoryChecker(StyleChecker):
class MesonChecker(StyleChecker):
patterns = ('meson.build',)
- def __init__(self, content):
- super().__init__()
- self.__content = content
-
- def check(self, line_numbers):
+ @classmethod
+ def check(cls, content, line_numbers):
issues = []
for line_number in line_numbers:
- line = self.__content[line_number-1]
- if line.find('\t') != -1:
- issues.append(StyleIssue(line_number, line, 'meson.build should use spaces for indentation'))
- return issues
-
-
-class Pep8Checker(StyleChecker):
- patterns = ('*.py',)
- results_regex = re.compile('stdin:([0-9]+):([0-9]+)(.*)')
-
- def __init__(self, content):
- super().__init__()
- self.__content = content
-
- def check(self, line_numbers):
- issues = []
- data = ''.join(self.__content).encode('utf-8')
-
- try:
- ret = subprocess.run(['pycodestyle', '--ignore=E501', '-'],
- input=data, stdout=subprocess.PIPE)
- except FileNotFoundError:
- issues.append(StyleIssue(0, None, 'Please install pycodestyle to validate python additions'))
- return issues
-
- results = ret.stdout.decode('utf-8').splitlines()
- for item in results:
- search = re.search(Pep8Checker.results_regex, item)
- line_number = int(search.group(1))
- position = int(search.group(2))
- msg = search.group(3)
-
- if line_number in line_numbers:
- line = self.__content[line_number - 1]
- issues.append(StyleIssue(line_number, line, msg))
-
+ line = content[line_number - 1]
+ pos = line.find('\t')
+ if pos != -1:
+ issues.append(StyleIssue(line_number, [pos, pos], line,
+ 'meson.build should use spaces for indentation'))
return issues
class ShellChecker(StyleChecker):
+ dependencies = ('shellcheck',)
patterns = ('*.sh',)
- results_line_regex = re.compile('In - line ([0-9]+):')
-
- def __init__(self, content):
- super().__init__()
- self.__content = content
+ results_line_regex = re.compile(r'In - line ([0-9]+):')
- def check(self, line_numbers):
+ @classmethod
+ def check(cls, content, line_numbers):
issues = []
- data = ''.join(self.__content).encode('utf-8')
+ data = ''.join(content).encode('utf-8')
- try:
- ret = subprocess.run(['shellcheck', '-Cnever', '-'],
- input=data, stdout=subprocess.PIPE)
- except FileNotFoundError:
- issues.append(StyleIssue(0, None, 'Please install shellcheck to validate shell script additions'))
- return issues
+ ret = subprocess.run(['shellcheck', '-Cnever', '-'],
+ input=data, stdout=subprocess.PIPE)
results = ret.stdout.decode('utf-8').splitlines()
for nr, item in enumerate(results):
@@ -692,11 +744,8 @@ class ShellChecker(StyleChecker):
line = results[nr + 1]
msg = results[nr + 2]
- # Determined, but not yet used
- position = msg.find('^') + 1
-
if line_number in line_numbers:
- issues.append(StyleIssue(line_number, line, msg))
+ issues.append(StyleIssue(line_number, None, line, msg))
return issues
@@ -705,40 +754,12 @@ class ShellChecker(StyleChecker):
# Formatters
#
-class Formatter(metaclass=ClassRegistry):
- subclasses = []
-
- def __init__(self):
- pass
-
- #
- # Class methods
- #
- @classmethod
- def formatters(cls, filename, names):
- for formatter in cls.subclasses:
- if names and formatter.__name__ not in names:
- continue
- if formatter.supports(filename):
- yield formatter
-
- @classmethod
- def supports(cls, filename):
- for pattern in cls.patterns:
- if fnmatch.fnmatch(os.path.basename(filename), pattern):
- return True
- return False
-
- @classmethod
- def all_patterns(cls):
- patterns = set()
- for formatter in cls.subclasses:
- patterns.update(formatter.patterns)
-
- return patterns
+class Formatter(CheckerBase):
+ pass
class CLangFormatter(Formatter):
+ dependencies = ('clang-format',)
patterns = ('*.c', '*.cpp', '*.h')
priority = -1
@@ -753,7 +774,8 @@ class CLangFormatter(Formatter):
class DoxygenFormatter(Formatter):
patterns = ('*.c', '*.cpp')
- return_regex = re.compile(' +\\* +\\\\return +[a-z]')
+ oneliner_regex = re.compile(r'^ +\* +\\(brief|param|return)\b.*\.$')
+ return_regex = re.compile(r' +\* +\\return +[a-z]')
@classmethod
def format(cls, filename, data):
@@ -768,6 +790,7 @@ class DoxygenFormatter(Formatter):
lines.append(line)
continue
+ line = cls.oneliner_regex.sub(lambda m: m.group(0)[:-1], line)
line = cls.return_regex.sub(lambda m: m.group(0)[:-1] + m.group(0)[-1].upper(), line)
if line.find('*/') != -1:
@@ -813,7 +836,7 @@ class DPointerFormatter(Formatter):
class IncludeOrderFormatter(Formatter):
patterns = ('*.cpp', '*.h')
- include_regex = re.compile('^#include (["<])([^">]*)([">])')
+ include_regex = re.compile(r'^#include (["<])([^">]*)([">])')
@classmethod
def format(cls, filename, data):
@@ -865,6 +888,17 @@ class IncludeOrderFormatter(Formatter):
return '\n'.join(lines)
+class Pep8Formatter(Formatter):
+ dependencies = ('autopep8',)
+ patterns = ('*.py',)
+
+ @classmethod
+ def format(cls, filename, data):
+ ret = subprocess.run(['autopep8', '--ignore=E501', '-'],
+ input=data.encode('utf-8'), stdout=subprocess.PIPE)
+ return ret.stdout.decode('utf-8')
+
+
class StripTrailingSpaceFormatter(Formatter):
patterns = ('*.c', '*.cpp', '*.h', '*.py', 'meson.build')
@@ -880,6 +914,24 @@ class StripTrailingSpaceFormatter(Formatter):
# Style checking
#
+def check_commit(top_level, commit, checkers):
+ issues = []
+
+ # Apply the commit checkers first.
+ for checker in CommitChecker.instances(commit, checkers):
+ issues_ = checker.check_dependencies()
+ if issues_:
+ issues += issues_
+ continue
+
+ issues += checker.check(commit, top_level)
+
+ for issue in issues:
+ print(issue)
+
+ return len(issues)
+
+
def check_file(top_level, commit, filename, checkers):
# Extract the line numbers touched by the commit.
commit_diff = commit.get_diff(top_level, filename)
@@ -895,9 +947,15 @@ def check_file(top_level, commit, filename, checkers):
# Format the file after the commit with all formatters and compute the diff
# between the unformatted and formatted contents.
after = commit.get_file(filename)
+ issues = []
formatted = after
- for formatter in Formatter.formatters(filename, checkers):
+ for formatter in Formatter.instances(filename, checkers):
+ issues_ = formatter.check_dependencies()
+ if issues_:
+ issues += issues_
+ continue
+
formatted = formatter.format(filename, formatted)
after = after.splitlines(True)
@@ -910,11 +968,14 @@ def check_file(top_level, commit, filename, checkers):
formatted_diff = [hunk for hunk in formatted_diff if hunk.intersects(lines)]
# Check for code issues not related to formatting.
- issues = []
- for checker in StyleChecker.checkers(filename, checkers):
- checker = checker(after)
+ for checker in StyleChecker.instances(filename, checkers):
+ issues_ = checker.check_dependencies()
+ if issues_:
+ issues += issues_
+ continue
+
for hunk in commit_diff:
- issues += checker.check(hunk.side('to').touched)
+ issues += checker.check(after, hunk.side('to').touched)
# Print the detected issues.
if len(issues) == 0 and len(formatted_diff) == 0:
@@ -928,13 +989,9 @@ def check_file(top_level, commit, filename, checkers):
print(hunk)
if len(issues):
- issues = sorted(issues, key=lambda i: i.line_number)
+ issues = sorted(issues, key=lambda i: getattr(i, 'line_number', -1))
for issue in issues:
- print('%s#%u: %s%s' % (Colours.fg(Colours.Yellow), issue.line_number,
- issue.msg, Colours.reset()))
- if issue.line is not None:
- print('%s+%s%s' % (Colours.fg(Colours.Yellow), issue.line.rstrip(),
- Colours.reset()))
+ print(issue)
return len(formatted_diff) + len(issues)
@@ -946,13 +1003,8 @@ def check_style(top_level, commit, checkers):
print(title)
print(separator)
- issues = 0
-
# Apply the commit checkers first.
- for checker in CommitChecker.checkers(checkers):
- for issue in checker.check(commit, top_level):
- print('%s%s%s' % (Colours.fg(Colours.Yellow), issue.msg, Colours.reset()))
- issues += 1
+ issues = check_commit(top_level, commit, checkers)
# Filter out files we have no checker for.
patterns = set()
@@ -1024,7 +1076,7 @@ def main(argv):
if args.checkers:
args.checkers = args.checkers.split(',')
- # Check for required dependencies.
+ # Check for required common dependencies.
for command, mandatory in dependencies.items():
found = shutil.which(command)
if mandatory and not found:
diff --git a/utils/codegen/controls.py b/utils/codegen/controls.py
new file mode 100644
index 00000000..e5161048
--- /dev/null
+++ b/utils/codegen/controls.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2019, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Helper classes to handle source code generation for libcamera controls
+
+
+class ControlEnum(object):
+ def __init__(self, data):
+ self.__data = data
+
+ @property
+ def description(self):
+ """The enum description"""
+ return self.__data.get('description')
+
+ @property
+ def name(self):
+ """The enum name"""
+ return self.__data.get('name')
+
+ @property
+ def value(self):
+ """The enum value"""
+ return self.__data.get('value')
+
+
+class Control(object):
+ def __init__(self, name, data, vendor, mode):
+ self.__name = name
+ self.__data = data
+ self.__enum_values = None
+ self.__size = None
+ self.__vendor = vendor
+
+ enum_values = data.get('enum')
+ if enum_values is not None:
+ self.__enum_values = [ControlEnum(enum) for enum in enum_values]
+
+ size = self.__data.get('size')
+ if size is not None:
+ if len(size) == 0:
+ raise RuntimeError(f'Control `{self.__name}` size must have at least one dimension')
+
+ # Compute the total number of elements in the array. If any of the
+            # array dimensions is a string, the array is variable-sized.
+ num_elems = 1
+ for dim in size:
+ if type(dim) is str:
+ num_elems = 0
+ break
+
+ dim = int(dim)
+ if dim <= 0:
+ raise RuntimeError(f'Control `{self.__name}` size must have positive values only')
+
+ num_elems *= dim
+
+ self.__size = num_elems
+
+ if mode == 'properties':
+ self.__direction = 'out'
+ else:
+ direction = self.__data.get('direction')
+ if direction is None:
+ raise RuntimeError(f'Control `{self.__name}` missing required field `direction`')
+ if direction not in ['in', 'out', 'inout']:
+ raise RuntimeError(f'Control `{self.__name}` direction `{direction}` is invalid; must be one of `in`, `out`, or `inout`')
+ self.__direction = direction
+
+ @property
+ def description(self):
+ """The control description"""
+ return self.__data.get('description')
+
+ @property
+ def enum_values(self):
+ """The enum values, if the control is an enumeration"""
+ if self.__enum_values is None:
+ return
+ for enum in self.__enum_values:
+ yield enum
+
+ @property
+ def enum_values_count(self):
+ """The number of enum values, if the control is an enumeration"""
+ if self.__enum_values is None:
+ return 0
+ return len(self.__enum_values)
+
+ @property
+ def is_enum(self):
+ """Is the control an enumeration"""
+ return self.__enum_values is not None
+
+ @property
+ def vendor(self):
+ """The vendor string, or None"""
+ return self.__vendor
+
+ @property
+ def name(self):
+ """The control name (CamelCase)"""
+ return self.__name
+
+ @property
+ def type(self):
+ typ = self.__data.get('type')
+ size = self.__data.get('size')
+
+ if typ == 'string':
+ return 'std::string'
+
+ if self.__size is None:
+ return typ
+
+ if self.__size:
+ return f"Span<const {typ}, {self.__size}>"
+ else:
+ return f"Span<const {typ}>"
+
+ @property
+ def direction(self):
+ in_flag = 'ControlId::Direction::In'
+ out_flag = 'ControlId::Direction::Out'
+
+ if self.__direction == 'inout':
+ return f'{in_flag} | {out_flag}'
+ if self.__direction == 'in':
+ return in_flag
+ if self.__direction == 'out':
+ return out_flag
+
+ @property
+ def element_type(self):
+ return self.__data.get('type')
+
+ @property
+ def size(self):
+ return self.__size
diff --git a/utils/codegen/gen-controls.py b/utils/codegen/gen-controls.py
new file mode 100755
index 00000000..59b716c1
--- /dev/null
+++ b/utils/codegen/gen-controls.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2019, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Generate control definitions from YAML
+
+import argparse
+import jinja2
+import os
+import sys
+import yaml
+
+from controls import Control
+
+
+def snake_case(s):
+ return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')
+
+
+def format_description(description):
+ description = description.strip('\n').split('\n')
+ for i in range(1, len(description)):
+ line = description[i]
+ description[i] = (line and ' * ' or ' *') + line
+ return '\n'.join(description)
+
+
+def extend_control(ctrl, id, ranges):
+ ctrl.id = ranges[ctrl.vendor] + id + 1
+
+ if ctrl.vendor != 'libcamera':
+ ctrl.namespace = f'{ctrl.vendor}::'
+ else:
+ ctrl.namespace = ''
+
+ return ctrl
+
+
+def main(argv):
+
+ # Parse command line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--mode', '-m', type=str, required=True, choices=['controls', 'properties'],
+ help='Mode of operation')
+ parser.add_argument('--output', '-o', metavar='file', type=str,
+ help='Output file name. Defaults to standard output if not specified.')
+ parser.add_argument('--ranges', '-r', type=str, required=True,
+ help='Control id range reservation file.')
+ parser.add_argument('--template', '-t', dest='template', type=str, required=True,
+ help='Template file name.')
+ parser.add_argument('input', type=str, nargs='+',
+ help='Input file name.')
+
+ args = parser.parse_args(argv[1:])
+
+ ranges = {}
+ with open(args.ranges, 'rb') as f:
+        data = f.read()
+ ranges = yaml.safe_load(data)['ranges']
+
+ controls = {}
+ for input in args.input:
+ data = yaml.safe_load(open(input, 'rb').read())
+
+ vendor = data['vendor']
+ if vendor not in ranges.keys():
+ raise RuntimeError(f'Control id range is not defined for vendor {vendor}')
+
+ ctrls = controls.setdefault(vendor, [])
+
+ for i, ctrl in enumerate(data['controls']):
+ ctrl = Control(*ctrl.popitem(), vendor, args.mode)
+ ctrls.append(extend_control(ctrl, i, ranges))
+
+ # Sort the vendors by range numerical value
+ controls = [[vendor, ctrls] for vendor, ctrls in controls.items()]
+ controls.sort(key=lambda item: ranges[item[0]])
+
+ filename = {
+ 'controls': 'control_ids',
+ 'properties': 'property_ids',
+ }[args.mode]
+
+ data = {
+ 'filename': filename,
+ 'mode': args.mode,
+ 'controls': controls,
+ }
+
+ env = jinja2.Environment()
+ env.filters['format_description'] = format_description
+ env.filters['snake_case'] = snake_case
+ template = env.from_string(open(args.template, 'r', encoding='utf-8').read())
+ string = template.render(data)
+
+ if args.output:
+ output = open(args.output, 'w', encoding='utf-8')
+ output.write(string)
+ output.close()
+ else:
+ sys.stdout.write(string)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/utils/gen-formats.py b/utils/codegen/gen-formats.py
index da79a8bb..0c0932a5 100755
--- a/utils/gen-formats.py
+++ b/utils/codegen/gen-formats.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# gen-formats.py - Generate formats definitions from YAML
+# Generate formats definitions from YAML
import argparse
import re
diff --git a/utils/codegen/gen-gst-controls.py b/utils/codegen/gen-gst-controls.py
new file mode 100755
index 00000000..07af7653
--- /dev/null
+++ b/utils/codegen/gen-gst-controls.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2019, Google Inc.
+# Copyright (C) 2024, Jaslo Ziska
+#
+# Authors:
+# Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+# Jaslo Ziska <jaslo@ziska.de>
+#
+# Generate gstreamer control properties from YAML
+
+import argparse
+import jinja2
+import re
+import sys
+import yaml
+
+from controls import Control
+
+
+exposed_controls = [
+ 'AeMeteringMode', 'AeConstraintMode', 'AeExposureMode',
+ 'ExposureValue', 'ExposureTime', 'ExposureTimeMode',
+ 'AnalogueGain', 'AnalogueGainMode', 'AeFlickerPeriod',
+ 'Brightness', 'Contrast', 'AwbEnable', 'AwbMode', 'ColourGains',
+ 'Saturation', 'Sharpness', 'ColourCorrectionMatrix', 'ScalerCrop',
+ 'DigitalGain', 'AfMode', 'AfRange', 'AfSpeed', 'AfMetering', 'AfWindows',
+ 'LensPosition', 'Gamma',
+]
+
+
+def find_common_prefix(strings):
+ prefix = strings[0]
+
+ for string in strings[1:]:
+ while string[:len(prefix)] != prefix and prefix:
+ prefix = prefix[:len(prefix) - 1]
+ if not prefix:
+ break
+
+ return prefix
+
+
+def format_description(description):
+ # Substitute doxygen keywords \sa (see also) and \todo
+ description = re.sub(r'\\sa((?: \w+)+)',
+ lambda match: 'See also: ' + ', '.join(
+ map(kebab_case, match.group(1).strip().split(' '))
+ ) + '.', description)
+ description = re.sub(r'\\todo', 'Todo:', description)
+
+ description = description.strip().split('\n')
+ return '\n'.join([
+ '"' + line.replace('\\', r'\\').replace('"', r'\"') + ' "' for line in description if line
+ ]).rstrip()
+
+
+# Custom filter to allow indenting by a string prior to Jinja version 3.0
+#
+# This function can be removed and the calls to indent_str() replaced by the
+# built-in indent() filter when dropping Jinja versions older than 3.0
+def indent_str(s, indention):
+ s += '\n'
+
+ lines = s.splitlines()
+ rv = lines.pop(0)
+
+ if lines:
+ rv += '\n' + '\n'.join(
+ indention + line if line else line for line in lines
+ )
+
+ return rv
+
+
+def snake_case(s):
+ return ''.join([
+ c.isupper() and ('_' + c.lower()) or c for c in s
+ ]).strip('_')
+
+
+def kebab_case(s):
+ return snake_case(s).replace('_', '-')
+
+
+def extend_control(ctrl):
+ if ctrl.vendor != 'libcamera':
+ ctrl.namespace = f'{ctrl.vendor}::'
+ ctrl.vendor_prefix = f'{ctrl.vendor}-'
+ else:
+ ctrl.namespace = ''
+ ctrl.vendor_prefix = ''
+
+ ctrl.is_array = ctrl.size is not None
+
+ if ctrl.is_enum:
+ # Remove common prefix from enum variant names
+ prefix = find_common_prefix([enum.name for enum in ctrl.enum_values])
+ for enum in ctrl.enum_values:
+ enum.gst_name = kebab_case(enum.name.removeprefix(prefix))
+
+ ctrl.gtype = 'enum'
+ ctrl.default = '0'
+ elif ctrl.element_type == 'bool':
+ ctrl.gtype = 'boolean'
+ ctrl.default = 'false'
+ elif ctrl.element_type == 'float':
+ ctrl.gtype = 'float'
+ ctrl.default = '0'
+ ctrl.min = '-G_MAXFLOAT'
+ ctrl.max = 'G_MAXFLOAT'
+ elif ctrl.element_type == 'int32_t':
+ ctrl.gtype = 'int'
+ ctrl.default = '0'
+ ctrl.min = 'G_MININT'
+ ctrl.max = 'G_MAXINT'
+ elif ctrl.element_type == 'int64_t':
+ ctrl.gtype = 'int64'
+ ctrl.default = '0'
+ ctrl.min = 'G_MININT64'
+ ctrl.max = 'G_MAXINT64'
+ elif ctrl.element_type == 'uint8_t':
+ ctrl.gtype = 'uchar'
+ ctrl.default = '0'
+ ctrl.min = '0'
+ ctrl.max = 'G_MAXUINT8'
+ elif ctrl.element_type == 'Rectangle':
+ ctrl.is_rectangle = True
+ ctrl.default = '0'
+ ctrl.min = '0'
+ ctrl.max = 'G_MAXINT'
+ else:
+ raise RuntimeError(f'The type `{ctrl.element_type}` is unknown')
+
+ return ctrl
+
+
+def main(argv):
+ # Parse command line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--output', '-o', metavar='file', type=str,
+ help='Output file name. Defaults to standard output if not specified.')
+ parser.add_argument('--template', '-t', dest='template', type=str, required=True,
+ help='Template file name.')
+ parser.add_argument('input', type=str, nargs='+',
+ help='Input file name.')
+
+ args = parser.parse_args(argv[1:])
+
+ controls = {}
+ for input in args.input:
+ data = yaml.safe_load(open(input, 'rb').read())
+
+ vendor = data['vendor']
+ ctrls = controls.setdefault(vendor, [])
+
+ for ctrl in data['controls']:
+ ctrl = Control(*ctrl.popitem(), vendor, mode='controls')
+
+ if ctrl.name in exposed_controls:
+ ctrls.append(extend_control(ctrl))
+
+ data = {'controls': list(controls.items())}
+
+ env = jinja2.Environment()
+ env.filters['format_description'] = format_description
+ env.filters['indent_str'] = indent_str
+ env.filters['snake_case'] = snake_case
+ env.filters['kebab_case'] = kebab_case
+ template = env.from_string(open(args.template, 'r', encoding='utf-8').read())
+ string = template.render(data)
+
+ if args.output:
+ with open(args.output, 'w', encoding='utf-8') as output:
+ output.write(string)
+ else:
+ sys.stdout.write(string)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/utils/gen-header.sh b/utils/codegen/gen-header.sh
index 8b66c5dd..c78f0859 100755
--- a/utils/gen-header.sh
+++ b/utils/codegen/gen-header.sh
@@ -1,7 +1,7 @@
#!/bin/sh
-src_dir="$1"
-dst_file="$2"
+dst_file="$1"
+shift
cat <<EOF > "$dst_file"
/* SPDX-License-Identifier: LGPL-2.1-or-later */
@@ -9,16 +9,15 @@ cat <<EOF > "$dst_file"
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * libcamera.h - libcamera public API
+ * libcamera public API
*/
#pragma once
EOF
-headers=$(for header in "$src_dir"/*.h "$src_dir"/*.h.in ; do
+headers=$(for header in "$@" ; do
header=$(basename "$header")
- header="${header%.in}"
echo "$header"
done | sort)
diff --git a/utils/gen-ipa-pub-key.py b/utils/codegen/gen-ipa-pub-key.py
index a4a1f7b7..dc3e7d5f 100755
--- a/utils/gen-ipa-pub-key.py
+++ b/utils/codegen/gen-ipa-pub-key.py
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipa-gen-key.py - Generate the IPA module signing public key
+# Generate the IPA module signing public key
import string
import subprocess
diff --git a/utils/tracepoints/gen-tp-header.py b/utils/codegen/gen-tp-header.py
index a454615e..6769c7ce 100755
--- a/utils/tracepoints/gen-tp-header.py
+++ b/utils/codegen/gen-tp-header.py
@@ -4,9 +4,8 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# gen-tp-header.py - Generate header file to contain lttng tracepoints
+# Generate header file to contain lttng tracepoints
-import datetime
import jinja2
import pathlib
import os
@@ -20,7 +19,6 @@ def main(argv):
output = argv[2]
template = argv[3]
- year = datetime.datetime.now().year
path = pathlib.Path(output).absolute().relative_to(argv[1])
source = ''
@@ -28,7 +26,7 @@ def main(argv):
source += open(fname, 'r', encoding='utf-8').read() + '\n\n'
template = jinja2.Template(open(template, 'r', encoding='utf-8').read())
- string = template.render(year=year, path=path, source=source)
+ string = template.render(path=path, source=source)
f = open(output, 'w', encoding='utf-8').write(string)
diff --git a/utils/ipc/extract-docs.py b/utils/codegen/ipc/extract-docs.py
index c2050c99..61f44cae 100755
--- a/utils/ipc/extract-docs.py
+++ b/utils/codegen/ipc/extract-docs.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# extract-docs.py - Extract doxygen documentation from mojom files
+# Extract doxygen documentation from mojom files
import argparse
import re
@@ -38,7 +38,7 @@ def main(argv):
/*
* Copyright (C) 2021, Google Inc.
*
- * {pipeline}_ipa_interface.cpp - Docs file for generated {pipeline}.mojom
+ * Docs file for generated {pipeline}.mojom
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generate.py b/utils/codegen/ipc/generate.py
index 71bdee3b..dfbe659b 100755
--- a/utils/ipc/generate.py
+++ b/utils/codegen/ipc/generate.py
@@ -4,14 +4,11 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# generate.py - Run mojo code generator for generating libcamera IPC files
+# Run mojo code generator for generating libcamera IPC files
import os
import sys
-# TODO set sys.pycache_prefix for >= python3.8
-sys.dont_write_bytecode = True
-
sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/bindings')
import mojo.public.tools.bindings.mojom_bindings_generator as generator
diff --git a/utils/ipc/generators/__init__.py b/utils/codegen/ipc/generators/__init__.py
index e69de29b..e69de29b 100644
--- a/utils/ipc/generators/__init__.py
+++ b/utils/codegen/ipc/generators/__init__.py
diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
index c60b99b8..3942e570 100644
--- a/utils/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_interface.h.tmpl
@@ -7,7 +7,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * core_ipa_interface.h - libcamera core definitions for Image Processing Algorithms
+ * libcamera core definitions for Image Processing Algorithms
*
* This file is auto-generated. Do not edit.
*/
@@ -15,8 +15,13 @@
#pragma once
{% if has_map %}#include <map>{% endif %}
+{% if has_string %}#include <string>{% endif %}
{% if has_array %}#include <vector>{% endif %}
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
#include <libcamera/ipa/ipa_interface.h>
namespace libcamera {
diff --git a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
index 5738a1aa..036518f6 100644
--- a/utils/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/core_ipa_serializer.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * core_ipa_serializer.h - Data serializer for core libcamera definitions for IPA
+ * Data serializer for core libcamera definitions for IPA
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/definition_functions.tmpl b/utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl
index 8b8509f3..8b8509f3 100644
--- a/utils/ipc/generators/libcamera_templates/definition_functions.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/definition_functions.tmpl
diff --git a/utils/ipc/generators/libcamera_templates/meson.build b/utils/codegen/ipc/generators/libcamera_templates/meson.build
index 70664eab..70664eab 100644
--- a/utils/ipc/generators/libcamera_templates/meson.build
+++ b/utils/codegen/ipc/generators/libcamera_templates/meson.build
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
index 160601f7..5d70ea6a 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_interface.h.tmpl
@@ -7,19 +7,27 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_interface.h - Image Processing Algorithm interface for {{module_name}}
+ * Image Processing Algorithm interface for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
#pragma once
-#include <libcamera/ipa/core_ipa_interface.h>
-#include <libcamera/ipa/ipa_interface.h>
-
{% if has_map %}#include <map>{% endif %}
+{% if has_string %}#include <string>{% endif %}
{% if has_array %}#include <vector>{% endif %}
+#include <libcamera/base/flags.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/ipa_interface.h>
+
namespace libcamera {
{%- if has_namespace %}
{% for ns in namespace %}
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
index 238cf4a5..ce3cc5ab 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.cpp.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy.cpp - Image Processing Algorithm proxy for {{module_name}}
+ * Image Processing Algorithm proxy for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
@@ -175,9 +175,9 @@ void {{proxy_name}}::recvMessage(const IPCMessage &data)
);
{% elif method|is_async %}
ASSERT(state_ == ProxyRunning);
- proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued,
+ proxy_.invokeMethod(&ThreadProxy::{{method.mojom_name}}, ConnectionTypeQueued
{%- for param in method|method_param_names -%}
- {{param}}{{- ", " if not loop.last}}
+ , {{param}}
{%- endfor -%}
);
{%- endif %}
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
index 6e823598..e213b18a 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy.h - Image Processing Algorithm proxy for {{module_name}}
+ * Image Processing Algorithm proxy for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
index b65dc4cf..1f990d3f 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_proxy_worker.cpp.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_proxy_worker.cpp - Image Processing Algorithm proxy worker for {{module_name}}
+ * Image Processing Algorithm proxy worker for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
index 8b709705..cd5a65a9 100644
--- a/utils/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/module_ipa_serializer.h.tmpl
@@ -8,7 +8,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * {{module_name}}_ipa_serializer.h - Image Processing Algorithm data serializer for {{module_name}}
+ * Image Processing Algorithm data serializer for {{module_name}}
*
* This file is auto-generated. Do not edit.
*/
diff --git a/utils/ipc/generators/libcamera_templates/proxy_functions.tmpl b/utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl
index b5797b14..25476990 100644
--- a/utils/ipc/generators/libcamera_templates/proxy_functions.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/proxy_functions.tmpl
@@ -34,7 +34,7 @@
thread_.exit();
thread_.wait();
- Thread::current()->dispatchMessages(Message::Type::InvokeMessage);
+ Thread::current()->dispatchMessages(Message::Type::InvokeMessage, this);
state_ = ProxyStopped;
{%- endmacro -%}
diff --git a/utils/ipc/generators/libcamera_templates/serializer.tmpl b/utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl
index 323e1293..323e1293 100644
--- a/utils/ipc/generators/libcamera_templates/serializer.tmpl
+++ b/utils/codegen/ipc/generators/libcamera_templates/serializer.tmpl
diff --git a/utils/ipc/generators/meson.build b/utils/codegen/ipc/generators/meson.build
index 504f1a46..504f1a46 100644
--- a/utils/ipc/generators/meson.build
+++ b/utils/codegen/ipc/generators/meson.build
diff --git a/utils/ipc/generators/mojom_libcamera_generator.py b/utils/codegen/ipc/generators/mojom_libcamera_generator.py
index 99d905de..d9c620a0 100644
--- a/utils/ipc/generators/mojom_libcamera_generator.py
+++ b/utils/codegen/ipc/generators/mojom_libcamera_generator.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# mojom_libcamera_generator.py - Generates libcamera files from a mojom.Module.
+# Generates libcamera files from a mojom.Module.
import argparse
import datetime
@@ -467,6 +467,7 @@ class Generator(generator.Generator):
'enums': self.module.enums,
'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0,
'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0,
+ 'has_string': len([x for x in self.module.kinds.keys() if x[0] == 's']) > 0,
'has_namespace': self.module.mojom_namespace != '',
'interface_event': GetEventInterface(self.module.interfaces),
'interface_main': GetMainInterface(self.module.interfaces),
@@ -486,6 +487,7 @@ class Generator(generator.Generator):
'enums_gen_header': [x for x in self.module.enums if x.attributes is None or 'skipHeader' not in x.attributes],
'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0,
'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0,
+ 'has_string': len([x for x in self.module.kinds.keys() if x[0] == 's']) > 0,
'structs_gen_header': [x for x in self.module.structs if x.attributes is None or 'skipHeader' not in x.attributes],
'structs_gen_serializer': [x for x in self.module.structs if x.attributes is None or 'skipSerdes' not in x.attributes],
}
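The has_array/has_map/has_string flags above rely on mojom encoding each kind
as a spec string whose first character identifies the kind ('a' for arrays,
'm' for maps, 's' for strings). A minimal sketch of the same check, not part
of the patch (the spec strings in the example are illustrative):

    def template_flags(kinds):
        # Derive header-inclusion flags from a mojom.Module kinds dict.
        return {
            'has_array': any(k.startswith('a') for k in kinds),
            'has_map': any(k.startswith('m') for k in kinds),
            'has_string': any(k.startswith('s') for k in kinds),
        }

    print(template_flags({'a[u8]': None, 'm[s][i32]': None, 's': None}))
    # -> {'has_array': True, 'has_map': True, 'has_string': True}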
diff --git a/utils/ipc/meson.build b/utils/codegen/ipc/meson.build
index 973a5417..f77bf324 100644
--- a/utils/ipc/meson.build
+++ b/utils/codegen/ipc/meson.build
@@ -13,6 +13,7 @@ mojom_docs_extractor = find_program('./extract-docs.py')
mojom_templates = custom_target('mojom_templates',
input : mojom_template_files,
output : 'libcamera_templates.zip',
- command : [mojom_generator, '-o', '@OUTDIR@', 'precompile'])
+ command : [mojom_generator, '-o', '@OUTDIR@', 'precompile'],
+ env : py_build_env)
mojom_templates_dir = meson.current_build_dir()
diff --git a/utils/ipc/mojo/README b/utils/codegen/ipc/mojo/README
index 961cabd2..961cabd2 100644
--- a/utils/ipc/mojo/README
+++ b/utils/codegen/ipc/mojo/README
diff --git a/utils/ipc/mojo/public/LICENSE b/utils/codegen/ipc/mojo/public/LICENSE
index 513e8a6a..513e8a6a 100644
--- a/utils/ipc/mojo/public/LICENSE
+++ b/utils/codegen/ipc/mojo/public/LICENSE
diff --git a/utils/ipc/mojo/public/tools/.style.yapf b/utils/codegen/ipc/mojo/public/tools/.style.yapf
index b4ebbe24..b4ebbe24 100644
--- a/utils/ipc/mojo/public/tools/.style.yapf
+++ b/utils/codegen/ipc/mojo/public/tools/.style.yapf
diff --git a/utils/ipc/mojo/public/tools/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/BUILD.gn
index 5328a34a..5328a34a 100644
--- a/utils/ipc/mojo/public/tools/BUILD.gn
+++ b/utils/codegen/ipc/mojo/public/tools/BUILD.gn
diff --git a/utils/ipc/mojo/public/tools/bindings/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn
index eeca73ea..eeca73ea 100644
--- a/utils/ipc/mojo/public/tools/bindings/BUILD.gn
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/BUILD.gn
diff --git a/utils/ipc/mojo/public/tools/bindings/README.md b/utils/codegen/ipc/mojo/public/tools/bindings/README.md
index b27b2d01..b27b2d01 100644
--- a/utils/ipc/mojo/public/tools/bindings/README.md
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/README.md
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/__init__.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py
index e69de29b..e69de29b 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/__init__.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/__init__.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py
index e6e4f2c9..e6e4f2c9 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py
index f1a50a4a..f1a50a4a 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py
index 702d41c3..702d41c3 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py
index 07f51a64..07f51a64 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py
index e96152fd..e96152fd 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py
index d570e26c..d570e26c 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py
diff --git a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py
index a6cd71e2..a6cd71e2 100644
--- a/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py
diff --git a/utils/ipc/mojo/public/tools/bindings/concatenate-files.py b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py
index 4dd26d4a..4dd26d4a 100755
--- a/utils/ipc/mojo/public/tools/bindings/concatenate-files.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate-files.py
diff --git a/utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py
index 7d56c9f9..7d56c9f9 100755
--- a/utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py
diff --git a/utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py b/utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py
index c6daff03..c6daff03 100644
--- a/utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/gen_data_files_list.py
diff --git a/utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py b/utils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py
index 4a53e2bf..4a53e2bf 100755
--- a/utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/generate_type_mappings.py
diff --git a/utils/ipc/mojo/public/tools/bindings/minify_with_terser.py b/utils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py
index cefee7a4..cefee7a4 100755
--- a/utils/ipc/mojo/public/tools/bindings/minify_with_terser.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/minify_with_terser.py
diff --git a/utils/ipc/mojo/public/tools/bindings/mojom.gni b/utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni
index 3f6e54e0..3f6e54e0 100644
--- a/utils/ipc/mojo/public/tools/bindings/mojom.gni
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom.gni
diff --git a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py
index 8c641c2a..8c641c2a 100755
--- a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py
diff --git a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py
index 761922b6..761922b6 100644
--- a/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py
diff --git a/utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py b/utils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py
index 6bb7a209..6bb7a209 100755
--- a/utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py
+++ b/utils/codegen/ipc/mojo/public/tools/bindings/validate_typemap_config.py
diff --git a/utils/ipc/mojo/public/tools/mojom/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn
index eafb95a1..eafb95a1 100644
--- a/utils/ipc/mojo/public/tools/mojom/BUILD.gn
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/BUILD.gn
diff --git a/utils/ipc/mojo/public/tools/mojom/README.md b/utils/codegen/ipc/mojo/public/tools/mojom/README.md
index e5d17ab0..e5d17ab0 100644
--- a/utils/ipc/mojo/public/tools/mojom/README.md
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/README.md
diff --git a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
index 35cd1cfd..35cd1cfd 100755
--- a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
diff --git a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py
index 06769c95..06769c95 100755
--- a/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/const_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py
index e8ed36a7..e8ed36a7 100644
--- a/utils/ipc/mojo/public/tools/mojom/const_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/const_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/enum_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py
index 9269cde5..9269cde5 100644
--- a/utils/ipc/mojo/public/tools/mojom/enum_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/enum_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/feature_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py
index 5f014e1c..5f014e1c 100644
--- a/utils/ipc/mojo/public/tools/mojom/feature_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/feature_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn
index a0edf0eb..a0edf0eb 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/BUILD.gn
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/BUILD.gn
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py
index e69de29b..e69de29b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/__init__.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/__init__.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/error.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py
index dd53b835..dd53b835 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/error.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/error.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py
index 124f12c1..124f12c1 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py
index c93d2289..c93d2289 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py
index e69de29b..e69de29b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/__init__.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py
index 1efe2022..1efe2022 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/check.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py
index 96fe3a2d..96fe3a2d 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py
index 7143e07c..7143e07c 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py
index ca71059d..ca71059d 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py
index 2a4e852c..2a4e852c 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py
index 61240426..61240426 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py
index 7d8e4e01..7d8e4e01 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py
index 807e2a4f..807e2a4f 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py
index 83bb297f..83bb297f 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py
index b4fea924..b4fea924 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py
index e69de29b..e69de29b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/__init__.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py
index aae9cdb6..aae9cdb6 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py
index b289f7b1..b289f7b1 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py
index 9687edbf..9687edbf 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py
index cca1764b..cca1764b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py
index 00136a8b..00136a8b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py
index bc9f8354..bc9f8354 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py
index 1dffd98b..1dffd98b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py
index 0a26307b..0a26307b 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py
index 9693090e..9693090e 100755
--- a/utils/ipc/mojo/public/tools/mojom/mojom_parser.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py
index f0ee6966..f0ee6966 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py
diff --git a/utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py
index 353a2b6e..353a2b6e 100644
--- a/utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py
index d10d69c6..d10d69c6 100644
--- a/utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/union_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py
index 6b2525e5..6b2525e5 100644
--- a/utils/ipc/mojo/public/tools/mojom/union_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/union_unittest.py
diff --git a/utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py b/utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py
index 45e45ec5..45e45ec5 100644
--- a/utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py
+++ b/utils/codegen/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py
diff --git a/utils/ipc/mojo/public/tools/run_all_python_unittests.py b/utils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py
index 98bce18c..98bce18c 100755
--- a/utils/ipc/mojo/public/tools/run_all_python_unittests.py
+++ b/utils/codegen/ipc/mojo/public/tools/run_all_python_unittests.py
diff --git a/utils/ipc/parser.py b/utils/codegen/ipc/parser.py
index 231a3266..8e70322d 100755
--- a/utils/ipc/parser.py
+++ b/utils/codegen/ipc/parser.py
@@ -4,14 +4,11 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# parser.py - Run mojo parser with python3
+# Run mojo parser with python3
import os
import sys
-# TODO set sys.pycache_prefix for >= python3.8
-sys.dont_write_bytecode = True
-
# Make sure that mojom_parser.py can import mojom
sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/mojom')
diff --git a/utils/ipc/tools/README b/utils/codegen/ipc/tools/README
index 961cabd2..961cabd2 100644
--- a/utils/ipc/tools/README
+++ b/utils/codegen/ipc/tools/README
diff --git a/utils/ipc/tools/diagnosis/crbug_1001171.py b/utils/codegen/ipc/tools/diagnosis/crbug_1001171.py
index 40900d10..40900d10 100644
--- a/utils/ipc/tools/diagnosis/crbug_1001171.py
+++ b/utils/codegen/ipc/tools/diagnosis/crbug_1001171.py
diff --git a/utils/codegen/meson.build b/utils/codegen/meson.build
new file mode 100644
index 00000000..904dd66d
--- /dev/null
+++ b/utils/codegen/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: CC0-1.0
+
+## Code generation
+
+py_build_env = environment()
+# \todo Investigate usage of PYTHONPYCACHEPREFIX for Python >= 3.8
+py_build_env.set('PYTHONDONTWRITEBYTECODE', '1')
+py_build_env.prepend('PYTHONPATH', meson.current_source_dir())
+
+py_modules += ['jinja2', 'yaml']
+
+gen_controls = files('gen-controls.py')
+gen_formats = files('gen-formats.py')
+gen_gst_controls = files('gen-gst-controls.py')
+gen_header = files('gen-header.sh')
+gen_ipa_pub_key = files('gen-ipa-pub-key.py')
+gen_tracepoints = files('gen-tp-header.py')
+
+subdir('ipc')
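py_build_env above is handed to the code generation commands (the
mojom_templates target earlier in this patch gains an env argument), so the
generators run without writing __pycache__ directories into the source tree
and can import helpers from utils/codegen; this is also why parser.py drops
its sys.dont_write_bytecode workaround above. A rough Python equivalent, not
part of the patch (the checkout path and input file are illustrative):

    import os
    import subprocess

    env = dict(os.environ)
    env['PYTHONDONTWRITEBYTECODE'] = '1'  # suppress .pyc files
    env['PYTHONPATH'] = ('/path/to/utils/codegen' + os.pathsep
                         + env.get('PYTHONPATH', ''))
    subprocess.run(['python3', 'ipc/parser.py', 'interface.mojom'],
                   env=env, check=True)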
diff --git a/utils/gen-controls.py b/utils/gen-controls.py
deleted file mode 100755
index 6cd5e362..00000000
--- a/utils/gen-controls.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (C) 2019, Google Inc.
-#
-# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-#
-# gen-controls.py - Generate control definitions from YAML
-
-import argparse
-from functools import reduce
-import operator
-import string
-import sys
-import yaml
-import os
-
-
-class ControlEnum(object):
- def __init__(self, data):
- self.__data = data
-
- @property
- def description(self):
- """The enum description"""
- return self.__data.get('description')
-
- @property
- def name(self):
- """The enum name"""
- return self.__data.get('name')
-
- @property
- def value(self):
- """The enum value"""
- return self.__data.get('value')
-
-
-class Control(object):
- def __init__(self, name, data, vendor):
- self.__name = name
- self.__data = data
- self.__enum_values = None
- self.__size = None
- self.__vendor = vendor
-
- enum_values = data.get('enum')
- if enum_values is not None:
- self.__enum_values = [ControlEnum(enum) for enum in enum_values]
-
- size = self.__data.get('size')
- if size is not None:
- if len(size) == 0:
- raise RuntimeError(f'Control `{self.__name}` size must have at least one dimension')
-
- # Compute the total number of elements in the array. If any of the
- # array dimension is a string, the array is variable-sized.
- num_elems = 1
- for dim in size:
- if type(dim) is str:
- num_elems = 0
- break
-
- dim = int(dim)
- if dim <= 0:
- raise RuntimeError(f'Control `{self.__name}` size must have positive values only')
-
- num_elems *= dim
-
- self.__size = num_elems
-
- @property
- def description(self):
- """The control description"""
- return self.__data.get('description')
-
- @property
- def enum_values(self):
- """The enum values, if the control is an enumeration"""
- if self.__enum_values is None:
- return
- for enum in self.__enum_values:
- yield enum
-
- @property
- def is_enum(self):
- """Is the control an enumeration"""
- return self.__enum_values is not None
-
- @property
- def vendor(self):
- """The vendor string, or None"""
- return self.__vendor
-
- @property
- def name(self):
- """The control name (CamelCase)"""
- return self.__name
-
- @property
- def type(self):
- typ = self.__data.get('type')
- size = self.__data.get('size')
-
- if typ == 'string':
- return 'std::string'
-
- if self.__size is None:
- return typ
-
- if self.__size:
- return f"Span<const {typ}, {self.__size}>"
- else:
- return f"Span<const {typ}>"
-
-
-def snake_case(s):
- return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')
-
-
-def format_description(description):
- description = description.strip('\n').split('\n')
- description[0] = '\\brief ' + description[0]
- return '\n'.join([(line and ' * ' or ' *') + line for line in description])
-
-
-def generate_cpp(controls):
- enum_doc_start_template = string.Template('''/**
- * \\enum ${name}Enum
- * \\brief Supported ${name} values''')
- enum_doc_value_template = string.Template(''' * \\var ${value}
-${description}''')
- doc_template = string.Template('''/**
- * \\var ${name}
-${description}
- */''')
- def_template = string.Template('extern const Control<${type}> ${name}(${id_name}, "${name}");')
- enum_values_doc = string.Template('''/**
- * \\var ${name}Values
- * \\brief List of all $name supported values
- */''')
- enum_values_start = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values = {''')
- enum_values_values = string.Template('''\tstatic_cast<int32_t>(${name}),''')
-
- ctrls_doc = {}
- ctrls_def = {}
- ctrls_map = []
-
- for ctrl in controls:
- id_name = snake_case(ctrl.name).upper()
-
- vendor = ctrl.vendor
- if vendor not in ctrls_doc:
- ctrls_doc[vendor] = []
- ctrls_def[vendor] = []
-
- info = {
- 'name': ctrl.name,
- 'type': ctrl.type,
- 'description': format_description(ctrl.description),
- 'id_name': id_name,
- }
-
- target_doc = ctrls_doc[vendor]
- target_def = ctrls_def[vendor]
-
- if ctrl.is_enum:
- enum_doc = []
- enum_doc.append(enum_doc_start_template.substitute(info))
-
- num_entries = 0
- for enum in ctrl.enum_values:
- value_info = {
- 'name': ctrl.name,
- 'value': enum.name,
- 'description': format_description(enum.description),
- }
- enum_doc.append(enum_doc_value_template.substitute(value_info))
- num_entries += 1
-
- enum_doc = '\n *\n'.join(enum_doc)
- enum_doc += '\n */'
- target_doc.append(enum_doc)
-
- values_info = {
- 'name': info['name'],
- 'size': num_entries,
- }
- target_doc.append(enum_values_doc.substitute(values_info))
- target_def.append(enum_values_start.substitute(values_info))
- for enum in ctrl.enum_values:
- value_info = {
- 'name': enum.name
- }
- target_def.append(enum_values_values.substitute(value_info))
- target_def.append("};")
-
- target_doc.append(doc_template.substitute(info))
- target_def.append(def_template.substitute(info))
-
- vendor_ns = vendor + '::' if vendor != "libcamera" else ''
- ctrls_map.append('\t{ ' + vendor_ns + id_name + ', &' + vendor_ns + ctrl.name + ' },')
-
- vendor_ctrl_doc_sub = []
- vendor_ctrl_template = string.Template('''
-/**
- * \\brief Namespace for ${vendor} controls
- */
-namespace ${vendor} {
-
-${vendor_controls_str}
-
-} /* namespace ${vendor} */''')
-
- for vendor in [v for v in ctrls_doc.keys() if v not in ['libcamera']]:
- vendor_ctrl_doc_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n\n'.join(ctrls_doc[vendor])}))
-
- vendor_ctrl_def_sub = []
- for vendor in [v for v in ctrls_def.keys() if v not in ['libcamera']]:
- vendor_ctrl_def_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n'.join(ctrls_def[vendor])}))
-
- return {
- 'controls_doc': '\n\n'.join(ctrls_doc['libcamera']),
- 'controls_def': '\n'.join(ctrls_def['libcamera']),
- 'controls_map': '\n'.join(ctrls_map),
- 'vendor_controls_doc': '\n'.join(vendor_ctrl_doc_sub),
- 'vendor_controls_def': '\n'.join(vendor_ctrl_def_sub),
- }
-
-
-def generate_h(controls, mode, ranges):
- enum_template_start = string.Template('''enum ${name}Enum {''')
- enum_value_template = string.Template('''\t${name} = ${value},''')
- enum_values_template = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values;''')
- template = string.Template('''extern const Control<${type}> ${name};''')
-
- ctrls = {}
- ids = {}
- id_value = {}
-
- for ctrl in controls:
- id_name = snake_case(ctrl.name).upper()
-
- vendor = ctrl.vendor
- if vendor not in ctrls:
- if vendor not in ranges.keys():
- raise RuntimeError(f'Control id range is not defined for vendor {vendor}')
- id_value[vendor] = ranges[vendor] + 1
- ids[vendor] = []
- ctrls[vendor] = []
-
- target_ids = ids[vendor]
- target_ids.append('\t' + id_name + ' = ' + str(id_value[vendor]) + ',')
-
- info = {
- 'name': ctrl.name,
- 'type': ctrl.type,
- }
-
- target_ctrls = ctrls[vendor]
-
- if ctrl.is_enum:
- target_ctrls.append(enum_template_start.substitute(info))
-
- num_entries = 0
- for enum in ctrl.enum_values:
- value_info = {
- 'name': enum.name,
- 'value': enum.value,
- }
- target_ctrls.append(enum_value_template.substitute(value_info))
- num_entries += 1
- target_ctrls.append("};")
-
- values_info = {
- 'name': info['name'],
- 'size': num_entries,
- }
- target_ctrls.append(enum_values_template.substitute(values_info))
-
- target_ctrls.append(template.substitute(info))
- id_value[vendor] += 1
-
- vendor_template = string.Template('''
-namespace ${vendor} {
-
-#define LIBCAMERA_HAS_${vendor_def}_VENDOR_${mode}
-
-enum {
-${vendor_enums}
-};
-
-${vendor_controls}
-
-} /* namespace ${vendor} */
-''')
-
- vendor_sub = []
- for vendor in [v for v in ctrls.keys() if v != 'libcamera']:
- vendor_sub.append(vendor_template.substitute({'mode': mode.upper(),
- 'vendor': vendor,
- 'vendor_def': vendor.upper(),
- 'vendor_enums': '\n'.join(ids[vendor]),
- 'vendor_controls': '\n'.join(ctrls[vendor])}))
-
- return {
- 'ids': '\n'.join(ids['libcamera']),
- 'controls': '\n'.join(ctrls['libcamera']),
- 'vendor_controls': '\n'.join(vendor_sub)
- }
-
-
-def fill_template(template, data):
-
- template = open(template, 'rb').read()
- template = template.decode('utf-8')
- template = string.Template(template)
- return template.substitute(data)
-
-
-def main(argv):
-
- # Parse command line arguments
- parser = argparse.ArgumentParser()
- parser.add_argument('--mode', '-m', type=str, required=True, choices=['controls', 'properties'],
- help='Mode of operation')
- parser.add_argument('--output', '-o', metavar='file', type=str,
- help='Output file name. Defaults to standard output if not specified.')
- parser.add_argument('--ranges', '-r', type=str, required=True,
- help='Control id range reservation file.')
- parser.add_argument('--template', '-t', dest='template', type=str, required=True,
- help='Template file name.')
- parser.add_argument('input', type=str, nargs='+',
- help='Input file name.')
-
- args = parser.parse_args(argv[1:])
-
- ranges = {}
- with open(args.ranges, 'rb') as f:
- data = open(args.ranges, 'rb').read()
- ranges = yaml.safe_load(data)['ranges']
-
- controls = []
- for input in args.input:
- with open(input, 'rb') as f:
- data = f.read()
- vendor = yaml.safe_load(data)['vendor']
- ctrls = yaml.safe_load(data)['controls']
- controls = controls + [Control(*ctrl.popitem(), vendor) for ctrl in ctrls]
-
- if args.template.endswith('.cpp.in'):
- data = generate_cpp(controls)
- elif args.template.endswith('.h.in'):
- data = generate_h(controls, args.mode, ranges)
- else:
- raise RuntimeError('Unknown template type')
-
- data = fill_template(args.template, data)
-
- if args.output:
- output = open(args.output, 'wb')
- output.write(data.encode('utf-8'))
- output.close()
- else:
- sys.stdout.write(data)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
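The control generator itself is not gone: the new utils/codegen/meson.build
above references gen-controls.py from its new home under utils/codegen/. As a
small illustration of the snake_case() helper the script carries along, which
turns CamelCase control names into the identifiers used for control IDs (a
standalone sketch, not part of the patch):

    def snake_case(s):
        return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')

    print(snake_case('AeMeteringMode').upper())
    # -> AE_METERING_MODE, the id_name form used by the generator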
diff --git a/utils/gen-debug-controls.py b/utils/gen-debug-controls.py
new file mode 100755
index 00000000..272597f4
--- /dev/null
+++ b/utils/gen-debug-controls.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2024, Google Inc.
+#
+# Author: Stefan Klug <stefan.klug@ideasonboard.com>
+#
+# This script looks for occurrences of the debug metadata controls in the source
+# tree and updates src/libcamera/control_ids_debug.yaml accordingly. It is meant
+# to be used during development to ease updating of the yaml file while
+# debugging.
+
+import argparse
+import logging
+import os
+import re
+import sys
+from dataclasses import dataclass
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
+
+try:
+ import ruamel.yaml as ruyaml
+except ImportError:
+ logger.error(
+ 'Failed to import ruamel.yaml. Please install the ruamel.yaml package.')
+ sys.exit(1)
+
+@dataclass
+class FoundMatch:
+ file: os.PathLike
+ whole_match: str
+ line: int
+ type: str
+ name: str
+ size: str = None
+
+
+def get_control_name(control):
+ k = list(control.keys())
+ if len(k) != 1:
+ raise Exception(f"Can't handle control entry with {len(k)} keys")
+ return k[0]
+
+
+def find_debug_controls(dir):
+ extensions = ['.cpp', '.h']
+ files = [p for p in dir.rglob('*') if p.suffix in extensions]
+
+ # The following regex was tested on
+ # set<Span<type>>( controls::debug::something , static_cast<type>(var) )
+ # set<>( controls::debug::something , static_cast<type>(var) )
+ # set( controls::debug::something , static_cast<type> (var) )
+ exp = re.compile(r'set' # set function
+ r'(?:\<((?:[^)(])*)\>)?' # followed by an optional template param
+ r'\(\s*controls::debug::(\w+)\s*,' # referencing a debug control
+ )
+ matches = []
+ for p in files:
+ with p.open('r') as f:
+ for idx, line in enumerate(f):
+ match = exp.search(line)
+ if match:
+ m = FoundMatch(file=p, line=idx, type=match.group(1),
+ name=match.group(2), whole_match=match.group(0))
+ if m.type is not None and m.type.startswith('Span'):
+ # Simple span type detection treating the last word
+ # inside <> as type.
+ r = re.match(r'Span<(?:.*\s+)(.*)>', m.type)
+ m.type = r.group(1)
+ m.size = '[n]'
+ matches.append(m)
+ return matches
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(
+ description='Automatically updates control_ids_debug.yaml')
+ parser.parse_args(argv[1:])
+
+ yaml = ruyaml.YAML()
+ root_dir = Path(__file__).resolve().parent.parent
+ ctrl_file = root_dir.joinpath('src/libcamera/control_ids_debug.yaml')
+
+ matches = find_debug_controls(root_dir.joinpath('src'))
+
+ doc = yaml.load(ctrl_file)
+
+ controls = doc['controls']
+
+ # Create a map of names in the existing yaml for easier updating.
+ controls_map = {}
+ for control in controls:
+ for k, v in control.items():
+ controls_map[k] = v
+
+ obsolete_names = list(controls_map.keys())
+
+ for m in matches:
+ if not m.type:
+ p = m.file.relative_to(Path.cwd(), walk_up=True)
+ logger.warning(
+ f'{p}:{m.line + 1}: Failed to deduce type from {m.whole_match} ... skipping')
+ continue
+
+ p = m.file.relative_to(root_dir)
+ desc = {'type': m.type,
+ 'direction': 'out',
+ 'description': f'Debug control {m.name} found in {p}:{m.line + 1}'}
+ if m.size is not None:
+ desc['size'] = m.size
+
+ if m.name in controls_map:
+ # Can't use == for modified check because of the special yaml dicts.
+ update_needed = False
+ if list(controls_map[m.name].keys()) != list(desc.keys()):
+ update_needed = True
+ else:
+ for k, v in controls_map[m.name].items():
+ if v != desc[k]:
+ update_needed = True
+ break
+
+ if update_needed:
+ logger.info(f"Update control '{m.name}'")
+ controls_map[m.name].clear()
+ controls_map[m.name].update(desc)
+
+ obsolete_names.remove(m.name)
+ else:
+ logger.info(f"Add control '{m.name}'")
+ insert_before = len(controls)
+ for idx, control in enumerate(controls):
+ if get_control_name(control).lower() > m.name.lower():
+ insert_before = idx
+ break
+ controls.insert(insert_before, {m.name: desc})
+
+ # Remove elements from controls without recreating the list (to keep
+ # comments etc.).
+ idx = 0
+ while idx < len(controls):
+ name = get_control_name(controls[idx])
+ if name in obsolete_names:
+ logger.info(f"Remove control '{name}'")
+ controls.pop(idx)
+ else:
+ idx += 1
+
+ with ctrl_file.open('w') as f:
+ # ruamel.yaml loses the header.
+ f.write(("# SPDX-License-Identifier: LGPL-2.1-or-later\n"
+ "#\n"
+ "# This file was generated by utils/gen-debug-controls.py\n"
+ "#\n"))
+ yaml.dump(doc, f)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
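The regular expression in find_debug_controls() above can be exercised on the
three forms quoted in its comment. A standalone check, not part of the patch:

    import re

    exp = re.compile(r'set'                               # set function
                     r'(?:\<((?:[^)(])*)\>)?'             # optional template parameter
                     r'\(\s*controls::debug::(\w+)\s*,')  # debug control reference

    for line in ('set<Span<float>>( controls::debug::something , static_cast<float>(var) )',
                 'set<>( controls::debug::something , static_cast<float>(var) )',
                 'set( controls::debug::something , static_cast<float> (var) )'):
        m = exp.search(line)
        print(repr(m.group(1)), m.group(2))
    # -> 'Span<float>' something, then '' something, then None something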
diff --git a/utils/gen-ipa-priv-key.sh b/utils/gen-ipa-priv-key.sh
index 919751f2..2ca7b883 100755
--- a/utils/gen-ipa-priv-key.sh
+++ b/utils/gen-ipa-priv-key.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# gen-ipa-priv-key.sh - Generate an RSA private key to sign IPA modules
+# Generate an RSA private key to sign IPA modules
key="$1"
diff --git a/utils/gen-version.sh b/utils/gen-version.sh
index e1f7ca7b..1b818e9e 100755
--- a/utils/gen-version.sh
+++ b/utils/gen-version.sh
@@ -42,7 +42,7 @@ if [ -z "$build_dir" ] || (echo "$build_dir" | grep -q "$src_dir")
then
git update-index --refresh > /dev/null 2>&1
fi
-git diff-index --quiet HEAD || version="$version-dirty ($(date --iso-8601=seconds))"
+git diff-index --quiet HEAD || version="$version-dirty ($(date +%Y-%m-%dT%H:%M:%S%Z))"
# If a project version is provided, use it to replace the version number.
if [ -n "$project_version" ]
diff --git a/utils/hooks/pre-push b/utils/hooks/pre-push
index 9918b286..68dcbd0c 100755
--- a/utils/hooks/pre-push
+++ b/utils/hooks/pre-push
@@ -68,7 +68,7 @@ do
fi
# 2. The commit message shall have Signed-off-by lines
- # corresponding the committer and the author.
+ # corresponding to the committer, the author, and all co-developers.
committer=$(echo "$msg" | grep '^committer ' | head -1 | \
cut -d ' ' -f 2- | rev | cut -d ' ' -f 3- | rev)
if ! echo -E "$msg" | grep -F -q "Signed-off-by: ${committer}"
@@ -85,6 +85,15 @@ do
errors=$((errors+1))
fi
+ while read -r codev
+ do
+ if ! echo -E "$msg" | grep -F -q "Signed-off-by: ${codev}"
+ then
+ echo >&2 "Missing co-developer '${codev}' Signed-off-by in commit $commit"
+ errors=$((errors+1))
+ fi
+ done < <(echo "$msg" | grep '^Co-developed-by: ' | cut -d ' ' -f 2-)
+
# 3. A Reviewed-by or Acked-by is required.
if ! echo -E "$msg" | grep -q '^\(Reviewed\|Acked\)-by: '
then
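A Python paraphrase of the new co-developer check, not part of the patch:
every identity named in a Co-developed-by trailer must also appear in a
Signed-off-by line, mirroring the shell loop above (the names below are
illustrative):

    def missing_codev_signoffs(msg):
        codevs = [line.split(' ', 1)[1] for line in msg.splitlines()
                  if line.startswith('Co-developed-by: ')]
        return [c for c in codevs if 'Signed-off-by: ' + c not in msg]

    msg = ('Co-developed-by: Jane Doe <jane@example.com>\n'
           'Signed-off-by: John Smith <john@example.com>\n')
    print(missing_codev_signoffs(msg))
    # -> ['Jane Doe <jane@example.com>'], which the hook reports as an error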
diff --git a/utils/ipu3/ipu3-capture.sh b/utils/ipu3/ipu3-capture.sh
index 9294d025..004a92b0 100755
--- a/utils/ipu3/ipu3-capture.sh
+++ b/utils/ipu3/ipu3-capture.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipu3-capture.sh - Capture raw frames from cameras based on the Intel IPU3
+# Capture raw frames from cameras based on the Intel IPU3
#
# The script makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
diff --git a/utils/ipu3/ipu3-pack.c b/utils/ipu3/ipu3-pack.c
index decbfc6c..23d2db8b 100644
--- a/utils/ipu3/ipu3-pack.c
+++ b/utils/ipu3/ipu3-pack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,9 +16,8 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
-
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats\n");
printf("If the output-file is '-', output data will be written to standard output\n");
diff --git a/utils/ipu3/ipu3-process.sh b/utils/ipu3/ipu3-process.sh
index bb4abbe8..25bc849f 100755
--- a/utils/ipu3/ipu3-process.sh
+++ b/utils/ipu3/ipu3-process.sh
@@ -4,7 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# ipu3-process.sh - Process raw frames with the Intel IPU3
+# Process raw frames with the Intel IPU3
#
# The script makes use of the following tools, which are expected to be
# found in $PATH:
diff --git a/utils/ipu3/ipu3-unpack.c b/utils/ipu3/ipu3-unpack.c
index c96fafed..6ee8c45a 100644
--- a/utils/ipu3/ipu3-unpack.c
+++ b/utils/ipu3/ipu3-unpack.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
@@ -15,7 +16,7 @@
#include <sys/types.h>
#include <unistd.h>
-static void usage(const char *argv0)
+static void usage(char *argv0)
{
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Unpack the IPU3 raw Bayer format to 16-bit Bayer\n");
diff --git a/utils/meson.build b/utils/meson.build
index 8e28ada7..95d657ac 100644
--- a/utils/meson.build
+++ b/utils/meson.build
@@ -1,15 +1,7 @@
# SPDX-License-Identifier: CC0-1.0
-subdir('ipc')
+subdir('codegen')
subdir('ipu3')
-subdir('tracepoints')
-
-## Code generation
-py_modules += ['yaml']
-gen_controls = files('gen-controls.py')
-gen_formats = files('gen-formats.py')
-gen_header = files('gen-header.sh')
## Module signing
gen_ipa_priv_key = files('gen-ipa-priv-key.sh')
-gen_ipa_pub_key = files('gen-ipa-pub-key.py')
diff --git a/utils/raspberrypi/ctt/alsc_only.py b/utils/raspberrypi/ctt/alsc_only.py
index 7cd0ac01..a521c4ad 100755
--- a/utils/raspberrypi/ctt/alsc_only.py
+++ b/utils/raspberrypi/ctt/alsc_only.py
@@ -2,12 +2,14 @@
#
# SPDX-License-Identifier: BSD-2-Clause
#
-# Copyright (C) 2022, Raspberry Pi (Trading) Limited
+# Copyright (C) 2022, Raspberry Pi Ltd
#
-# alsc_only.py - alsc tuning tool
+# alsc tuning tool
-from ctt import *
+import sys
+from ctt import *
+from ctt_tools import parse_input
if __name__ == '__main__':
"""
@@ -15,13 +17,14 @@ if __name__ == '__main__':
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
+ PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -30,5 +33,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output, alsc_only=True)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
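The target dispatch above resolves the platform-specific JSON template and
ALSC grid size before starting the run. A slightly defensive sketch of the
same logic, not part of the patch (the explicit error for unknown targets is
an addition; parse_input() presumably validates the -t value already):

    def load_target(target):
        if target == 'pisp':
            from ctt_pisp import json_template, grid_size
        elif target == 'vc4':
            from ctt_vc4 import json_template, grid_size
        else:
            raise ValueError('unknown target: ' + target)
        return json_template, grid_size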
diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py
new file mode 100644
index 00000000..1c0a8193
--- /dev/null
+++ b/utils/raspberrypi/ctt/cac_only.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi (Trading) Ltd.
+#
+# cac tuning tool
+
+
+# This file allows you to tune only the chromatic aberration correction.
+# Specify any number of files in the command line args, and it will iterate
+# through them and generate an averaged cac table from all the input images,
+# which you can then insert into your tuning file.
+
+# Takes .dng files of the dots grid produced by the camera module and calculates the chromatic aberration of each dot.
+# Then takes each dot, works out where it was in the image, and uses that to output a table of the shifts
+# across the whole image.
+
+from PIL import Image
+import numpy as np
+import rawpy
+import sys
+import getopt
+
+from ctt_cac import *
+
+
+def cac(filelist, output_filepath, plot_results=False):
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+ # Multiple files are recommended to average out the lens aberration through rotations
+ for file in filelist:
+ print("\n Processing file " + str(file))
+ # Read the raw RGB values from the .dng file
+ with rawpy.imread(file) as raw:
+ rgb = raw.postprocess()
+ sizes = raw.sizes
+
+ image_size = [sizes[2], sizes[3]] # Image size, X, Y
+ # Create a colour copy of the RGB values to use later in the calibration
+ imout = Image.new(mode="RGB", size=image_size)
+ rgb_image = np.array(imout)
+ # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+ rgb.reshape((image_size[0], image_size[1], 3))
+ rgb_image = rgb
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+
+ # The json format that we then paste into the tuning file (manually)
+ sample = '''
+ {
+ "rpi.cac" :
+ {
+ "strength": 1.0,
+ "lut_rx" : [
+ rx_vals
+ ],
+ "lut_ry" : [
+ ry_vals
+ ],
+ "lut_bx" : [
+ bx_vals
+ ],
+ "lut_by" : [
+ by_vals
+ ]
+ }
+ }
+ '''
+
+ # The substitutions below may look swapped, but the PiSP (standard) dimensions are flipped
+ # relative to the PIL image coordinate directions, hence rx_vals taking ry. Also, the values
+ # calculated are colour shifts, whereas the PiSP block expects the pixel shift to apply (hence
+ # the * -1, converting a colour shift into a pixel shift)
+ sample = sample.replace("rx_vals", pprint_array(ry * -1))
+ sample = sample.replace("ry_vals", pprint_array(rx * -1))
+ sample = sample.replace("bx_vals", pprint_array(by * -1))
+ sample = sample.replace("by_vals", pprint_array(bx * -1))
+ print("Successfully converted to JSON")
+ f = open(str(output_filepath), "w+")
+ f.write(sample)
+ f.close()
+ print("Successfully written to json file")
+ '''
+ If you wish to see a plot of the colour channel shifts, add the -p or --plot option.
+ This can be a quick way of validating whether the data/dots you have are good, or
+ whether you need to change some parameters or take better images.
+ '''
+ if plot_results:
+ plot_shifts(red_shift, blue_shift)
+
+
+if __name__ == "__main__":
+ argv = sys.argv
+ # Detect the input and output file paths
+ arg_output = "output.json"
+ arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
+ opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
+
+ output_location = 0
+ input_location = 0
+ filelist = []
+ plot_results = False
+ for i in range(len(argv)):
+ if "-h" in argv[i]:
+ print(arg_help) # print the help message
+ sys.exit(2)
+ if "-o" in argv[i]:
+ output_location = i
+ if ".dng" in argv[i]:
+ filelist.append(argv[i])
+ if "-p" in argv[i]:
+ plot_results = True
+
+ arg_output = argv[output_location + 1]
+ cac(filelist, arg_output, plot_results)
diff --git a/utils/raspberrypi/ctt/colors.py b/utils/raspberrypi/ctt/colors.py
index 1ab986d6..cb4d236b 100644
--- a/utils/raspberrypi/ctt/colors.py
+++ b/utils/raspberrypi/ctt/colors.py
@@ -1,4 +1,4 @@
-# colors.py - Program to convert from RGB to LAB color space
+# Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB): # where RGB is a 1x3 array. e.g RGB = [100, 255, 230]
num = 0
XYZ = [0, 0, 0]
diff --git a/utils/raspberrypi/ctt/convert_tuning.py b/utils/raspberrypi/ctt/convert_tuning.py
index f4504d45..83cf69d4 100755
--- a/utils/raspberrypi/ctt/convert_tuning.py
+++ b/utils/raspberrypi/ctt/convert_tuning.py
@@ -8,30 +8,104 @@
import argparse
import json
+import numpy as np
import sys
from ctt_pretty_print_json import pretty_print
+from ctt_pisp import grid_size as grid_size_pisp
+from ctt_pisp import json_template as json_template_pisp
+from ctt_vc4 import grid_size as grid_size_vc4
+from ctt_vc4 import json_template as json_template_vc4
-def convert_v2(in_json: dict) -> str:
+def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
- if 'version' in in_json.keys() and in_json['version'] != 1.0:
- print(f'The JSON config reports version {in_json["version"]} that is incompatible with this tool.')
- sys.exit(-1)
+ out_ls = np.zeros((dst_h, dst_w))
+ for i in range(src_h):
+ out_ls[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
+ np.linspace(0, dst_w - 1, src_w),
+ in_ls[i])
+ for i in range(dst_w):
+ out_ls[:,i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
+ np.linspace(0, dst_h - 1, src_h),
+ out_ls[:src_h, i])
+ return out_ls
- converted = {
- 'version': 2.0,
- 'target': 'bcm2835',
- 'algorithms': [{algo: config} for algo, config in in_json.items()]
- }
- return pretty_print(converted)
+def convert_target(in_json: dict, target: str):
+
+ src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4
+ dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp
+ json_template = json_template_vc4 if target == 'vc4' else json_template_pisp
+
+ # ALSC grid sizes
+ alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc']
+ for colour in ['calibrations_Cr', 'calibrations_Cb']:
+ if colour not in alsc:
+ continue
+ for temperature in alsc[colour]:
+ in_ls = np.reshape(temperature['table'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ temperature['table'] = np.round(out_ls.flatten(), 3).tolist()
+
+ if 'luminance_lut' in alsc:
+ in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w))
+ out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
+ alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist()
+
+ # Denoise blocks
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.sdn':
+ in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']}
+ break
+
+ # AGC mode weights
+ agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc']
+ if 'channels' in agc:
+ for i, channel in enumerate(agc['channels']):
+ target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes']
+ for mode, v in channel['metering_modes'].items():
+ v['weights'] = target_agc_metering[mode]['weights']
+ else:
+ for mode, v in agc["metering_modes"].items():
+ target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes']
+ v['weights'] = target_agc_metering[mode]['weights']
+
+ # HDR
+ if target == 'pisp':
+ for i, algo in enumerate(in_json['algorithms']):
+ if list(algo.keys())[0] == 'rpi.hdr':
+ in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']}
+
+ return in_json
+
+
+def convert_v2(in_json: dict, target: str) -> str:
+
+ if 'version' in in_json.keys() and in_json['version'] == 1.0:
+ converted = {
+ 'version': 2.0,
+ 'target': target,
+ 'algorithms': [{algo: config} for algo, config in in_json.items()]
+ }
+ else:
+ converted = in_json
+
+ # Convert between vc4 <-> pisp targets. This is a best-effort conversion.
+ if converted['target'] != target:
+ converted = convert_target(converted, target)
+ converted['target'] = target
+
+ grid_size = grid_size_vc4[0] if target == 'vc4' else grid_size_pisp[0]
+ return pretty_print(converted, custom_elems={'table': grid_size, 'luminance_lut': grid_size})
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
- 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0.\n')
+ 'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n')
parser.add_argument('input', type=str, help='Input tuning file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform.',
+ choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
default=None)
@@ -40,7 +114,7 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = convert_v2(in_json)
+ out_json = convert_v2(in_json, args.target)
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
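interp_2d() above resamples a tuning grid by linear interpolation along rows
and then columns. A quick sanity check, not part of the patch, assuming the
function defined above is in scope:

    import numpy as np

    in_ls = np.array([[0.0, 1.0],
                      [2.0, 3.0]])
    print(interp_2d(in_ls, src_w=2, src_h=2, dst_w=3, dst_h=3))
    # [[0.  0.5 1. ]
    #  [1.  1.5 2. ]
    #  [2.  2.5 3. ]]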
diff --git a/utils/raspberrypi/ctt/ctt.py b/utils/raspberrypi/ctt/ctt.py
index cd89f177..96f1b5e6 100755
--- a/utils/raspberrypi/ctt/ctt.py
+++ b/utils/raspberrypi/ctt/ctt.py
@@ -4,11 +4,12 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt.py - camera tuning tool
+# camera tuning tool
import os
import sys
from ctt_image_load import *
+from ctt_cac import *
from ctt_ccm import *
from ctt_awb import *
from ctt_alsc import *
@@ -22,9 +23,10 @@ import re
"""
This file houses the camera object, which is used to perform the calibrations.
-The camera object houses all the calibration images as attributes in two lists:
+The camera object houses all the calibration images as attributes in three lists:
- imgs (macbeth charts)
- imgs_alsc (alsc correction images)
+ - imgs_cac (cac correction images)
Various calibrations are methods of the camera object, and the output is stored
in a dictionary called self.json.
Once all the calibration has been completed, the Camera.json is written into a
@@ -67,139 +69,26 @@ Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
- def __init__(self, jfile):
+ def __init__(self, jfile, json):
self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
if self.path == '/':
self.path = ''
self.imgs = []
self.imgs_alsc = []
+ self.imgs_cac = []
self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
self.log_separator = '\n'+'-'*70+'\n'
self.jf = jfile
"""
initial json dict populated by uncalibrated values
"""
- self.json = {
- "rpi.black_level": {
- "black_level": 4096
- },
- "rpi.dpc": {
- },
- "rpi.lux": {
- "reference_shutter_speed": 10000,
- "reference_gain": 1,
- "reference_aperture": 1.0
- },
- "rpi.noise": {
- },
- "rpi.geq": {
- },
- "rpi.sdn": {
- },
- "rpi.awb": {
- "priors": [
- {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
- {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
- {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
- ],
- "modes": {
- "auto": {"lo": 2500, "hi": 8000},
- "incandescent": {"lo": 2500, "hi": 3000},
- "tungsten": {"lo": 3000, "hi": 3500},
- "fluorescent": {"lo": 4000, "hi": 4700},
- "indoor": {"lo": 3000, "hi": 5000},
- "daylight": {"lo": 5500, "hi": 6500},
- "cloudy": {"lo": 7000, "hi": 8600}
- },
- "bayes": 1
- },
- "rpi.agc": {
- "metering_modes": {
- "centre-weighted": {
- "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
- },
- "spot": {
- "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- },
- "matrix": {
- "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
- }
- },
- "exposure_modes": {
- "normal": {
- "shutter": [100, 10000, 30000, 60000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- },
- "short": {
- "shutter": [100, 5000, 10000, 20000, 120000],
- "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
- }
- },
- "constraint_modes": {
- "normal": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
- ],
- "highlight": [
- {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
- {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
- ]
- },
- "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
- },
- "rpi.alsc": {
- 'omega': 1.3,
- 'n_iter': 100,
- 'luminance_strength': 0.7,
- },
- "rpi.contrast": {
- "ce_enable": 1,
- "gamma_curve": [
- 0, 0,
- 1024, 5040,
- 2048, 9338,
- 3072, 12356,
- 4096, 15312,
- 5120, 18051,
- 6144, 20790,
- 7168, 23193,
- 8192, 25744,
- 9216, 27942,
- 10240, 30035,
- 11264, 32005,
- 12288, 33975,
- 13312, 35815,
- 14336, 37600,
- 15360, 39168,
- 16384, 40642,
- 18432, 43379,
- 20480, 45749,
- 22528, 47753,
- 24576, 49621,
- 26624, 51253,
- 28672, 52698,
- 30720, 53796,
- 32768, 54876,
- 36864, 57012,
- 40960, 58656,
- 45056, 59954,
- 49152, 61183,
- 53248, 62355,
- 57344, 63419,
- 61440, 64476,
- 65535, 65535
- ]
- },
- "rpi.ccm": {
- },
- "rpi.sharpen": {
- }
- }
+ self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
- def ccm_cal(self, do_alsc_colour):
+ def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
@@ -245,7 +134,7 @@ class Camera:
Do CCM calibration
"""
try:
- ccms = ccm(self, cal_cr_list, cal_cb_list)
+ ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
@@ -259,11 +148,67 @@ class Camera:
print('Finished CCM calibration')
"""
+ Perform chromatic aberration correction using multiple images of dots.
+ """
+ def cac_cal(self, do_alsc_colour):
+ if 'rpi.cac' in self.disable:
+ return 1
+ print('\nStarting CAC calibration')
+ self.log_new_sec('CAC')
+ """
+ check if cac images have been taken
+ """
+ if len(self.imgs_cac) == 0:
+ print('\nError:\nNo cac calibration images found')
+ self.log += '\nERROR: No CAC calibration images found!'
+ self.log += '\nCAC calibration aborted!'
+ return 1
+ """
+ if image is greyscale then CAC makes no sense
+ """
+ if self.grey:
+ print('\nERROR: Can\'t do CAC on greyscale image!')
+ self.log += '\nERROR: Cannot perform CAC calibration '
+ self.log += 'on greyscale image!\nCAC aborted!'
+ del self.json['rpi.cac']
+ return 0
+ a = time.time()
+ """
+ Check if camera is greyscale or color. If not greyscale, then perform cac
+ """
+ if do_alsc_colour:
+ """
+ Here we have a color sensor. Perform cac
+ """
+ try:
+ cacs = cac(self)
+ except ArithmeticError:
+ print('ERROR: Matrix is singular!\nTake new pictures and try again...')
+ self.log += '\nERROR: Singular matrix encountered during fit!'
+ self.log += '\nCAC aborted!'
+ return 1
+ else:
+ """
+ case where config options suggest no colour ALSC tables. CAC can still be
+ calibrated, just without ALSC correction
+ """
+ self.log += '\nWARNING: No ALSC tables found.\nCAC calibration '
+ self.log += 'performed without ALSC correction...'
+ cacs = cac(self)
+
+ """
+ Write output to json
+ """
+ self.json['rpi.cac']['cac'] = cacs
+ self.log += '\nCAC calibration written to json file'
+ print('Finished CAC calibration')
+
+
+ """
Auto white balance calibration produces a colour curve for
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
- def awb_cal(self, greyworld, do_alsc_colour):
+ def awb_cal(self, greyworld, do_alsc_colour, grid_size):
if 'rpi.awb' in self.disable:
return 1
print('\nStarting AWB calibration')
@@ -306,7 +251,7 @@ class Camera:
call calibration function
"""
plot = "rpi.awb" in self.plot
- awb_out = awb(self, cal_cr_list, cal_cb_list, plot)
+ awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
ct_curve, transverse_neg, transverse_pos = awb_out
"""
write output to json
@@ -324,7 +269,7 @@ class Camera:
colour channel seperately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
- def alsc_cal(self, luminance_strength, do_alsc_colour):
+ def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0):
if 'rpi.alsc' in self.disable:
return 1
print('\nStarting ALSC calibration')
@@ -347,10 +292,10 @@ class Camera:
call calibration function
"""
plot = "rpi.alsc" in self.plot
- alsc_out = alsc_all(self, do_alsc_colour, plot)
+ alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
- write ouput to json and finish if not do_alsc_colour
+ write output to json and finish if not do_alsc_colour
"""
if not do_alsc_colour:
self.json['rpi.alsc']['luminance_lut'] = luminance_lut
@@ -393,7 +338,7 @@ class Camera:
"""
obtain worst-case scenario residual sigmas
"""
- sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list)
+ sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
"""
write output to json
"""
@@ -509,19 +454,20 @@ class Camera:
"""
writes the json dictionary to the raw json file then makes it pretty
"""
- def write_json(self):
+ def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
- "version": 2.0,
- 'target': 'bcm2835',
+ "version": version,
+ 'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
- f.write(pretty_print(out_json))
+ f.write(pretty_print(out_json,
+ custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
@@ -627,6 +573,16 @@ class Camera:
self.log += '\nWARNING: Error reading colour temperature'
self.log += '\nImage discarded!'
print('DISCARDED')
+ elif 'cac' in filename:
+ Img = load_image(self, address, mac=False)
+ self.log += '\nIdentified as a CAC image'
+ Img.name = filename
+ self.log += '\nColour temperature: {} K'.format(col)
+ self.imgs_cac.append(Img)
+ if blacklevel != -1:
+ Img.blacklevel_16 = blacklevel
+ print(img_suc_msg)
+ continue
else:
self.log += '\nIdentified as macbeth chart image'
"""
@@ -672,6 +628,7 @@ class Camera:
self.log += '\n\nImages found:'
self.log += '\nMacbeth : {}'.format(len(self.imgs))
self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
+ self.log += '\nCAC : {}'.format(len(self.imgs_cac))
self.log += '\n\nCamera metadata'
"""
check usable images found
@@ -680,22 +637,21 @@ class Camera:
print('\nERROR: No usable macbeth chart images found')
self.log += '\nERROR: No usable macbeth chart images found'
return 0
- elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0:
+ elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
print('\nERROR: No usable images found')
self.log += '\nERROR: No usable images found'
return 0
"""
Double check that every image has come from the same camera...
"""
- all_imgs = self.imgs + self.imgs_alsc
+ all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
camNames = list(set([Img.camName for Img in all_imgs]))
patterns = list(set([Img.pattern for Img in all_imgs]))
sigbitss = list(set([Img.sigbits for Img in all_imgs]))
blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
- if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \
- len(blacklevels) == 1 and len(sizes) == 1:
+ if 1:
self.grey = (patterns[0] == 128)
self.blacklevel_16 = blacklevels[0]
self.log += '\nName: {}'.format(camNames[0])
@@ -712,7 +668,7 @@ class Camera:
return 0
-def run_ctt(json_output, directory, config, log_output, alsc_only=False):
+def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
"""
check input files are jsons
"""
@@ -748,12 +704,14 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
greyworld = get_config(awb_d, "greyworld", 0, 'bool')
alsc_d = get_config(configs, "alsc", {}, 'dict')
do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
- luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num')
+ luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
+ lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num')
blacklevel = get_config(configs, "blacklevel", -1, 'num')
macbeth_d = get_config(configs, "macbeth", {}, 'dict')
mac_small = get_config(macbeth_d, "small", 0, 'bool')
mac_show = get_config(macbeth_d, "show", 0, 'bool')
mac_config = (mac_small, mac_show)
+ print("Read lsc_max_gain", lsc_max_gain)
if blacklevel < -1 or blacklevel >= 2**16:
print('\nInvalid blacklevel, defaulted to 64')
@@ -772,7 +730,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
initialise tuning tool and load images
"""
try:
- Cam = Camera(json_output)
+ Cam = Camera(json_output, json=json_template)
Cam.log_user_input(json_output, directory, config, log_output)
if alsc_only:
disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
@@ -794,14 +752,17 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
Cam.json_remove(disable)
print('\nSTARTING CALIBRATIONS')
- Cam.alsc_cal(luminance_strength, do_alsc_colour)
+ Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain)
Cam.geq_cal()
Cam.lux_cal()
Cam.noise_cal()
- Cam.awb_cal(greyworld, do_alsc_colour)
- Cam.ccm_cal(do_alsc_colour)
+ if "rpi.cac" in json_template:
+ Cam.cac_cal(do_alsc_colour)
+ Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
+ Cam.ccm_cal(do_alsc_colour, grid_size)
+
print('\nFINISHED CALIBRATIONS')
- Cam.write_json()
+ Cam.write_json(target=target, grid_size=grid_size)
Cam.write_log(log_output)
print('\nCalibrations written to: '+json_output)
if log_output is None:
@@ -811,20 +772,19 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
else:
Cam.write_log(log_output)
-
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
- Pisp Camera Tuning Tool version 1.0
-
+ PiSP Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
+ '-t' : Target platform - 'pisp' or 'vc4'. If not passed, 'vc4' used.
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
@@ -833,5 +793,10 @@ if __name__ == '__main__':
"""
parse input arguments
"""
- json_output, directory, config, log_output = parse_input()
- run_ctt(json_output, directory, config, log_output)
+ json_output, directory, config, log_output, target = parse_input()
+ if target == 'pisp':
+ from ctt_pisp import json_template, grid_size
+ elif target == 'vc4':
+ from ctt_vc4 import json_template, grid_size
+
+ run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)
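
The new '-t' switch above selects per-platform data purely by module import:
each target module only has to export json_template and grid_size. A hedged
sketch of the same dispatch written as a helper (load_target() is a
hypothetical name, not part of the tool):

    import importlib

    def load_target(target: str):
        # Map a CLI target name to its ctt data module, e.g. 'pisp' -> ctt_pisp.
        if target not in ('pisp', 'vc4'):
            raise ValueError(f"unknown target '{target}'")
        mod = importlib.import_module(f'ctt_{target}')
        return mod.json_template, mod.grid_size

    # json_template, grid_size = load_target('pisp')
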
diff --git a/utils/raspberrypi/ctt/ctt_alsc.py b/utils/raspberrypi/ctt/ctt_alsc.py
index e51d6931..1d94dfa5 100644
--- a/utils/raspberrypi/ctt/ctt_alsc.py
+++ b/utils/raspberrypi/ctt/ctt_alsc.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_alsc.py - camera tuning tool for ALSC (auto lens shading correction)
+# camera tuning tool for ALSC (auto lens shading correction)
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,8 +13,9 @@ from mpl_toolkits.mplot3d import Axes3D
"""
perform alsc calibration on a set of images
"""
-def alsc_all(Cam, do_alsc_colour, plot):
+def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0):
imgs_alsc = Cam.imgs_alsc
+ grid_w, grid_h = grid_size
"""
create list of colour temperatures and associated calibration tables
"""
@@ -23,7 +24,7 @@ def alsc_all(Cam, do_alsc_colour, plot):
list_cb = []
list_cg = []
for Img in imgs_alsc:
- col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
+ col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
@@ -68,11 +69,12 @@ def alsc_all(Cam, do_alsc_colour, plot):
t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
t_r = np.round(t_r, 3)
t_b = np.round(t_b, 3)
- r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
- b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
- r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
+ r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
+ b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
+ middle_pos = (grid_h // 2 - 1) * grid_w + grid_w // 2 - 1
+ r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
r_cen = round(r_cen/4, 3)
- b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
+ b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
b_cen = round(b_cen/4, 3)
Cam.log += '\nRed table corners: {}'.format(r_corners)
Cam.log += '\nRed table centre: {}'.format(r_cen)
@@ -116,8 +118,9 @@ def alsc_all(Cam, do_alsc_colour, plot):
"""
calculate g/r and g/b for grid_w x grid_h points arranged in a grid for a single image
"""
-def alsc(Cam, Img, do_alsc_colour, plot=False):
+def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0):
Cam.log += '\nProcessing image: ' + Img.name
+ grid_w, grid_h = grid_size
"""
get channel in correct order
"""
@@ -128,31 +131,34 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
where w is a multiple of 32.
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
average the green channels into one
"""
av_ch_g = np.mean((channels[1:3]), axis=0)
if do_alsc_colour:
"""
- obtain 16x12 grid of intensities for each channel and subtract black level
+ obtain grid_w x grid_h grid of intensities for each channel and subtract black level
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
- b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
+ b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
"""
calculate ratios as 32 bit in order to be supported by medianBlur function
"""
- cr = np.reshape(g/r, (12, 16)).astype('float32')
- cb = np.reshape(g/b, (12, 16)).astype('float32')
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
+ cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
"""
median blur to remove peaks and save as float 64
"""
cr = cv2.medianBlur(cr, 3).astype('float64')
+ cr = cr/np.min(cr) # gain tables are easier for humans to read if the minimum is 1.0
cb = cv2.medianBlur(cb, 3).astype('float64')
+ cb = cb/np.min(cb)
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
"""
debugging code showing 2D surface plot of vignetting. Quite useful for
@@ -164,7 +170,7 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
note Y is plotted as -Y so plot has same axes as image
"""
- X, Y = np.meshgrid(range(16), range(12))
+ X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
hb = hf.add_subplot(312, projection='3d')
@@ -176,21 +182,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
# print(Img.str)
plt.show()
- return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)
+ return Img.col, cr.flatten(), cb.flatten(), cg, (w, h, dx, dy)
else:
"""
only perform calculations for luminance shading
"""
- g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
- cg = np.reshape(1/g, (12, 16)).astype('float32')
+ g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
+ cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
+ cg = [min(v, max_gain) for v in cg.flatten()] # never exceed the max luminance gain
if plot:
hf = plt.figure(figsize=(8, 8))
ha = hf.add_subplot(1, 1, 1, projection='3d')
- X, Y = np.meashgrid(range(16), range(12))
+ X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg'.format(Img.str))
plt.show()
@@ -199,21 +206,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
-Compresses channel down to a 16x12 grid
+Compresses channel down to a grid of the requested size
"""
-def get_16x12_grid(chan, dx, dy):
+def get_grid(chan, dx, dy, grid_size):
+ grid_w, grid_h = grid_size
grid = []
"""
since the right and bottom borders will not necessarily have rectangles of
dimension dx x dy, the last row and column have to be handled separately.
"""
- for i in range(11):
- for j in range(15):
+ for i in range(grid_h - 1):
+ for j in range(grid_w - 1):
grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:]))
- for j in range(15):
- grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)]))
- grid.append(np.mean(chan[11*dy:, 15*dx:]))
+ grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:]))
+ for j in range(grid_w - 1):
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)]))
+ grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))
"""
return as np.array, ready for further manipulation
"""
@@ -223,7 +231,7 @@ def get_16x12_grid(chan, dx, dy):
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
-def get_sigma(Cam, cal_cr_list, cal_cb_list):
+def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
Cam.log += '\nCalculating sigmas'
"""
provided colour alsc tables were generated for two different colour
@@ -241,8 +249,8 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
sigma_rs = []
sigma_bs = []
for i in range(len(cts)-1):
- sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table']))
- sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table']))
+ sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
+ sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])
@@ -263,12 +271,13 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
"""
calculate sigma from two adjacent gain tables
"""
-def calc_sigma(g1, g2):
+def calc_sigma(g1, g2, grid_size):
+ grid_w, grid_h = grid_size
"""
reshape into grid_h x grid_w matrices
"""
- g1 = np.reshape(g1, (12, 16))
- g2 = np.reshape(g2, (12, 16))
+ g1 = np.reshape(g1, (grid_h, grid_w))
+ g2 = np.reshape(g2, (grid_h, grid_w))
"""
apply gains to gain table
"""
@@ -280,8 +289,8 @@ def calc_sigma(g1, g2):
neighbours, then append to list
"""
diffs = []
- for i in range(10):
- for j in range(14):
+ for i in range(grid_h - 2):
+ for j in range(grid_w - 2):
"""
note indexing is incremented by 1 since all patches on borders are
not counted
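
The generalised get_grid() above averages each image channel into a
grid_w x grid_h table, with ceiling-division cell sizes so that the last row
and column absorb whatever remains at the right and bottom borders. An
equivalent compact sketch (get_grid_sketch() is illustrative only, and the
black level subtraction done by the callers is omitted):

    import numpy as np

    def get_grid_sketch(chan, grid_size=(16, 12)):
        grid_w, grid_h = grid_size
        h, w = chan.shape
        # Ceiling division, matching int(-(-(w - 1) // grid_w)) in the tool.
        dx, dy = -(-(w - 1) // grid_w), -(-(h - 1) // grid_h)
        grid = np.empty((grid_h, grid_w))
        for i in range(grid_h):
            for j in range(grid_w):
                y1 = dy * (i + 1) if i < grid_h - 1 else h
                x1 = dx * (j + 1) if j < grid_w - 1 else w
                grid[i, j] = chan[dy * i:y1, dx * j:x1].mean()
        return grid.flatten()
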
diff --git a/utils/raspberrypi/ctt/ctt_awb.py b/utils/raspberrypi/ctt/ctt_awb.py
index bf45e54d..4af1fe41 100644
--- a/utils/raspberrypi/ctt/ctt_awb.py
+++ b/utils/raspberrypi/ctt/ctt_awb.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_awb.py - camera tuning tool for AWB
+# camera tuning tool for AWB
from ctt_image_load import *
import matplotlib.pyplot as plt
@@ -13,7 +13,7 @@ from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
-def awb(Cam, cal_cr_list, cal_cb_list, plot):
+def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
@@ -43,7 +43,7 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
- r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
@@ -293,12 +293,13 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
"""
obtain greyscale patches and perform alsc colour correction
"""
-def get_alsc_patches(Img, colour_cals, grey=True):
+def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
+ grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
@@ -345,12 +346,12 @@ def get_alsc_patches(Img, colour_cals, grey=True):
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
- col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
- dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+ dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
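
get_alsc_patches() above blends the two calibration tables whose colour
temperatures bracket the image, using inverse-distance weights
(bef_tabs*db + aft_tabs*da)/(da + db). A standalone sketch of that blend,
assuming colour_cals maps colour temperature to a table array (names and
shapes here are assumptions, not the tool's API):

    import numpy as np

    def interp_table(ct, colour_cals):
        cts = sorted(colour_cals.keys())
        if ct <= cts[0]:
            return np.array(colour_cals[cts[0]])
        if ct >= cts[-1]:
            return np.array(colour_cals[cts[-1]])
        bef = max(t for t in cts if t <= ct)
        aft = min(t for t in cts if t >= ct)
        if bef == aft:
            return np.array(colour_cals[bef])
        da, db = ct - bef, aft - ct
        # The closer table gets the larger weight, hence bef*db and aft*da.
        return (np.array(colour_cals[bef]) * db
                + np.array(colour_cals[aft]) * da) / (da + db)
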
diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
new file mode 100644
index 00000000..5a4c5101
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_cac.py
@@ -0,0 +1,228 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
+
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import cm
+
+from ctt_dots_locator import find_dots_locations
+
+
+# This is the wrapper file that creates a JSON entry for you to append
+# to your camera tuning file.
+# It calculates the chromatic aberration at different points throughout
+# the image and uses that to produce a matrix that can then be used
+# in the camera tuning files to correct this aberration.
+
+
+def pprint_array(array):
+ # Function to print the array in a tidier format
+ output = ""
+ for i in range(len(array)):
+ for j in range(len(array[0])):
+ output += str(round(array[i, j], 2)) + ", "
+ # Add the necessary indentation to the array
+ output += "\n "
+ # Cut off the end of the array (nicely formats it)
+ return output[:-22]
+
+
+def plot_shifts(red_shifts, blue_shifts):
+ # If users want, they can pass a command line option to show the shifts on a graph
+ # Can be useful to check that the functions are all working, and that the sample
+ # images are doing the right thing
+ Xs = np.array(red_shifts)[:, 0]
+ Ys = np.array(red_shifts)[:, 1]
+ Zs = np.array(red_shifts)[:, 2]
+ Zs2 = np.array(red_shifts)[:, 3]
+ Zs3 = np.array(blue_shifts)[:, 2]
+ Zs4 = np.array(blue_shifts)[:, 3]
+
+ fig, axs = plt.subplots(2, 2)
+ ax = fig.add_subplot(2, 2, 1, projection='3d')
+ ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red X Shift')
+ ax = fig.add_subplot(2, 2, 2, projection='3d')
+ ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
+ ax.set_title('Red Y Shift')
+ ax = fig.add_subplot(2, 2, 3, projection='3d')
+ ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue X Shift')
+ ax = fig.add_subplot(2, 2, 4, projection='3d')
+ ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
+ ax.set_title('Blue Y Shift')
+ fig.tight_layout()
+ plt.show()
+
+
+def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
+ # Convert the shifts to a numpy array for easier handling and initialise other variables
+ red_shifts = np.array(red_shift)
+ blue_shifts = np.array(blue_shift)
+ # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
+ xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+ ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+ xrsgrid = []
+ xbsgrid = []
+ yrsgrid = []
+ ybsgrid = []
+
+ # Format the grids - numpy doesn't work for this, as it wants a
+ # nice uniformly spaced grid, which we may not have yet, hence the rather manual setup
+ for x in range(output_grid_size - 1):
+ xrsgrid.append([])
+ yrsgrid.append([])
+ xbsgrid.append([])
+ ybsgrid.append([])
+ for y in range(output_grid_size - 1):
+ xrsgrid[x].append([])
+ yrsgrid[x].append([])
+ xbsgrid[x].append([])
+ ybsgrid[x].append([])
+
+ image_size = (image_dimensions[0], image_dimensions[1])
+ gridxsize = image_size[0] / (output_grid_size - 1)
+ gridysize = image_size[1] / (output_grid_size - 1)
+
+ # Iterate through each dot and its shift values, and put these into the correct grid location
+ for red_shift in red_shifts:
+ xgridloc = int(red_shift[0] / gridxsize)
+ ygridloc = int(red_shift[1] / gridysize)
+ xrsgrid[xgridloc][ygridloc].append(red_shift[2])
+ yrsgrid[xgridloc][ygridloc].append(red_shift[3])
+
+ for blue_shift in blue_shifts:
+ xgridloc = int(blue_shift[0] / gridxsize)
+ ygridloc = int(blue_shift[1] / gridysize)
+ xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
+ ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
+
+ # Now calculate the average pixel shift for each square in the grid
+ for x in range(output_grid_size - 1):
+ for y in range(output_grid_size - 1):
+ xrgrid[x, y] = np.mean(xrsgrid[x][y])
+ yrgrid[x, y] = np.mean(yrsgrid[x][y])
+ xbgrid[x, y] = np.mean(xbsgrid[x][y])
+ ybgrid[x, y] = np.mean(ybsgrid[x][y])
+
+ # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
+ input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
+ output_grids = np.zeros((4, output_grid_size, output_grid_size))
+
+ # Interpolate the centre of the grid
+ output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
+
+ # Edge cases:
+ output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
+ output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, -1] + input_grids[:, 1:, -1]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
+ output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
+ output_grids[:, -1, 1:-1] = ((input_grids[:, -1, :-1] + input_grids[:, -1, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
+
+ # Corner Cases:
+ output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
+ output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
+ output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
+ output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
+
+ # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
+ # This is due to the PiSP (standard) dimensions being flipped in comparison to
+ # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+ # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
+
+ output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
+ return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
+
+
+def analyse_dot(dot, dot_location=(0, 0)):
+ # Scan through the dot, calculate the centroid of each colour channel by doing:
+ # pixel channel brightness * distance from top left corner
+ # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
+ red_channel = np.array(dot)[:, :, 0]
+ y_num_pixels = len(red_channel[0])
+ x_num_pixels = len(red_channel)
+ yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
+ xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
+ red_sum = np.sum(red_channel)
+
+ green_channel = np.array(dot)[:, :, 1]
+ ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
+ xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
+ green_sum = np.sum(green_channel)
+
+ blue_channel = np.array(dot)[:, :, 2]
+ yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
+ xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
+ blue_sum = np.sum(blue_channel)
+
+ # We return this structure. It contains 2 arrays that contain:
+ # the locations of the dot center, along with the channel shifts in the x and y direction:
+ # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
+
+ centre_x = int(dot_location[0]) + int(len(dot) / 2)
+ centre_y = int(dot_location[1]) + int(len(dot[0]) / 2)
+ red = [centre_x, centre_y,
+ xred_weight / red_sum - xgreen_weight / green_sum,
+ yred_weight / red_sum - ygreen_weight / green_sum]
+ blue = [centre_x, centre_y,
+ xblue_weight / blue_sum - xgreen_weight / green_sum,
+ yblue_weight / blue_sum - ygreen_weight / green_sum]
+ return [red, blue]
+
+
+def cac(Cam):
+ filelist = Cam.imgs_cac
+
+ Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
+ np.set_printoptions(precision=3)
+ np.set_printoptions(suppress=True)
+
+ # Create arrays to hold all the dots data and their colour offsets
+ red_shift = [] # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+ blue_shift = []
+ # Iterate through the files
+ # Multiple files are recommended to average out the lens aberration through rotations
+ for file in filelist:
+ Cam.log += '\nCAC processing file'
+ print("\n Processing file")
+ # Read the raw RGB values
+ rgb = file.rgb
+ image_size = [file.h, file.w] # Image size (rows, columns)
+ # The rgb values need reshaping to a (rows, columns, 3) array to be worked with easily
+ rgb_image = np.reshape(rgb, (image_size[0], image_size[1], 3))
+
+ # Pass the RGB image through to the dots locating program
+ # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+ print("Finding dots")
+ Cam.log += '\nFinding dots'
+ dots, dots_locations = find_dots_locations(rgb_image)
+
+ # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+ # by how far the chromatic aberration has shifted each channel
+ Cam.log += '\nDots found: {}'.format(str(len(dots)))
+ print('Dots found: ' + str(len(dots)))
+
+ for dot, dot_location in zip(dots, dots_locations):
+ if len(dot) > 0:
+ if (dot_location[0] > 0) and (dot_location[1] > 0):
+ ret = analyse_dot(dot, dot_location)
+ red_shift.append(ret[0])
+ blue_shift.append(ret[1])
+
+ # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+ # for the CAC block to handle and then store these as a .json file to be added to the camera
+ # tuning file
+ print("\nCreating output grid")
+ Cam.log += '\nCreating output grid'
+ rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+ print("CAC correction complete!")
+ Cam.log += '\nCAC correction complete!'
+
+ # Give the JSON dict back to the main ctt program
+ return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
diff --git a/utils/raspberrypi/ctt/ctt_ccm.py b/utils/raspberrypi/ctt/ctt_ccm.py
index a09bfd09..07c943a8 100644
--- a/utils/raspberrypi/ctt/ctt_ccm.py
+++ b/utils/raspberrypi/ctt/ctt_ccm.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ccm.py - camera tuning tool for CCM (colour correction matrix)
+# camera tuning tool for CCM (colour correction matrix)
from ctt_image_load import *
from ctt_awb import get_alsc_patches
Finds colour correction matrices for a list of images
"""
-def ccm(Cam, cal_cr_list, cal_cb_list):
+def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
global matrix_selection_types, typenum
imgs = Cam.imgs
"""
@@ -133,9 +133,7 @@ def ccm(Cam, cal_cr_list, cal_cb_list):
Note: if alsc is disabled then colour_cals will be set to None and
the function will simply return the macbeth patches
"""
- r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
- # 256 values for each patch of sRGB values
-
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
diff --git a/utils/raspberrypi/ctt/ctt_config_example.json b/utils/raspberrypi/ctt/ctt_config_example.json
index c7f90761..1105862c 100644
--- a/utils/raspberrypi/ctt/ctt_config_example.json
+++ b/utils/raspberrypi/ctt/ctt_config_example.json
@@ -3,7 +3,8 @@
"plot": [],
"alsc": {
"do_alsc_colour": 1,
- "luminance_strength": 0.5
+ "luminance_strength": 0.8,
+ "max_gain": 8.0
},
"awb": {
"greyworld": 0
@@ -13,4 +14,4 @@
"small": 0,
"show": 0
}
-} \ No newline at end of file
+}
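
For reference, the two alsc options shown here carry the same defaults that
run_ctt() falls back to. A hedged sketch of how such a config is consumed
(plain dict.get() stands in for the tool's get_config() helper):

    import json

    with open('ctt_config_example.json') as f:
        configs = json.load(f)

    alsc_d = configs.get('alsc', {})
    luminance_strength = alsc_d.get('luminance_strength', 0.8)
    lsc_max_gain = alsc_d.get('max_gain', 8.0)
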
diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py
new file mode 100644
index 00000000..4945c04b
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_dots_locator.py
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_dots_locator.py - Used by CAC algorithm to convert image to set of dots
+
+'''
+This file takes the black and white version of the image, along with
+the color version. It then locates the black dots on the image by
+thresholding dark pixels.
+In a rather fun way, the algorithm bounces around the thresholded area in a random path.
+We then use the maximum and minimum of these paths to determine the dot shape and size.
+This info is then used to return colored dots and locations back to the main file.
+'''
+
+import numpy as np
+import random
+from PIL import Image, ImageEnhance, ImageFilter
+
+
+def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10):
+ # Initialise some starting variables
+ pixels = Image.fromarray(rgb_image)
+ pixels = pixels.convert("L")
+ enhancer = ImageEnhance.Contrast(pixels)
+ im_output = enhancer.enhance(1.4)
+ # We smooth it slightly to make it easier for the dot recognition program to locate the dots
+ im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
+ bw_image = np.array(im_output)
+
+ location = [0, 0]
+ dots = []
+ dots_location = []
+ # the program avoids the image edges - we don't want a dot that is half a circle, as the
+ # centroids would all be wrong
+ for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
+ for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
+ location = [x, y]
+ scrap_dot = False # A variable used to make sure that this is a valid dot
+ if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
+ heading = "south" # Define a starting direction to move in
+ coords = []
+ for i in range(search_path_length): # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
+ # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
+ if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
+ if heading == "south":
+ if bw_image[location[0] + 1, location[1]] < color_threshold:
+ # Here, notice it does not go south, but actually goes southeast
+ # This is crucial in ensuring that we make our way around the majority of the dot
+ location[0] = location[0] + 1
+ location[1] = location[1] + 1
+ heading = "south"
+ else:
+ # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+
+ if heading == "east":
+ if bw_image[location[0], location[1] + 1] < color_threshold:
+ location[1] = location[1] + 1
+ heading = "east"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "west":
+ if bw_image[location[0], location[1] - 1] < color_threshold:
+ location[1] = location[1] - 1
+ heading = "west"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "north"
+ if dir == 2:
+ heading = "south"
+
+ if heading == "north":
+ if bw_image[location[0] - 1, location[1]] < color_threshold:
+ location[0] = location[0] - 1
+ heading = "north"
+ else:
+ dir = random.randint(1, 2)
+ if dir == 1:
+ heading = "west"
+ if dir == 2:
+ heading = "east"
+ # Log where our particle travels across the dot
+ coords.append([location[0], location[1]])
+ else:
+ scrap_dot = True # We just don't have enough space around the dot, discard this one, and move on
+ if not scrap_dot:
+ # get the size of the walked path's bounding box around the dot
+ x_coords = np.array(coords)[:, 0]
+ y_coords = np.array(coords)[:, 1]
+ hsquaresize = max(list(x_coords)) - min(list(x_coords))
+ vsquaresize = max(list(y_coords)) - min(list(y_coords))
+ # Create the bounding coordinates of the rectangle surrounding the dot
+ # Program uses the dotsize + half of the dotsize to ensure we get all that color fringing
+ extra_space_factor = 0.45
+ top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
+ btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
+ top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
+ btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
+ # Overwrite the area of the dot to ensure we don't use it again
+ bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
+ # Add the color version of the dot to the list to send off, along with some coordinates.
+ dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
+ dots_location.append([top_left_x, top_left_y])
+ else:
+ # Dot was too close to the image border to be useable
+ pass
+ return dots, dots_location
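
A quick way to exercise find_dots_locations() is a synthetic canvas with a
single dark square standing in for a dot (all values here are arbitrary, and
this smoke test is not part of the tool):

    import numpy as np
    from ctt_dots_locator import find_dots_locations

    # Grey canvas, one 20x20 dark "dot" near the centre, away from the
    # 75-pixel border that the locator skips.
    canvas = np.full((400, 400, 3), 180, dtype=np.uint8)
    canvas[190:210, 190:210] = 10

    dots, locations = find_dots_locations(canvas)
    print(len(dots), locations)
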
diff --git a/utils/raspberrypi/ctt/ctt_geq.py b/utils/raspberrypi/ctt/ctt_geq.py
index c45addcd..5a91ebb4 100644
--- a/utils/raspberrypi/ctt/ctt_geq.py
+++ b/utils/raspberrypi/ctt/ctt_geq.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_geq.py - camera tuning tool for GEQ (green equalisation)
+# camera tuning tool for GEQ (green equalisation)
from ctt_tools import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py
index 310c5e88..531de328 100644
--- a/utils/raspberrypi/ctt/ctt_image_load.py
+++ b/utils/raspberrypi/ctt/ctt_image_load.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
-# ctt_image_load.py - camera tuning tool image loading
+# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
@@ -350,6 +350,7 @@ def dng_load_image(Cam, im_str):
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]
+ Img.rgb = raw_im.postprocess()
except Exception:
print("\nERROR: failed to load DNG file", im_str)
diff --git a/utils/raspberrypi/ctt/ctt_lux.py b/utils/raspberrypi/ctt/ctt_lux.py
index 70855e1b..46be1512 100644
--- a/utils/raspberrypi/ctt/ctt_lux.py
+++ b/utils/raspberrypi/ctt/ctt_lux.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_lux.py - camera tuning tool for lux level
+# camera tuning tool for lux level
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_macbeth_locator.py b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
index 178aeed0..f22dbf31 100644
--- a/utils/raspberrypi/ctt/ctt_macbeth_locator.py
+++ b/utils/raspberrypi/ctt/ctt_macbeth_locator.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_macbeth_locator.py - camera tuning tool Macbeth chart locator
+# camera tuning tool Macbeth chart locator
from ctt_ransac import *
from ctt_tools import *
diff --git a/utils/raspberrypi/ctt/ctt_noise.py b/utils/raspberrypi/ctt/ctt_noise.py
index 3270bf34..0b18d83f 100644
--- a/utils/raspberrypi/ctt/ctt_noise.py
+++ b/utils/raspberrypi/ctt/ctt_noise.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_noise.py - camera tuning tool noise calibration
+# camera tuning tool noise calibration
from ctt_image_load import *
import matplotlib.pyplot as plt
diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py
new file mode 100755
index 00000000..a59b053c
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_pisp.py
@@ -0,0 +1,805 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# ctt_pisp.py - camera tuning tool data for PiSP platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.dpc": {
+ "strength": 1
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.denoise":
+ {
+ "normal":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 0.8,
+ "threshold": 0.05
+ }
+ },
+ "hdr":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ },
+ "night":
+ {
+ "sdn":
+ {
+ "deviation": 1.6,
+ "strength": 0.5,
+ "deviation2": 3.2,
+ "deviation_no_tdn": 3.2,
+ "strength_no_tdn": 0.75
+ },
+ "cdn":
+ {
+ "deviation": 200,
+ "strength": 0.3
+ },
+ "tdn":
+ {
+ "deviation": 1.3,
+ "threshold": 0.1
+ }
+ }
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 7700},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8000}
+ },
+ "bayes": 1
+ },
+ "rpi.agc":
+ {
+ "channels":
+ [
+ {
+ "comment": "Channel 0 is normal AGC",
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 60000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ },
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 1 is the HDR short channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 60000 ],
+ "gain": [ 1.0, 1.0, 1.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.95,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.7,
+ 1000, 0.7
+ ]
+ },
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.2,
+ "y_target":
+ [
+ 0, 0.002,
+ 1000, 0.002
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 2 is the HDR long channel",
+ "desaturate": 0,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ ],
+ "highlight": [
+ ],
+ "shadows": [
+ ]
+ },
+ "channel_constraints":
+ [
+ {
+ "bound": "UPPER",
+ "channel": 4,
+ "factor": 8
+ },
+ {
+ "bound": "LOWER",
+ "channel": 4,
+ "factor": 2
+ }
+ ],
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "comment": "Channel 3 is the night mode channel",
+ "base_ev": 0.33,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 20000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 20000, 66666, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 4.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.17
+ ]
+ }
+ ]
+ },
+ "rpi.alsc": {
+ 'omega': 1.3,
+ 'n_iter': 100,
+ 'luminance_strength': 0.8,
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.cac": {
+ },
+ "rpi.sharpen": {
+ "threshold": 0.25,
+ "limit": 1.0,
+ "strength": 1.0
+ },
+ "rpi.hdr":
+ {
+ "Off":
+ {
+ "cadence": [ 0 ]
+ },
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map": { "short": 1, "long": 2 }
+ },
+ "SingleExposure":
+ {
+ "cadence": [1],
+ "channel_map": { "short": 1 },
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "MultiExposure":
+ {
+ "cadence": [1, 2],
+ "channel_map": { "short": 1, "long": 2 },
+ "stitch_enable": 1,
+ "spatial_gain": 2.0,
+ "tonemap_enable": 1
+ },
+ "Night":
+ {
+ "cadence": [ 3 ],
+ "channel_map": { "night": 3 },
+ "tonemap_enable": 1,
+ "tonemap":
+ [
+ 0, 0,
+ 5000, 20000,
+ 10000, 30000,
+ 20000, 47000,
+ 30000, 55000,
+ 65535, 65535
+ ]
+ }
+ }
+}
+
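+# ALSC grid for this platform: ctt_pretty_print_json.py uses grid_size[0] to
+# decide how many "table"/"luminance_lut" entries to print per line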
+grid_size = (32, 32)
diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
index 3e3b8475..a4cae62d 100755
--- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py
+++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
@@ -19,13 +19,19 @@ class Encoder(json.JSONEncoder):
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
+ 'weights': 15,
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
+ 'lut_rx': 9,
+ 'lut_bx': 9,
+ 'lut_by': 9,
+ 'lut_ry': 9,
'gamma_curve': 2,
'y_target': 2,
- 'prior': 2
+ 'prior': 2,
+ 'tonemap': 2
}
def encode(self, o, node_key=None):
@@ -87,7 +93,7 @@ class Encoder(json.JSONEncoder):
return self.encode(o)
-def pretty_print(in_json: dict) -> str:
+def pretty_print(in_json: dict, custom_elems={}) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
@@ -95,12 +101,15 @@ def pretty_print(in_json: dict) -> str:
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
- return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
+ encoder = Encoder(indent=4, sort_keys=False)
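+ # dict union (Python 3.9+): caller-supplied element counts override defaults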
+ encoder.custom_elems |= custom_elems
+ return encoder.encode(in_json)
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
'Prettify a version 2.0 camera tuning config JSON file.')
+ parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4')
parser.add_argument('input', type=str, help='Input tuning file.')
parser.add_argument('output', type=str, nargs='?',
help='Output converted tuning file. If not provided, the input file will be updated in-place.',
@@ -110,7 +119,12 @@ if __name__ == "__main__":
with open(args.input, 'r') as f:
in_json = json.load(f)
- out_json = pretty_print(in_json)
+ if args.target == 'pisp':
+ from ctt_pisp import grid_size
+ elif args.target == 'vc4':
+ from ctt_vc4 import grid_size
+
+ out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})
with open(args.output if args.output is not None else args.input, 'w') as f:
f.write(out_json)
diff --git a/utils/raspberrypi/ctt/ctt_ransac.py b/utils/raspberrypi/ctt/ctt_ransac.py
index 9ed7d93c..01bba302 100644
--- a/utils/raspberrypi/ctt/ctt_ransac.py
+++ b/utils/raspberrypi/ctt/ctt_ransac.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_ransac.py - camera tuning tool RANSAC selector for Macbeth chart locator
+# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
diff --git a/utils/raspberrypi/ctt/ctt_tools.py b/utils/raspberrypi/ctt/ctt_tools.py
index 79195289..50b01ecf 100644
--- a/utils/raspberrypi/ctt/ctt_tools.py
+++ b/utils/raspberrypi/ctt/ctt_tools.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# ctt_tools.py - camera tuning tool miscellaneous
+# camera tuning tool miscellaneous
import time
import re
@@ -65,11 +65,12 @@ def parse_input():
directory = get_config(args_dict, '-i', None, 'string')
config = get_config(args_dict, '-c', None, 'string')
log_path = get_config(args_dict, '-l', None, 'string')
+ target = get_config(args_dict, '-t', "vc4", 'string')
if directory is None:
raise ArgError('\n\nERROR! No input directory given.')
if json_output is None:
raise ArgError('\n\nERROR! No output json given.')
- return json_output, directory, config, log_path
+ return json_output, directory, config, log_path, target
"""
diff --git a/utils/raspberrypi/ctt/ctt_vc4.py b/utils/raspberrypi/ctt/ctt_vc4.py
new file mode 100755
index 00000000..7154e110
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_vc4.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# ctt_vc4.py - camera tuning tool data for VC4 platforms
+
+
+json_template = {
+ "rpi.black_level": {
+ "black_level": 4096
+ },
+ "rpi.dpc": {
+ },
+ "rpi.lux": {
+ "reference_shutter_speed": 10000,
+ "reference_gain": 1,
+ "reference_aperture": 1.0
+ },
+ "rpi.noise": {
+ },
+ "rpi.geq": {
+ },
+ "rpi.sdn": {
+ },
+ "rpi.awb": {
+ "priors": [
+ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
+ {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
+ {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+ ],
+ "modes": {
+ "auto": {"lo": 2500, "hi": 8000},
+ "incandescent": {"lo": 2500, "hi": 3000},
+ "tungsten": {"lo": 3000, "hi": 3500},
+ "fluorescent": {"lo": 4000, "hi": 4700},
+ "indoor": {"lo": 3000, "hi": 5000},
+ "daylight": {"lo": 5500, "hi": 6500},
+ "cloudy": {"lo": 7000, "hi": 8600}
+ },
+ "bayes": 1
+ },
+ "rpi.agc": {
+ "metering_modes": {
+ "centre-weighted": {
+ "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
+ },
+ "spot": {
+ "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+ "matrix": {
+ "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ }
+ },
+ "exposure_modes": {
+ "normal": {
+ "shutter": [100, 10000, 30000, 60000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ },
+ "short": {
+ "shutter": [100, 5000, 10000, 20000, 120000],
+ "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
+ }
+ },
+ "constraint_modes": {
+ "normal": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
+ ],
+ "highlight": [
+ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
+ {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
+ ]
+ },
+ "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
+ },
+ "rpi.alsc": {
+ 'omega': 1.3,
+ 'n_iter': 100,
+ 'luminance_strength': 0.7,
+ },
+ "rpi.contrast": {
+ "ce_enable": 1,
+ "gamma_curve": [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ },
+ "rpi.ccm": {
+ },
+ "rpi.sharpen": {
+ }
+}
+
+grid_size = (16, 12)
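+# (16 columns, 12 rows): the VC4 ALSC grid, matching the (2, 12, 16) reshape
+# in ctt_awb.get_alsc_patches()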
diff --git a/utils/rkisp1/rkisp1-capture.sh b/utils/rkisp1/rkisp1-capture.sh
index c5f859f2..d767e31d 100755
--- a/utils/rkisp1/rkisp1-capture.sh
+++ b/utils/rkisp1/rkisp1-capture.sh
@@ -4,8 +4,7 @@
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
-# rkisp-capture.sh - Capture processed frames from cameras based on the
-# Rockchip ISP1
+# Capture processed frames from cameras based on the Rockchip ISP1
#
# The scripts makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
diff --git a/utils/tracepoints/analyze-ipa-trace.py b/utils/tracepoints/analyze-ipa-trace.py
index 50fbbf42..92e8a235 100755
--- a/utils/tracepoints/analyze-ipa-trace.py
+++ b/utils/tracepoints/analyze-ipa-trace.py
@@ -4,7 +4,7 @@
#
# Author: Paul Elder <paul.elder@ideasonboard.com>
#
-# analyze-ipa-trace.py - Example of how to extract information from libcamera lttng traces
+# Example of how to extract information from libcamera lttng traces
import argparse
import bt2
diff --git a/utils/tracepoints/meson.build b/utils/tracepoints/meson.build
deleted file mode 100644
index 807230fc..00000000
--- a/utils/tracepoints/meson.build
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: CC0-1.0
-
-py_modules += ['jinja2']
-
-gen_tracepoints_header = find_program('./gen-tp-header.py')
diff --git a/utils/tuning/README.rst b/utils/tuning/README.rst
index ef3e6ad7..89a1d61e 100644
--- a/utils/tuning/README.rst
+++ b/utils/tuning/README.rst
@@ -1,11 +1,20 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
-.. TODO: Write an overview of libtuning
+libcamera tuning tools
+======================
-Dependencies
-------------
+.. Note:: The tuning tools are still very much work in progress. If in doubt,
+ please ask on the mailing list.
+
+.. todo::
+ Write documentation
+
+Installation of dependencies
+----------------------------
+
+::
+ # Using a venv
+ python3 -m venv venv
+ . ./venv/bin/activate
+ pip3 install -r requirements.txt
-- numpy
-- opencv-python
-- py3exiv2
-- rawpy
diff --git a/utils/tuning/config-example.yaml b/utils/tuning/config-example.yaml
new file mode 100644
index 00000000..5593eaef
--- /dev/null
+++ b/utils/tuning/config-example.yaml
@@ -0,0 +1,54 @@
+general:
+ disable: []
+ plot: []
+ alsc:
+ do_alsc_colour: 1
+ luminance_strength: 0.5
+ awb:
+ # Algorithm can either be 'grey' or 'bayes'
+ algorithm: bayes
+ # Priors are only used for the bayes algorithm. They are defined in linear
+ # space. A good starting point is:
+ # - lux: 0
+ # ct: [ 2000, 3000, 13000 ]
+ # probability: [ 1.005, 1.0, 1.0 ]
+ # - lux: 800
+ # ct: [ 2000, 6000, 13000 ]
+ # probability: [ 1.0, 1.01, 1.01 ]
+ # - lux: 1500
+ # ct: [ 2000, 4000, 6000, 6500, 7000, 13000 ]
+ # probability: [ 1.0, 1.005, 1.032, 1.037, 1.01, 1.01 ]
+ priors:
+ - lux: 0
+ ct: [ 2000, 13000 ]
+ probability: [ 1.0, 1.0 ]
+ AwbMode:
+ AwbAuto:
+ lo: 2500
+ hi: 8000
+ AwbIncandescent:
+ lo: 2500
+ hi: 3000
+ AwbTungsten:
+ lo: 3000
+ hi: 3500
+ AwbFluorescent:
+ lo: 4000
+ hi: 4700
+ AwbIndoor:
+ lo: 3000
+ hi: 5000
+ AwbDaylight:
+ lo: 5500
+ hi: 6500
+ AwbCloudy:
+ lo: 6500
+ hi: 8000
+ # One custom mode can be defined if needed
+ #AwbCustom:
+ # lo: 2000
+ # hi: 13000
+ macbeth:
+ small: 1
+ show: 0
+# blacklevel: 32 \ No newline at end of file
diff --git a/utils/tuning/libtuning/average.py b/utils/tuning/libtuning/average.py
index e28770d7..c41075a1 100644
--- a/utils/tuning/libtuning/average.py
+++ b/utils/tuning/libtuning/average.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# average.py - Wrapper for numpy averaging functions to enable duck-typing
+# Wrapper for numpy averaging functions to enable duck-typing
import numpy as np
diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py
new file mode 100644
index 00000000..240f37e6
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_awb.py
@@ -0,0 +1,378 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for AWB
+
+import logging
+
+import matplotlib.pyplot as plt
+from bisect import bisect_left
+from scipy.optimize import fmin
+import numpy as np
+
+from .image import Image
+
+logger = logging.getLogger(__name__)
+
+"""
+obtain piecewise linear approximation for colour curve
+"""
+def awb(imgs, cal_cr_list, cal_cb_list, plot):
+ """
+ condense alsc calibration tables into one dictionary
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab/np.min(cr_tab)
+ cb_tab = cb_tab/np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+ """
+ obtain data from greyscale macbeth patches
+ """
+ rb_raw = []
+ rbs_hat = []
+ for Img in imgs:
+ logger.info(f'Processing {Img.name}')
+ """
+ get greyscale patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and the
+ function will just return the greyscale patches
+ """
+ r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+ """
+ calculate ratio of r, b to g
+ """
+ r_g = np.mean(r_patchs/g_patchs)
+ b_g = np.mean(b_patchs/g_patchs)
+ logger.info(f' r : {r_g:.4f} b : {b_g:.4f}')
+ """
+ The curve tends to be better behaved in so-called hatspace.
+ R, B, G represent the individual channels. The colour curve is plotted in
+ r, b space, where:
+ r = R/G
+ b = B/G
+ This will be referred to as dehatspace... (sorry)
+ Hatspace is defined as:
+ r_hat = R/(R+B+G)
+ b_hat = B/(R+B+G)
+ To convert from dehatspace to hatspace (hat operation):
+ r_hat = r/(1+r+b)
+ b_hat = b/(1+r+b)
+ To convert from hatspace to dehatspace (dehat operation):
+ r = r_hat/(1-r_hat-b_hat)
+ b = b_hat/(1-r_hat-b_hat)
+ Proof is left as an exercise to the reader...
+ Throughout the code, r and b are sometimes referred to as r_g and b_g
+ as a reminder that they are ratios
+ """
+ r_g_hat = r_g/(1+r_g+b_g)
+ b_g_hat = b_g/(1+r_g+b_g)
+ logger.info(f' r_hat : {r_g_hat:.4f} b_hat : {b_g_hat:.4f}')
+ rbs_hat.append((r_g_hat, b_g_hat, Img.color))
+ rb_raw.append((r_g, b_g))
+
+ logger.info('Finished processing images')
+ """
+ sort all lists simultaneously by r_hat
+ """
+ rbs_zip = list(zip(rbs_hat, rb_raw))
+ rbs_zip.sort(key=lambda x: x[0][0])
+ rbs_hat, rb_raw = list(zip(*rbs_zip))
+ """
+ unzip tuples ready for processing
+ """
+ rbs_hat = list(zip(*rbs_hat))
+ rb_raw = list(zip(*rb_raw))
+ """
+ fit quadratic curve to r_g_hat and b_g_hat
+ """
+ a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
+ logger.info('Fit quadratic curve in hatspace')
+ """
+ the algorithm now approximates the shortest distance from each point to the
+ curve in dehatspace. Since the fit is done in hatspace, it is easier to
+ find the actual shortest distance in hatspace and use the projection back
+ into dehatspace as an overestimate.
+ The distance will be used for two things:
+ 1) In the case that colour temperature does not strictly decrease with
+ increasing r/g, the closest point to the line will be chosen out of an
+ increasing pair of colours.
+
+ 2) To calculate transverse negative and positive, the maximum positive
+ and negative distance from the line are chosen. This benefits from the
+ overestimate as the transverse pos/neg are upper bound values.
+ """
+ """
+ define fit function
+ """
+ def f(x):
+ return a*x**2 + b*x + c
+ """
+ iterate over points (R, B are x and y coordinates of points) and calculate
+ distance to line in dehatspace
+ """
+ dists = []
+ for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
+ """
+ define function to minimise as square distance between datapoint and
+ point on curve. Squaring is monotonic so minimising radius squared is
+ equivalent to minimising radius
+ """
+ def f_min(x):
+ y = f(x)
+ return((x-R)**2+(y-B)**2)
+ """
+ perform optimisation with scipy.optimize.fmin
+ """
+ x_hat = fmin(f_min, R, disp=0)[0]
+ y_hat = f(x_hat)
+ """
+ dehat
+ """
+ x = x_hat/(1-x_hat-y_hat)
+ y = y_hat/(1-x_hat-y_hat)
+ rr = R/(1-R-B)
+ bb = B/(1-R-B)
+ """
+ calculate euclidean distance in dehatspace
+ """
+ dist = ((x-rr)**2+(y-bb)**2)**0.5
+ """
+ return negative if point is below the fit curve
+ """
+ if (x+y) > (rr+bb):
+ dist *= -1
+ dists.append(dist)
+ logger.info('Found closest point on fit line to each point in dehatspace')
+ """
+ calculate wiggle factors in awb. 10% added since this is an upper bound
+ """
+ transverse_neg = - np.min(dists) * 1.1
+ transverse_pos = np.max(dists) * 1.1
+ logger.info(f'Transverse pos : {transverse_pos:.5f}')
+ logger.info(f'Transverse neg : {transverse_neg:.5f}')
+ """
+ set minimum transverse wiggles to 0.01.
+ Wiggle factors dictate how far off the curve the algorithm searches. 0.01
+ is a suitable minimum that gives better results for lighting conditions not
+ within the calibration dataset. Anything less will generalise poorly.
+ """
+ if transverse_pos < 0.01:
+ transverse_pos = 0.01
+ logger.info('Forced transverse pos to 0.01')
+ if transverse_neg < 0.01:
+ transverse_neg = 0.01
+ logger.info('Forced transverse neg to 0.01')
+
+ """
+ generate new b_hat values at each r_hat according to fit
+ """
+ r_hat_fit = np.array(rbs_hat[0])
+ b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
+ """
+ transform from hatspace to dehatspace
+ """
+ r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
+ b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
+ c_fit = np.round(rbs_hat[2], 0)
+ """
+ round to 4dp
+ """
+ r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
+ r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
+ b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
+ b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
+ r_fit = np.round(r_fit, 4)
+ b_fit = np.round(b_fit, 4)
+ """
+ The following code ensures that colour temperature decreases with
+ increasing r/g
+ """
+ """
+ iterate backwards over list for easier indexing
+ """
+ i = len(c_fit) - 1
+ while i > 0:
+ if c_fit[i] > c_fit[i-1]:
+ logger.info('Colour temperature increase found')
+ logger.info(f'{c_fit[i - 1]} K at r = {r_fit[i - 1]} to ')
+ logger.info(f'{c_fit[i]} K at r = {r_fit[i]}')
+ """
+ if colour temperature increases then discard point furthest from
+ the transformed fit (dehatspace)
+ """
+ error_1 = abs(dists[i-1])
+ error_2 = abs(dists[i])
+ logger.info('Distances from fit:')
+ logger.info(f'{c_fit[i - 1]} K : {error_1:.5f}')
+ logger.info(f'{c_fit[i]} K : {error_2:.5f}')
+ """
+ find bad index
+ note that in python false = 0 and true = 1
+ """
+ bad = i - (error_1 < error_2)
+ logger.info(f'Point at {c_fit[bad]} K deleted as ')
+ logger.info('it is furthest from fit')
+ """
+ delete bad point
+ """
+ r_fit = np.delete(r_fit, bad)
+ b_fit = np.delete(b_fit, bad)
+ c_fit = np.delete(c_fit, bad).astype(np.uint16)
+ """
+ note that if a point has been discarded then the length has decreased
+ by one, meaning that decreasing the index by one will reassess the kept
+ point against the next point. It is therefore possible, in theory, for
+ two adjacent points to be discarded, although probably rare
+ """
+ i -= 1
+
+ """
+ return formatted ct curve, ordered by increasing colour temperature
+ """
+ ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
+ logger.info('Final CT curve:')
+ for i in range(len(ct_curve)//3):
+ j = 3*i
+ logger.info(f' ct: {ct_curve[j]} ')
+ logger.info(f' r: {ct_curve[j + 1]} ')
+ logger.info(f' b: {ct_curve[j + 2]} ')
+
+ """
+ plotting code for debug
+ """
+ if plot:
+ x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
+ y = a*x**2 + b*x + c
+ plt.subplot(2, 1, 1)
+ plt.title('hatspace')
+ plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
+ plt.plot(x, y, color='green', ls='-')
+ plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
+ for i, ct in enumerate(rbs_hat[2]):
+ plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
+ plt.xlabel('$\\hat{r}$')
+ plt.ylabel('$\\hat{b}$')
+ """
+ optionally set axes equal to the shortest distance so the line really
+ does look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.grid()
+ plt.subplot(2, 1, 2)
+ plt.title('dehatspace - indoors?')
+ plt.plot(r_fit, b_fit, color='blue')
+ plt.scatter(rb_raw[0], rb_raw[1], color='green')
+ plt.scatter(r_fit, b_fit, color='red')
+ for i, ct in enumerate(c_fit):
+ plt.annotate(str(ct), (r_fit[i], b_fit[i]))
+ plt.xlabel('$r$')
+ plt.ylabel('$b$')
+ """
+ optionally set axes equal to the shortest distance so the line really
+ does look perpendicular and everybody is happy
+ """
+ # ax = plt.gca()
+ # ax.set_aspect('equal')
+ plt.subplots_adjust(hspace=0.5)
+ plt.grid()
+ plt.show()
+ """
+ end of plotting code
+ """
+ return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
+
+
+"""
+obtain greyscale patches and perform alsc colour correction
+"""
+def get_alsc_patches(Img, colour_cals, grey=True):
+ """
+ get patch centre coordinates, image colour and the actual
+ patches for each channel, remembering to subtract blacklevel
+ If grey then only greyscale patches considered
+ """
+ patches = Img.patches
+ if grey:
+ cen_coords = Img.cen_coords[3::4]
+ col = Img.color
+ r_patchs = patches[0][3::4] - Img.blacklevel_16
+ b_patchs = patches[3][3::4] - Img.blacklevel_16
+ """
+ note two green channels are averages
+ """
+ g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
+ else:
+ cen_coords = Img.cen_coords
+ col = Img.color
+ r_patchs = patches[0] - Img.blacklevel_16
+ b_patchs = patches[3] - Img.blacklevel_16
+ g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
+
+ if colour_cals is None:
+ return r_patchs, b_patchs, g_patchs
+ """
+ find where image colour fits in alsc colour calibration tables
+ """
+ cts = list(colour_cals.keys())
+ pos = bisect_left(cts, col)
+ """
+ if img colour is below the minimum or above the maximum alsc calibration
+ colour, simply pick the extreme closest to the img colour
+ """
+ if pos % len(cts) == 0:
+ """
+ this works because -0 = 0 = first and -1 = last index
+ """
+ col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
+ """
+ else, perform linear interpolation between existing alsc colour
+ calibration tables
+ """
+ else:
+ bef = cts[pos-1]
+ aft = cts[pos]
+ da = col-bef
+ db = aft-col
+ bef_tabs = np.array(colour_cals[bef])
+ aft_tabs = np.array(colour_cals[aft])
+ col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
+ col_tabs = np.reshape(col_tabs, (2, 12, 16))
+ """
+ calculate dx, dy used to calculate alsc table
+ """
+ w, h = Img.w/2, Img.h/2
+ dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
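+ # -(-x // n) is ceiling division, e.g. for a 1312 pixel wide half image
+ # dx = ceil((1312 - 1) / 16) = 82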
+ """
+ make list of pairs of gains for each patch by selecting the correct value
+ in alsc colour calibration table
+ """
+ patch_gains = []
+ for cen in cen_coords:
+ x, y = cen[0]//dx, cen[1]//dy
+ # We could probably do with some better spatial interpolation here?
+ col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
+ patch_gains.append(col_gains)
+
+ """
+ multiply the r and b channels in each patch by the respective gain, finally
+ performing the alsc colour correction
+ """
+ for i, gains in enumerate(patch_gains):
+ r_patchs[i] = r_patchs[i] * gains[0]
+ b_patchs[i] = b_patchs[i] * gains[1]
+
+ """
+ return greyscale patches, g channel and correct r, b channels
+ """
+ return r_patchs, b_patchs, g_patchs
diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py
new file mode 100644
index 00000000..2e87a667
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ccm.py
@@ -0,0 +1,408 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for CCM (colour correction matrix)
+
+import logging
+
+import numpy as np
+from scipy.optimize import minimize
+
+from . import ctt_colors as colors
+from .image import Image
+from .ctt_awb import get_alsc_patches
+from .utils import visualise_macbeth_chart
+
+logger = logging.getLogger(__name__)
+
+"""
+takes 8-bit macbeth chart values, degammas and returns 16 bit
+"""
+
+'''
+This program has several options from which to derive the color matrix.
+The first is average. This minimises the average delta E across all patches of
+the macbeth chart. Testing across all cameras yielded this as the most color
+accurate and vivid. Other options are available, however.
+Maximum minimises the maximum delta E of the patches. It iterates until a
+minimum maximum is found (so that no single patch deviates wildly).
+This yields generally good results, but overall the colors are less accurate.
+Have a fiddle with maximum and see what you think.
+The final option allows you to select the patches to average across.
+This means that you can bias certain patches, for instance if you want the
+reds to be more accurate.
+'''
+
+matrix_selection_types = ["average", "maximum", "patches"]
+typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
+test_patches = [1, 2, 5, 8, 9, 12, 14]
+
+'''
+Enter patches to test for. Can also be entered twice if you
+would like twice as much bias on one patch.
+'''
+
+
+def degamma(x):
+ x = x / ((2 ** 8) - 1) # takes 255 and scales it down to one
+ x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
+ x = x * ((2 ** 16) - 1) # takes one and scales up to 65535, 16 bit color
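+ # e.g. degamma(np.array([128])) ≈ [14146]: mid-grey sRGB is only ~21.6% of
+ # full scale in linear light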
+ return x
+
+
+def gamma(x):
+ # Take a 3-long array of color values and gamma them
+ return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
+
+
+"""
+Finds colour correction matrices for a list of images
+"""
+
+
+def ccm(imgs, cal_cr_list, cal_cb_list):
+ global matrix_selection_types, typenum
+ """
+ standard macbeth chart colour values
+ """
+ m_rgb = np.array([ # these are in RGB
+ [116, 81, 67], # dark skin
+ [199, 147, 129], # light skin
+ [91, 122, 156], # blue sky
+ [90, 108, 64], # foliage
+ [130, 128, 176], # blue flower
+ [92, 190, 172], # bluish green
+ [224, 124, 47], # orange
+ [68, 91, 170], # purplish blue
+ [198, 82, 97], # moderate red
+ [94, 58, 106], # purple
+ [159, 189, 63], # yellow green
+ [230, 162, 39], # orange yellow
+ [35, 63, 147], # blue
+ [67, 149, 74], # green
+ [180, 49, 57], # red
+ [238, 198, 20], # yellow
+ [193, 84, 151], # magenta
+ [0, 136, 170], # cyan (goes out of gamut)
+ [245, 245, 243], # white 9.5
+ [200, 202, 202], # neutral 8
+ [161, 163, 163], # neutral 6.5
+ [121, 121, 122], # neutral 5
+ [82, 84, 86], # neutral 3.5
+ [49, 49, 51] # black 2
+ ])
+ """
+ convert reference colours from srgb to rgb
+ """
+ m_srgb = degamma(m_rgb) # now in 16 bit color.
+
+ # Produce array of LAB values for ideal color chart
+ m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
+
+ """
+ reorder reference values to match how patches are ordered
+ """
+ m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
+ m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
+ m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
+ """
+ reformat alsc correction tables or set colour_cals to None if alsc is
+ deactivated
+ """
+ if cal_cr_list is None:
+ colour_cals = None
+ else:
+ colour_cals = {}
+ for cr, cb in zip(cal_cr_list, cal_cb_list):
+ cr_tab = cr['table']
+ cb_tab = cb['table']
+ """
+ normalise tables so min value is 1
+ """
+ cr_tab = cr_tab / np.min(cr_tab)
+ cb_tab = cb_tab / np.min(cb_tab)
+ colour_cals[cr['ct']] = [cr_tab, cb_tab]
+
+ """
+ for each image, perform awb and alsc corrections.
+ Then calculate the colour correction matrix for that image, recording the
+ ccm and the colour temperature.
+ """
+ ccm_tab = {}
+ for Img in imgs:
+ logger.info('Processing image: ' + Img.name)
+ """
+ get macbeth patches with alsc applied if alsc enabled.
+ Note: if alsc is disabled then colour_cals will be set to None and
+ the function will simply return the macbeth patches
+ """
+ r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
+ # 256 values for each patch of sRGB values
+
+ """
+ do awb
+ Note: awb is done by measuring the macbeth chart in the image, rather
+ than from the awb calibration. This is done so the awb will be perfect
+ and the ccm matrices will be more accurate.
+ """
+ r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
+ r_g = np.mean(r_greys / g_greys)
+ b_g = np.mean(b_greys / g_greys)
+ r = r / r_g
+ b = b / b_g
+ """
+ normalise brightness wrt reference macbeth colours and then average
+ each channel for each patch
+ """
+ gain = np.mean(m_srgb) / np.mean((r, g, b))
+ logger.info(f'Gain with respect to standard colours: {gain:.3f}')
+ r = np.mean(gain * r, axis=1)
+ b = np.mean(gain * b, axis=1)
+ g = np.mean(gain * g, axis=1)
+ """
+ calculate ccm matrix
+ """
+ # ==== All of the below should be in sRGB ====
+ sumde = 0
+ ccm = do_ccm(r, g, b, m_srgb)
+ # This is the initial guess that our optimisation code works with.
+ original_ccm = ccm
+ r1 = ccm[0]
+ r2 = ccm[1]
+ g1 = ccm[3]
+ g2 = ccm[4]
+ b1 = ccm[6]
+ b2 = ccm[7]
+ '''
+ COLOR MATRIX LOOKS AS BELOW
+ R1 R2 R3 Rval Outr
+ G1 G2 G3 * Gval = G
+ B1 B2 B3 Bval B
+ Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3
+ '''
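+ # Because every row sums to 1, greys are preserved: a grey input (v, v, v)
+ # produces (row sum) * v = v on each output channel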
+
+ x0 = [r1, r2, g1, g2, b1, b2]
+ '''
+ We use our old CCM as the initial guess for the program to find the
+ optimised matrix
+ '''
+ result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
+ '''
+ This produces a color matrix which has the lowest delta E possible,
+ based on the input data. Note it is impossible for this to reach
+ zero since the input data is imperfect
+ '''
+
+ [r1, r2, g1, g2, b1, b2] = result.x
+ # The new, optimised color correction matrix values
+ # This is the optimised Color Matrix (preserving greys by summing rows up to 1)
+ optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
+
+ logger.info(f'Optimized Matrix: {np.round(optimised_ccm, 4)}')
+ logger.info(f'Old Matrix: {np.round(ccm, 4)}')
+
+ formatted_ccm = np.array(original_ccm).reshape((3, 3))
+
+ '''
+ below is a whole load of code that then applies the latest color
+ matrix, and returns LAB values for color. This can then be used
+ to calculate the final delta E
+ '''
+ optimised_ccm_rgb = [] # Original Color Corrected Matrix RGB / LAB
+ optimised_ccm_lab = []
+
+ formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
+ after_gamma_rgb = []
+ after_gamma_lab = []
+
+ for RGB in zip(r, g, b):
+ ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
+ optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
+ optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
+
+ optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
+ after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
+ after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
+ '''
+ Gamma After RGB / LAB - not used in calculations, only used for visualisation
+ We now want to spit out some data that shows
+ how the optimisation has improved the color matrices
+ '''
+ logger.info("Here are the Improvements")
+
+ # CALCULATE WORST CASE delta e
+ old_worst_delta_e = 0
+ before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
+ new_worst_delta_e = 0
+ after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
+ for i in range(24):
+ old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i]) # Current Old Delta E
+ new_delta_e = deltae(after_gamma_lab[i], m_lab[i]) # Current New Delta E
+ if old_delta_e > old_worst_delta_e:
+ old_worst_delta_e = old_delta_e
+ if new_delta_e > new_worst_delta_e:
+ new_worst_delta_e = new_delta_e
+
+ logger.info(f'delta E optimized: average: {after_average:.2f} max:{new_worst_delta_e:.2f}')
+ logger.info(f'delta E old: average: {before_average:.2f} max:{old_worst_delta_e:.2f}')
+
+ visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.color) + str(matrix_selection_types[typenum]))
+ '''
+ The program will also save some visualisations of improvements.
+ Very pretty to look at. Top rectangle is the ideal, left square is
+ before optimisation, right square is after.
+ '''
+
+ """
+ if a ccm has already been calculated for that temperature then don't
+ overwrite but save both. They will then be averaged later on
+ """ # Now going to use optimised color matrix, optimised_ccm
+ if Img.color in ccm_tab.keys():
+ ccm_tab[Img.color].append(optimised_ccm)
+ else:
+ ccm_tab[Img.color] = [optimised_ccm]
+
+ logger.info('Finished processing images')
+ """
+ average any ccms that share a colour temperature
+ """
+ for k, v in ccm_tab.items():
+ tab = np.mean(v, axis=0)
+ tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
+ tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
+ ccm_tab[k] = list(np.round(tab, 5))
+ logger.info(f'Matrix calculated for colour temperature of {k} K')
+
+ """
+ return all ccms with respective colour temperature in the correct format,
+ sorted by their colour temperature
+ """
+ sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
+ ccms = []
+ for i in sorted_ccms:
+ ccms.append({
+ 'ct': i[0],
+ 'ccm': i[1]
+ })
+ return ccms
+
+
+def guess(x0, r, g, b, m_lab): # provides a method of numerical feedback for the optimisation code
+ [r1, r2, g1, g2, b1, b2] = x0
+ ccm = np.array([r1, r2, (1 - r1 - r2),
+ g1, g2, (1 - g1 - g2),
+ b1, b2, (1 - b1 - b2)]).reshape((3, 3)) # format the matrix correctly
+ return transform_and_evaluate(ccm, r, g, b, m_lab)
+
+
+def transform_and_evaluate(ccm, r, g, b, m_lab): # Transforms colors to LAB and applies the correction matrix
+ # create list of matrix changed colors
+ realrgb = []
+ for RGB in zip(r, g, b):
+ rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256) # This is RGB values after the color correction matrix has been applied
+ realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
+ # now compare that with m_lab and return numeric result, averaged for each patch
+ return (sumde(realrgb, m_lab) / 24) # returns an average result of delta E
+
+
+def sumde(listA, listB):
+ global typenum, test_patches
+ sumde = 0
+ maxde = 0
+ patchde = [] # Create array of the delta E values for each patch. useful for optimisation of certain patches
+ for listA_item, listB_item in zip(listA, listB):
+ if maxde < (deltae(listA_item, listB_item)):
+ maxde = deltae(listA_item, listB_item)
+ patchde.append(deltae(listA_item, listB_item))
+ sumde += deltae(listA_item, listB_item)
+ '''
+ The different options specified at the start allow for
+ the maximum to be returned, average or specific patches
+ '''
+ if typenum == 0:
+ return sumde
+ if typenum == 1:
+ return maxde
+ if typenum == 2:
+ output = sum([patchde[test_patch] for test_patch in test_patches])
+ # Selects only certain patches and returns the output for them
+ return output
+
+
+"""
+calculates the ccm for an individual image.
+ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
+matrix, each row must add up to 1 in order to conserve greyness, simplifying
+calculation.
+The initial CCM is calculated in RGB, and then optimised in LAB color space
+This simplifies the initial calculation but then gets us the accuracy of
+using LAB color space.
+"""
+
+
+def do_ccm(r, g, b, m_srgb):
+ rb = r-b
+ gb = g-b
+ rb_2s = (rb * rb)
+ rb_gbs = (rb * gb)
+ gb_2s = (gb * gb)
+
+ r_rbs = rb * (m_srgb[..., 0] - b)
+ r_gbs = gb * (m_srgb[..., 0] - b)
+ g_rbs = rb * (m_srgb[..., 1] - b)
+ g_gbs = gb * (m_srgb[..., 1] - b)
+ b_rbs = rb * (m_srgb[..., 2] - b)
+ b_gbs = gb * (m_srgb[..., 2] - b)
+
+ """
+ Obtain least squares fit
+ """
+ rb_2 = np.sum(rb_2s)
+ gb_2 = np.sum(gb_2s)
+ rb_gb = np.sum(rb_gbs)
+ r_rb = np.sum(r_rbs)
+ r_gb = np.sum(r_gbs)
+ g_rb = np.sum(g_rbs)
+ g_gb = np.sum(g_gbs)
+ b_rb = np.sum(b_rbs)
+ b_gb = np.sum(b_gbs)
+
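+ # The sums above form 2x2 normal equations, [[rb_2, rb_gb], [rb_gb, gb_2]]
+ # times [x_a, x_b] = [x_rb, x_gb], solved below for each output channel x
+ # by Cramer's rule with det as the shared denominator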
+ det = rb_2 * gb_2 - rb_gb * rb_gb
+
+ """
+ Raise error if matrix is singular...
+ This shouldn't really happen with real data but if it does just take new
+ pictures and try again, not much else to be done unfortunately...
+ """
+ if det < 0.001:
+ raise ArithmeticError
+
+ r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
+ r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
+ """
+ Last row can be calculated by knowing the sum must be 1
+ """
+ r_c = 1 - r_a - r_b
+
+ g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
+ g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
+ g_c = 1 - g_a - g_b
+
+ b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
+ b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
+ b_c = 1 - b_a - b_b
+
+ """
+ format ccm
+ """
+ ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
+
+ return ccm
+
+
+def deltae(colorA, colorB):
+ return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+ # return ((colorA[1]-colorB[1]) * * 2 + (colorA[2]-colorB[2]) * * 2) * * 0.5
+ # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E
diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py
new file mode 100644
index 00000000..cb4d236b
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_colors.py
@@ -0,0 +1,30 @@
+# Program to convert from RGB to LAB color space
+def RGB_to_LAB(RGB): # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
+ XYZ = [0, 0, 0]
+ # convert R, G, B to X, Y, Z
+ X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
+ Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
+ Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
+
+ XYZ[0] = X / 255 * 100
+ XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255
+ XYZ[2] = Z / 255 * 100
+ XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65
+ XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000
+ XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883
+ num = 0
+ for value in XYZ:
+ if value > 0.008856:
+ value = value ** (0.3333333333333333)
+ else:
+ value = (7.787 * value) + (16 / 116)
+ XYZ[num] = value
+ num = num + 1
+
+ # L, A, B, values calculated below
+ L = (116 * XYZ[1]) - 16
+ a = 500 * (XYZ[0] - XYZ[1])
+ b = 200 * (XYZ[1] - XYZ[2])
+
+ return [L, a, b]
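+
+# Sanity check: RGB_to_LAB([255, 255, 255]) ≈ [100, 0, 0], the D65 reference
+# white (L = 100, a = b = 0)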
diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py
new file mode 100644
index 00000000..01bba302
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ransac.py
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool RANSAC selector for Macbeth chart locator
+
+import numpy as np
+
+scale = 2
+
+
+"""
+constructs normalised macbeth chart corners for ransac algorithm
+"""
+def get_square_verts(c_err=0.05, scale=scale):
+ """
+ define macbeth chart corners
+ """
+ b_bord_x, b_bord_y = scale*8.5, scale*13
+ s_bord = 6*scale
+ side = 41*scale
+ x_max = side*6 + 5*s_bord + 2*b_bord_x
+ y_max = side*4 + 3*s_bord + 2*b_bord_y
+ c1 = (0, 0)
+ c2 = (0, y_max)
+ c3 = (x_max, y_max)
+ c4 = (x_max, 0)
+ mac_norm = np.array((c1, c2, c3, c4), np.float32)
+ mac_norm = np.array([mac_norm])
+
+ square_verts = []
+ square_0 = np.array(((0, 0), (0, side),
+ (side, side), (side, 0)), np.float32)
+ offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
+ c_off = side * c_err
+ offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
+ (-c_off, -c_off), (-c_off, c_off)), np.float32)
+ square_0 += offset_0
+ square_0 += offset_cont
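+ # offset_cont pulls every corner inwards by c_err * side, so the reference
+ # squares sample slightly inside each patch border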
+ """
+ define macbeth square corners
+ """
+ for i in range(6):
+ shift_i = np.array(((i*side, 0), (i*side, 0),
+ (i*side, 0), (i*side, 0)), np.float32)
+ shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
+ (i*s_bord, 0), (i*s_bord, 0)), np.float32)
+ square_i = square_0 + shift_i + shift_bord
+ for j in range(4):
+ shift_j = np.array(((0, j*side), (0, j*side),
+ (0, j*side), (0, j*side)), np.float32)
+ shift_bord = np.array(((0, j*s_bord),
+ (0, j*s_bord), (0, j*s_bord),
+ (0, j*s_bord)), np.float32)
+ square_j = square_i + shift_j + shift_bord
+ square_verts.append(square_j)
+ # print('square_verts')
+ # print(square_verts)
+ return np.array(square_verts, np.float32), mac_norm
+
+
+def get_square_centres(c_err=0.05, scale=scale):
+ """
+ define macbeth square centres
+ """
+ verts, mac_norm = get_square_verts(c_err, scale=scale)
+
+ centres = np.mean(verts, axis=1)
+ # print('centres')
+ # print(centres)
+ return np.array(centres, np.float32)
diff --git a/utils/tuning/libtuning/generators/generator.py b/utils/tuning/libtuning/generators/generator.py
index 7c8c9b99..77a8ba4a 100644
--- a/utils/tuning/libtuning/generators/generator.py
+++ b/utils/tuning/libtuning/generators/generator.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# generator.py - Base class for a generator to convert dict to tuning file
+# Base class for a generator to convert dict to tuning file
from pathlib import Path
diff --git a/utils/tuning/libtuning/generators/raspberrypi_output.py b/utils/tuning/libtuning/generators/raspberrypi_output.py
index 813491cd..47b49059 100644
--- a/utils/tuning/libtuning/generators/raspberrypi_output.py
+++ b/utils/tuning/libtuning/generators/raspberrypi_output.py
@@ -2,7 +2,7 @@
#
# Copyright 2022 Raspberry Pi Ltd
#
-# raspberrypi_output.py - Generate tuning file in Raspberry Pi's json format
+# Generate tuning file in Raspberry Pi's json format
#
# (Copied from ctt_pretty_print_json.py)
diff --git a/utils/tuning/libtuning/generators/yaml_output.py b/utils/tuning/libtuning/generators/yaml_output.py
index effb4fb3..c490081d 100644
--- a/utils/tuning/libtuning/generators/yaml_output.py
+++ b/utils/tuning/libtuning/generators/yaml_output.py
@@ -2,15 +2,16 @@
#
# Copyright 2022 Paul Elder <paul.elder@ideasonboard.com>
#
-# yaml_output.py - Generate tuning file in YAML format
+# Generate tuning file in YAML format
from .generator import Generator
from numbers import Number
from pathlib import Path
-import libtuning.utils as utils
+import logging
+logger = logging.getLogger(__name__)
class YamlOutput(Generator):
def __init__(self):
@@ -106,13 +107,16 @@ class YamlOutput(Generator):
]
for module in output_order:
+ if module not in output_dict:
+ continue
+
out_lines.append(f' - {module.out_name}:')
if len(output_dict[module]) == 0:
continue
if not isinstance(output_dict[module], dict):
- utils.eprint(f'Error: Output of {module.type} is not a dictionary')
+ logger.error(f'Error: Output of {module.type} is not a dictionary')
continue
lines = self._stringify_dict(output_dict[module])
diff --git a/utils/tuning/libtuning/gradient.py b/utils/tuning/libtuning/gradient.py
index 5106f821..b643f502 100644
--- a/utils/tuning/libtuning/gradient.py
+++ b/utils/tuning/libtuning/gradient.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# gradient.py - Gradients that can be used to distribute or map numbers
+# Gradients that can be used to distribute or map numbers
import libtuning as lt
diff --git a/utils/tuning/libtuning/image.py b/utils/tuning/libtuning/image.py
index aa9d20b5..ecd334bd 100644
--- a/utils/tuning/libtuning/image.py
+++ b/utils/tuning/libtuning/image.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
-# image.py - Container for an image and associated metadata
+# Container for an image and associated metadata
import binascii
import numpy as np
@@ -13,6 +13,9 @@ import re
import libtuning as lt
import libtuning.utils as utils
+import logging
+
+logger = logging.getLogger(__name__)
class Image:
@@ -21,17 +24,18 @@ class Image:
self.lsc_only = False
self.color = -1
self.lux = -1
+ self.macbeth = None
try:
self._load_metadata_exif()
except Exception as e:
- utils.eprint(f'Failed to load metadata from {self.path}: {e}')
+ logger.error(f'Failed to load metadata from {self.path}: {e}')
raise e
try:
self._read_image_dng()
except Exception as e:
- utils.eprint(f'Failed to load image data from {self.path}: {e}')
+ logger.error(f'Failed to load image data from {self.path}: {e}')
raise e
@property
@@ -79,7 +83,7 @@ class Image:
# is R, then G, then G, then B.
bayer_case = {
'0 1 1 2': (lt.Color.R, lt.Color.GR, lt.Color.GB, lt.Color.B),
- '1 2 0 1': (lt.Color.GB, lt.Color.R, lt.Color.B, lt.Color.GR),
+ '1 2 0 1': (lt.Color.GB, lt.Color.B, lt.Color.R, lt.Color.GR),
'2 1 1 0': (lt.Color.B, lt.Color.GB, lt.Color.GR, lt.Color.R),
'1 0 2 1': (lt.Color.GR, lt.Color.R, lt.Color.B, lt.Color.GB)
}
@@ -131,6 +135,6 @@ class Image:
all_patches.append(ch_patches)
- self.patches = all_patches
+ self.patches = np.array(all_patches)
return not saturated
diff --git a/utils/tuning/libtuning/libtuning.py b/utils/tuning/libtuning/libtuning.py
index d84c148f..bac57323 100644
--- a/utils/tuning/libtuning/libtuning.py
+++ b/utils/tuning/libtuning/libtuning.py
@@ -2,16 +2,17 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# libtuning.py - An infrastructure for camera tuning tools
+# An infrastructure for camera tuning tools
import argparse
+import logging
import libtuning as lt
import libtuning.utils as utils
-from libtuning.utils import eprint
from enum import Enum, IntEnum
+logger = logging.getLogger(__name__)
class Color(IntEnum):
R = 0
@@ -94,7 +95,10 @@ class Tuner(object):
self.output = {}
def add(self, module):
- self.modules.append(module)
+ if isinstance(module, list):
+ self.modules.extend(module)
+ else:
+ self.modules.append(module)
def set_input_parser(self, parser):
self.parser = parser
@@ -112,10 +116,10 @@ class Tuner(object):
for module_type in output_order:
modules = [module for module in self.modules if module.type == module_type.type]
if len(modules) > 1:
- eprint(f'Multiple modules found for module type "{module_type.type}"')
+ logger.error(f'Multiple modules found for module type "{module_type.type}"')
return False
if len(modules) < 1:
- eprint(f'No module found for module type "{module_type.type}"')
+ logger.error(f'No module found for module type "{module_type.type}"')
return False
self.output_order.append(modules[0])
@@ -124,19 +128,19 @@ class Tuner(object):
# \todo Validate parser and generator at Tuner construction time?
def _validate_settings(self):
if self.parser is None:
- eprint('Missing parser')
+ logger.error('Missing parser')
return False
if self.generator is None:
- eprint('Missing generator')
+ logger.error('Missing generator')
return False
if len(self.modules) == 0:
- eprint('No modules added')
+ logger.error('No modules added')
return False
if len(self.output_order) != len(self.modules):
- eprint('Number of outputs does not match number of modules')
+ logger.error('Number of outputs does not match number of modules')
return False
return True
@@ -183,7 +187,7 @@ class Tuner(object):
for module in self.modules:
if not module.validate_config(self.config):
- eprint(f'Config is invalid for module {module.type}')
+ logger.error(f'Config is invalid for module {module.type}')
return -1
has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules)
@@ -192,15 +196,15 @@ class Tuner(object):
images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc)
if images is None or len(images) == 0:
- eprint(f'No images were found, or able to load')
+ logger.error('No images were found or could be loaded')
return -1
# Do the tuning
for module in self.modules:
out = module.process(self.config, images, self.output)
if out is None:
- eprint(f'Module {module.name} failed to process, aborting')
- break
+ logger.warning(f'Module {module.hr_name} failed to process...')
+ continue
self.output[module] = out
self.generator.write(args.output, self.output, self.output_order)
diff --git a/utils/tuning/libtuning/macbeth.py b/utils/tuning/libtuning/macbeth.py
index 5faddf66..4a2006b0 100644
--- a/utils/tuning/libtuning/macbeth.py
+++ b/utils/tuning/libtuning/macbeth.py
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Ideas on Board Oy
#
-# macbeth.py - Locate and extract Macbeth charts from images
+# Locate and extract Macbeth charts from images
# (Copied from: ctt_macbeth_locator.py)
# \todo Add debugging
@@ -11,8 +12,18 @@ import cv2
import os
from pathlib import Path
import numpy as np
+import warnings
+import logging
+from sklearn import cluster as cluster
-from libtuning.image import Image
+from .ctt_ransac import get_square_verts, get_square_centres
+from .image import Image
+
+logger = logging.getLogger(__name__)
+
+
+class MacbethError(Exception):
+ pass
# Reshape image to fixed width without distorting returns image and scale
@@ -369,7 +380,9 @@ def get_macbeth_chart(img, ref_data):
# Catch macbeth errors and continue with code
except MacbethError as error:
- eprint(error)
+ # \todo This happens so many times in a normal run that it shadows
+ # all the relevant output
+ # logger.warning(error)
return (0, None, None, False)
@@ -403,10 +416,15 @@ def find_macbeth(img, mac_config):
# nothing more is tried as this is a high enough confidence to ensure
# reliable macbeth square centre placement.
+ # Keep a list that will include this and any brightened up versions of
+ # the image for reuse.
+ all_images = [img]
+
for brightness in [2, 4]:
if cor >= 0.75:
break
img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0)
+ all_images.append(img_br)
cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b
@@ -456,23 +474,24 @@ def find_macbeth(img, mac_config):
w_inc = int(w * pair['inc'])
h_inc = int(h * pair['inc'])
- loop = ((1 - pair['sel']) / pair['inc']) + 1
+ loop = int(((1 - pair['sel']) / pair['inc']) + 1)
# For each subselection, look for a macbeth chart
- for i in range(loop):
- for j in range(loop):
- w_s, h_s = i * w_inc, j * h_inc
- img_sel = img[w_s:w_s + w_sel, h_s:h_s + h_sel]
- cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
-
- # If the correlation is better than the best then record the
- # scale and current subselection at which macbeth chart was
- # found. Also record the coordinates, macbeth chart and message.
- if cor_ij > cor:
- cor = cor_ij
- mac, coords, ret = mac_ij, coords_ij, ret_ij
- ii, jj = i, j
- w_best, h_best = w_inc, h_inc
- d_best = index + 1
+ for img_br in all_images:
+ for i in range(loop):
+ for j in range(loop):
+ w_s, h_s = i * w_inc, j * h_inc
+ img_sel = img_br[w_s:w_s + w_sel, h_s:h_s + h_sel]
+ cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
+
+ # If the correlation is better than the best then record the
+ # scale and current subselection at which macbeth chart was
+ # found. Also record the coordinates, macbeth chart and message.
+ if cor_ij > cor:
+ cor = cor_ij
+ mac, coords, ret = mac_ij, coords_ij, ret_ij
+ ii, jj = i, j
+ w_best, h_best = w_inc, h_inc
+ d_best = index + 1
# Transform coordinates from subselection to original image
if ii != -1:
@@ -486,7 +505,7 @@ def find_macbeth(img, mac_config):
coords_fit = coords
if cor < 0.75:
- eprint(f'Warning: Low confidence {cor:.3f} for macbeth chart in {img.path.name}')
+ logger.warning(f'Low confidence {cor:.3f} for macbeth chart')
if show:
draw_macbeth_results(img, coords_fit)
@@ -499,18 +518,20 @@ def locate_macbeth(image: Image, config: dict):
av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16))
av_val = np.mean(av_chan)
if av_val < image.blacklevel_16 / (2**16) + 1 / 64:
- eprint(f'Image {image.path.name} too dark')
+ logger.warning(f'Image {image.path.name} too dark')
return None
macbeth = find_macbeth(av_chan, config['general']['macbeth'])
if macbeth is None:
- eprint(f'No macbeth chart found in {image.path.name}')
+ logger.warning(f'No macbeth chart found in {image.path.name}')
return None
mac_cen_coords = macbeth[1]
if not image.get_patches(mac_cen_coords):
- eprint(f'Macbeth patches have saturated in {image.path.name}')
+ logger.warning(f'Macbeth patches have saturated in {image.path.name}')
return None
+ image.macbeth = macbeth
+
return macbeth
diff --git a/utils/tuning/libtuning/macbeth_ref.pgm b/utils/tuning/libtuning/macbeth_ref.pgm
index 37897140..089ea91f 100644
--- a/utils/tuning/libtuning/macbeth_ref.pgm
+++ b/utils/tuning/libtuning/macbeth_ref.pgm
@@ -1,5 +1,5 @@
-# SPDX-License-Identifier: BSD-2-Clause
P5
+# SPDX-License-Identifier: BSD-2-Clause
# Reference macbeth chart
120 80
255
diff --git a/utils/tuning/libtuning/modules/agc/__init__.py b/utils/tuning/libtuning/modules/agc/__init__.py
new file mode 100644
index 00000000..4db9ca37
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.agc.agc import AGC
+from libtuning.modules.agc.rkisp1 import AGCRkISP1
diff --git a/utils/tuning/libtuning/modules/agc/agc.py b/utils/tuning/libtuning/modules/agc/agc.py
new file mode 100644
index 00000000..9c8899ba
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/agc.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from ..module import Module
+
+import libtuning as lt
+
+
+class AGC(Module):
+ type = 'agc'
+ hr_name = 'AGC (Base)'
+ out_name = 'GenericAGC'
+
+ # \todo Add sector shapes and stuff just like lsc
+ def __init__(self, *,
+ debug: list):
+ super().__init__()
+
+ self.debug = debug
diff --git a/utils/tuning/libtuning/modules/agc/rkisp1.py b/utils/tuning/libtuning/modules/agc/rkisp1.py
new file mode 100644
index 00000000..2dad3a09
--- /dev/null
+++ b/utils/tuning/libtuning/modules/agc/rkisp1.py
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+#
+# rkisp1.py - AGC module for tuning rkisp1
+
+from .agc import AGC
+
+import libtuning as lt
+
+
+class AGCRkISP1(AGC):
+ hr_name = 'AGC (RkISP1)'
+ out_name = 'Agc'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't actually need anything from the config file
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def _generate_metering_modes(self) -> dict:
+ centre_weighted = [
+ 0, 0, 0, 0, 0,
+ 0, 6, 8, 6, 0,
+ 0, 8, 16, 8, 0,
+ 0, 6, 8, 6, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+ spot = [
+ 0, 0, 0, 0, 0,
+ 0, 2, 4, 2, 0,
+ 0, 4, 16, 4, 0,
+ 0, 2, 4, 2, 0,
+ 0, 0, 0, 0, 0
+ ]
+
+ matrix = [1 for i in range(0, 25)]
+
+ return {
+ 'MeteringCentreWeighted': centre_weighted,
+ 'MeteringSpot': spot,
+ 'MeteringMatrix': matrix
+ }
+
+ def _generate_exposure_modes(self) -> dict:
+ normal = {'exposureTime': [100, 10000, 30000, 60000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+ short = {'exposureTime': [100, 5000, 10000, 20000, 120000],
+ 'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
+
+ return {'ExposureNormal': normal, 'ExposureShort': short}
+
+ def _generate_constraint_modes(self) -> dict:
+ normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}}
+ highlight = {
+ 'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5},
+ 'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8}
+ }
+
+ return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
+
+ def _generate_y_target(self) -> float:
+ return 0.5
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ output['AeMeteringMode'] = self._generate_metering_modes()
+ output['AeExposureMode'] = self._generate_exposure_modes()
+ output['AeConstraintMode'] = self._generate_constraint_modes()
+ output['relativeLuminanceTarget'] = self._generate_y_target()
+
+ # \todo Debug functionality
+
+ return output
diff --git a/utils/tuning/libtuning/modules/awb/__init__.py b/utils/tuning/libtuning/modules/awb/__init__.py
new file mode 100644
index 00000000..2d67f10c
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+
+from libtuning.modules.awb.awb import AWB
+from libtuning.modules.awb.rkisp1 import AWBRkISP1
diff --git a/utils/tuning/libtuning/modules/awb/awb.py b/utils/tuning/libtuning/modules/awb/awb.py
new file mode 100644
index 00000000..0dc4f59d
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/awb.py
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+
+import logging
+
+from ..module import Module
+
+from libtuning.ctt_awb import awb
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+class AWB(Module):
+ type = 'awb'
+ hr_name = 'AWB (Base)'
+ out_name = 'GenericAWB'
+
+ def __init__(self, *, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def do_calculation(self, images):
+ logger.info('Starting AWB calculation')
+
+ imgs = [img for img in images if img.macbeth is not None]
+
+ ct_curve, transverse_pos, transverse_neg = awb(imgs, None, None, False)
+ ct_curve = np.reshape(ct_curve, (-1, 3))
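+ # Each row of the curve, as produced by ctt_awb.awb(), holds a colour
+ # temperature and the corresponding r/g and b/g ratios; the colour gains
+ # are the reciprocals of those ratios.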
+ gains = [{
+ 'ct': int(v[0]),
+ 'gains': [float(1.0 / v[1]), float(1.0 / v[2])]
+ } for v in ct_curve]
+
+ return {'colourGains': gains,
+ 'transversePos': transverse_pos,
+ 'transverseNeg': transverse_neg}
diff --git a/utils/tuning/libtuning/modules/awb/rkisp1.py b/utils/tuning/libtuning/modules/awb/rkisp1.py
new file mode 100644
index 00000000..d562d26e
--- /dev/null
+++ b/utils/tuning/libtuning/modules/awb/rkisp1.py
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas On Board
+#
+# AWB module for tuning rkisp1
+
+from .awb import AWB
+
+
+class AWBRkISP1(AWB):
+ hr_name = 'AWB (RkISP1)'
+ out_name = 'Awb'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ if 'awb' not in config['general']:
+ raise ValueError('AWB configuration missing')
+ awb_config = config['general']['awb']
+ algorithm = awb_config['algorithm']
+
+ output = {'algorithm': algorithm}
+ data = self.do_calculation(images)
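+ # The grey-world algorithm needs only the colour gains, while the
+ # Bayesian algorithm additionally takes its search mode and priors from
+ # the tuning config.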
+ if algorithm == 'grey':
+ output['colourGains'] = data['colourGains']
+ elif algorithm == 'bayes':
+ output['AwbMode'] = awb_config['AwbMode']
+ output['priors'] = awb_config['priors']
+ output.update(data)
+ else:
+ raise ValueError(f"Unknown AWB algorithm {output['algorithm']}")
+
+ return output
diff --git a/utils/tuning/libtuning/modules/ccm/__init__.py b/utils/tuning/libtuning/modules/ccm/__init__.py
new file mode 100644
index 00000000..322602af
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+
+from libtuning.modules.ccm.ccm import CCM
+from libtuning.modules.ccm.rkisp1 import CCMRkISP1
diff --git a/utils/tuning/libtuning/modules/ccm/ccm.py b/utils/tuning/libtuning/modules/ccm/ccm.py
new file mode 100644
index 00000000..18702f8d
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/ccm.py
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Base Ccm tuning module
+
+from ..module import Module
+
+from libtuning.ctt_ccm import ccm
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class CCM(Module):
+ type = 'ccm'
+ hr_name = 'CCM (Base)'
+ out_name = 'GenericCCM'
+
+ def __init__(self, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def do_calibration(self, images):
+ logger.info('Starting CCM calibration')
+
+ imgs = [img for img in images if img.macbeth is not None]
+
+ # \todo Take LSC calibration results into account.
+ cal_cr_list = None
+ cal_cb_list = None
+
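+ # ctt_ccm.ccm() fits one 3x3 colour correction matrix per calibration
+ # image, keyed by the image's colour temperature.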
+ try:
+ ccms = ccm(imgs, cal_cr_list, cal_cb_list)
+ except ArithmeticError:
+ logger.error('CCM calibration failed')
+ return None
+
+ return ccms
diff --git a/utils/tuning/libtuning/modules/ccm/rkisp1.py b/utils/tuning/libtuning/modules/ccm/rkisp1.py
new file mode 100644
index 00000000..be0252d9
--- /dev/null
+++ b/utils/tuning/libtuning/modules/ccm/rkisp1.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas on Board
+#
+# Ccm module for tuning rkisp1
+
+from .ccm import CCM
+
+
+class CCMRkISP1(CCM):
+ hr_name = 'Crosstalk Correction (RkISP1)'
+ out_name = 'Ccm'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't need anything from the config file.
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ output = {}
+
+ ccms = self.do_calibration(images)
+ # Propagate the failure, as the LSC module does, rather than writing a
+ # null ccms entry to the tuning file.
+ if ccms is None:
+ return None
+ output['ccms'] = ccms
+
+ return output
diff --git a/utils/tuning/libtuning/modules/lsc/lsc.py b/utils/tuning/libtuning/modules/lsc/lsc.py
index 344a07a3..e0ca22eb 100644
--- a/utils/tuning/libtuning/modules/lsc/lsc.py
+++ b/utils/tuning/libtuning/modules/lsc/lsc.py
@@ -59,7 +59,10 @@ class LSC(Module):
def _lsc_single_channel(self, channel: np.array,
image: lt.Image, green_grid: np.array = None):
grid = self._get_grid(channel, image.w, image.h)
- grid -= image.blacklevel_16
+ # Clamp the values to a small positive number so that the subsequent
+ # 1 / grid computation doesn't produce negative or infinite results.
+ grid = np.maximum(grid - image.blacklevel_16, 0.1)
+
if green_grid is None:
table = np.reshape(1 / grid, self.sector_shape[::-1])
else:
diff --git a/utils/tuning/libtuning/modules/lsc/raspberrypi.py b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
index 58f5000d..99bc4fe6 100644
--- a/utils/tuning/libtuning/modules/lsc/raspberrypi.py
+++ b/utils/tuning/libtuning/modules/lsc/raspberrypi.py
@@ -3,7 +3,7 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi.py - ALSC module for tuning Raspberry Pi
+# ALSC module for tuning Raspberry Pi
from .lsc import LSC
@@ -12,7 +12,9 @@ import libtuning.utils as utils
from numbers import Number
import numpy as np
+import logging
+logger = logging.getLogger(__name__)
class ALSCRaspberryPi(LSC):
# Override the type name so that the parser can match the entry in the
@@ -35,7 +37,7 @@ class ALSCRaspberryPi(LSC):
def validate_config(self, config: dict) -> bool:
if self not in config:
- utils.eprint(f'{self.type} not in config')
+ logger.error(f'{self.type} not in config')
return False
valid = True
@@ -46,14 +48,14 @@ class ALSCRaspberryPi(LSC):
color_key = self.do_color.name
if lum_key not in conf and self.luminance_strength.required:
- utils.eprint(f'{lum_key} is not in config')
+ logger.error(f'{lum_key} is not in config')
valid = False
if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1):
- utils.eprint(f'Warning: {lum_key} is not in range [0, 1]; defaulting to 0.5')
+ logger.warning(f'{lum_key} is not in range [0, 1]; defaulting to 0.5')
if color_key not in conf and self.do_color.required:
- utils.eprint(f'{color_key} is not in config')
+ logger.error(f'{color_key} is not in config')
valid = False
return valid
@@ -235,7 +237,7 @@ class ALSCRaspberryPi(LSC):
if count == 1:
output['sigma'] = 0.005
output['sigma_Cb'] = 0.005
- utils.eprint('Warning: Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
+ logger.warning('Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
return output
# Obtain worst-case scenario residual sigmas
diff --git a/utils/tuning/libtuning/modules/lsc/rkisp1.py b/utils/tuning/libtuning/modules/lsc/rkisp1.py
index 5701ae0a..c02b2306 100644
--- a/utils/tuning/libtuning/modules/lsc/rkisp1.py
+++ b/utils/tuning/libtuning/modules/lsc/rkisp1.py
@@ -3,7 +3,7 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# rkisp1.py - LSC module for tuning rkisp1
+# LSC module for tuning rkisp1
from .lsc import LSC
@@ -33,13 +33,13 @@ class LSCRkISP1(LSC):
# table, flattened array of (blue's) green calibration table
def _do_single_lsc(self, image: lt.Image):
- cgr, gr = self._lsc_single_channel(image.channels[lt.Color.GR], image)
- cgb, gb = self._lsc_single_channel(image.channels[lt.Color.GB], image)
-
- # \todo Should these ratio against the average of both greens or just
- # each green like we've done here?
- cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, gr)
- cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, gb)
+ # Perform LSC on each colour channel independently. A future enhancement
+ # worth investigating would be splitting the luminance and chrominance
+ # LSC as done by Raspberry Pi.
+ cgr, _ = self._lsc_single_channel(image.channels[lt.Color.GR], image)
+ cgb, _ = self._lsc_single_channel(image.channels[lt.Color.GB], image)
+ cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image)
+ cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image)
return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten()
@@ -80,7 +80,11 @@
tables = []
for lis in [list_cr, list_cgr, list_cgb, list_cb]:
table = np.mean(lis[indices], axis=0)
- table = output_map_func((1, 3.999), (1024, 4095), table)
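+ # Map the gain range [1, 4] onto the register range [1024, 4096], where
+ # 1024 represents a gain of 1.0, then clamp to the maximum representable
+ # value of 4095.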
+ table = output_map_func((1, 4), (1024, 4096), table)
+ table = np.clip(table, 1024, 4095)
table = np.round(table).astype('int32').tolist()
tables.append(table)
@@ -106,6 +107,9 @@ class LSCRkISP1(LSC):
output['sets'] = self._do_all_lsc(images)
+ if len(output['sets']) == 0:
+ return None
+
 # \todo Validate images from greyscale camera and force greyscale mode
# \todo Debug functionality
diff --git a/utils/tuning/libtuning/modules/lux/__init__.py b/utils/tuning/libtuning/modules/lux/__init__.py
new file mode 100644
index 00000000..af9d4e08
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lux/__init__.py
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2025, Ideas on Board
+
+from libtuning.modules.lux.lux import Lux
+from libtuning.modules.lux.rkisp1 import LuxRkISP1
diff --git a/utils/tuning/libtuning/modules/lux/lux.py b/utils/tuning/libtuning/modules/lux/lux.py
new file mode 100644
index 00000000..4bad429a
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lux/lux.py
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+# Copyright (C) 2025, Ideas on Board
+#
+# Base Lux tuning module
+
+from ..module import Module
+
+import logging
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+class Lux(Module):
+ type = 'lux'
+ hr_name = 'Lux (Base)'
+ out_name = 'GenericLux'
+
+ def __init__(self, debug: list):
+ super().__init__()
+
+ self.debug = debug
+
+ def calculate_lux_reference_values(self, images):
+ # The lux calibration is done on a single image. For best results, the
+ # image whose lux level is closest to 1000 is chosen.
+ imgs = [img for img in images if img.macbeth is not None]
+ lux_values = [img.lux for img in imgs]
+ index = lux_values.index(min(lux_values, key=lambda v: abs(1000 - v)))
+ img = imgs[index]
+ logger.info(f'Selected image {img.name} for lux calibration')
+
+ if img.lux < 50:
+ logger.warning(f'A lux level of {img.lux} is very low for a proper lux calibration')
+
+ ref_y = self.calculate_y(img)
+ exposure_time = img.exposure
+ gain = img.againQ8_norm
+ aperture = 1
+ logger.info(f'RefY:{ref_y} Exposure time:{exposure_time}µs Gain:{gain} Aperture:{aperture}')
+ return {'referenceY': ref_y,
+ 'referenceExposureTime': exposure_time,
+ 'referenceAnalogueGain': gain,
+ 'referenceDigitalGain': 1.0,
+ 'referenceLux': img.lux}
+
+ def calculate_y(self, img):
+ max16Bit = 0xffff
+ # Average over all grey patches.
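+ # Every fourth patch (indices 3, 7, ...) of each channel is assumed to
+ # be one of the grey patches of the macbeth chart.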
+ ap_r = np.mean(img.patches[0][3::4]) / max16Bit
+ ap_g = (np.mean(img.patches[1][3::4]) + np.mean(img.patches[2][3::4])) / 2 / max16Bit
+ ap_b = np.mean(img.patches[3][3::4]) / max16Bit
+ logger.debug(f'Averaged grey patches: Red: {ap_r}, Green: {ap_g}, Blue: {ap_b}')
+
+ # Calculate white balance gains.
+ gr = ap_g / ap_r
+ gb = ap_g / ap_b
+ logger.debug(f'WB gains: Red: {gr} Blue: {gb}')
+
+ # Calculate the mean Y value of the whole image
+ a_r = np.mean(img.channels[0]) * gr
+ a_g = (np.mean(img.channels[1]) + np.mean(img.channels[2])) / 2
+ a_b = np.mean(img.channels[3]) * gb
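+ # Weight the channel averages with the Rec. 601 luma coefficients.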
+ y = 0.299 * a_r + 0.587 * a_g + 0.114 * a_b
+ y /= max16Bit
+
+ return y
diff --git a/utils/tuning/libtuning/modules/lux/rkisp1.py b/utils/tuning/libtuning/modules/lux/rkisp1.py
new file mode 100644
index 00000000..62d3f94c
--- /dev/null
+++ b/utils/tuning/libtuning/modules/lux/rkisp1.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas on Board
+#
+# Lux module for tuning rkisp1
+
+from .lux import Lux
+
+
+class LuxRkISP1(Lux):
+ hr_name = 'Lux (RkISP1)'
+ out_name = 'Lux'
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ # We don't need anything from the config file.
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ return self.calculate_lux_reference_values(images)
diff --git a/utils/tuning/libtuning/modules/module.py b/utils/tuning/libtuning/modules/module.py
index 12e2fc7c..de624384 100644
--- a/utils/tuning/libtuning/modules/module.py
+++ b/utils/tuning/libtuning/modules/module.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# module.py - Base class for algorithm-specific tuning modules
+# Base class for algorithm-specific tuning modules
# @var type Type of the module. Defined in the base module.
diff --git a/utils/tuning/libtuning/modules/static.py b/utils/tuning/libtuning/modules/static.py
new file mode 100644
index 00000000..4d0f7e18
--- /dev/null
+++ b/utils/tuning/libtuning/modules/static.py
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024, Ideas on Board
+#
+# Module implementation for static data
+
+from .module import Module
+
+
+# This module can be used in cases where the tuning file should contain
+# static data.
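+#
+# For example, as instantiated in the rkisp1 tuning script:
+#     blc = StaticModule('BlackLevelCorrection')
+#     gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2})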
+class StaticModule(Module):
+ def __init__(self, out_name: str, output: dict = None):
+ super().__init__()
+ self.out_name = out_name
+ self.hr_name = f'Static {out_name}'
+ self.type = f'static_{out_name}'
+ self.output = output if output is not None else {}
+
+ def validate_config(self, config: dict) -> bool:
+ return True
+
+ def process(self, config: dict, images: list, outputs: dict) -> dict:
+ return self.output
diff --git a/utils/tuning/libtuning/parsers/parser.py b/utils/tuning/libtuning/parsers/parser.py
index a17d8d71..0c3944c7 100644
--- a/utils/tuning/libtuning/parsers/parser.py
+++ b/utils/tuning/libtuning/parsers/parser.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# parser.py - Base class for a parser for a specific format of config file
+# Base class for a parser for a specific format of config file
class Parser(object):
def __init__(self):
diff --git a/utils/tuning/libtuning/parsers/raspberrypi_parser.py b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
index d26586ba..f1da4592 100644
--- a/utils/tuning/libtuning/parsers/raspberrypi_parser.py
+++ b/utils/tuning/libtuning/parsers/raspberrypi_parser.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi_parser.py - Parser for Raspberry Pi config file format
+# Parser for Raspberry Pi config file format
from .parser import Parser
diff --git a/utils/tuning/libtuning/parsers/yaml_parser.py b/utils/tuning/libtuning/parsers/yaml_parser.py
index 5c1673a5..1fa6b7a8 100644
--- a/utils/tuning/libtuning/parsers/yaml_parser.py
+++ b/utils/tuning/libtuning/parsers/yaml_parser.py
@@ -2,16 +2,26 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# yaml_parser.py - Parser for YAML format config file
+# Parser for YAML format config file
from .parser import Parser
+import yaml
class YamlParser(Parser):
def __init__(self):
super().__init__()
- # \todo Implement this (it's fine for now as we don't need a config for
- # rkisp1 LSC, which is the only user of this so far)
def parse(self, config_file: str, modules: list) -> (dict, list):
- return {}, []
+ # Minimal implementation that simply loads the YAML file.
+ with open(config_file, 'r') as f:
+ config = yaml.safe_load(f)
+
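+ # No validation is performed yet; the loaded document is returned as-is.
+ # The AWB module, for instance, expects to find its settings under
+ # config['general']['awb'], e.g.:
+ #
+ #   general:
+ #     awb:
+ #       algorithm: grey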
+ return config, []
diff --git a/utils/tuning/libtuning/smoothing.py b/utils/tuning/libtuning/smoothing.py
index b8a5a242..de4d920c 100644
--- a/utils/tuning/libtuning/smoothing.py
+++ b/utils/tuning/libtuning/smoothing.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# smoothing.py - Wrapper for cv2 smoothing functions to enable duck-typing
+# Wrapper for cv2 smoothing functions to enable duck-typing
import cv2
diff --git a/utils/tuning/libtuning/utils.py b/utils/tuning/libtuning/utils.py
index b60f2c9b..e35cf409 100644
--- a/utils/tuning/libtuning/utils.py
+++ b/utils/tuning/libtuning/utils.py
@@ -3,8 +3,9 @@
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# utils.py - Utilities for libtuning
+# Utilities for libtuning
+import cv2
import decimal
import math
import numpy as np
@@ -12,16 +13,15 @@ import os
from pathlib import Path
import re
import sys
+import logging
import libtuning as lt
from libtuning.image import Image
-from libtuning.macbeth import locate_macbeth
-
-# Utility functions
+from .macbeth import locate_macbeth
+logger = logging.getLogger(__name__)
-def eprint(*args, **kwargs):
- print(*args, file=sys.stderr, **kwargs)
+# Utility functions
def get_module_by_type_name(modules, name):
@@ -43,16 +43,33 @@
def _parse_image_filename(fn: Path):
- result = re.search(r'^(alsc_)?(\d+)[kK]_(\d+)?[lLuU]?.\w{3,4}$', fn.name)
- if result is None:
- eprint(f'The file name of {fn.name} is incorrectly formatted')
- return None, None, None
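+ # The file name encodes the calibration conditions as underscore-separated
+ # tokens, e.g. '3000k_1000l.dng' for a colour temperature and lux level, or
+ # 'alsc_3000k.dng' for an LSC-only image.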
+ lsc_only = False
+ color_temperature = None
+ lux = None
+
+ parts = fn.stem.split('_')
+ for part in parts:
+ if part == 'alsc':
+ lsc_only = True
+ continue
+ r = re.match(r'(\d+)[kK]', part)
+ if r:
+ color_temperature = int(r.group(1))
+ continue
+ r = re.match(r'(\d+)[lLuU]', part)
+ if r:
+ lux = int(r.group(1))
+
+ if color_temperature is None:
+ logger.error(f'The file name of "{fn.name}" does not contain a color temperature')
- color = int(result.group(2))
- lsc_only = result.group(1) is not None
- lux = None if lsc_only else int(result.group(3))
+ if lux is None and lsc_only is False:
+ logger.error(f'The file name of "{fn.name}" must either contain alsc or a lux level')
- return color, lux, lsc_only
+ return color_temperature, lux, lsc_only
# \todo Implement this from check_imgs() in ctt.py
@@ -72,30 +86,34 @@ def _validate_images(images):
def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list:
files = _list_image_files(input_dir)
if len(files) == 0:
- eprint(f'No images found in {input_dir}')
+ logger.error(f'No images found in {input_dir}')
return None
images = []
for f in files:
color, lux, lsc_only = _parse_image_filename(f)
+
if color is None:
+ logger.warning(f'Ignoring "{f.name}" as it has no associated color temperature')
continue
+ logger.info(f'Processing image "{f.name}" (color={color}, lux={lux}, lsc_only={lsc_only})')
+
# Skip lsc image if we don't need it
if lsc_only and not load_lsc:
- eprint(f'Skipping {f.name} as this tuner has no LSC module')
+ logger.warning(f'Skipping {f.name} as this tuner has no LSC module')
continue
# Skip non-lsc image if we don't need it
if not lsc_only and not load_nonlsc:
- eprint(f'Skipping {f.name} as this tuner only has an LSC module')
+ logger.warning(f'Skipping {f.name} as this tuner only has an LSC module')
continue
# Load image
try:
image = Image(f)
except Exception as e:
- eprint(f'Failed to load image {f.name}: {e}')
+ logger.error(f'Failed to load image {f.name}: {e}')
continue
# Populate simple fields
@@ -113,7 +131,7 @@ def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool)
continue
# Handle macbeth
- macbeth = locate_macbeth(config)
+ macbeth = locate_macbeth(image, config)
if macbeth is None:
continue
@@ -123,3 +141,44 @@
return None
return images
+
+
+"""
+Some code that will save virtual macbeth charts that show the difference between optimised matrices and non optimised matrices
+
+The function creates an image that is 1550 by 1050 pixels wide, and fills it with patches which are 200x200 pixels in size
+Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix
+_________________
+| |
+| Ideal Color |
+|_______________|
+| Old | new |
+| Color | Color |
+|_______|_______|
+
+Nice way of showing how the optimisation helps change the colors and the color matricies
+"""
+def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
+ image = np.zeros((1050, 1550, 3), dtype=np.uint8)
+ colorindex = -1
+ for y in range(6):
+ for x in range(4): # Create the 6x4 grid of the macbeth chart
+ colorindex += 1
+ xlocation = 50 + 250 * x # Leave a 50 px black gap between the squares, as on a real macbeth chart
+ ylocation = 50 + 250 * y
+ for g in range(200):
+ for i in range(100):
+ image[xlocation + i, ylocation + g] = macbeth_rgb[colorindex]
+ xlocation = 150 + 250 * x
+ ylocation = 50 + 250 * y
+ for i in range(100):
+ for g in range(100):
+ image[xlocation + i, ylocation + g] = original_rgb[colorindex] # Smaller squares below the ideal colour compare the old and the new colours
+ xlocation = 150 + 250 * x
+ ylocation = 150 + 250 * y
+ for i in range(100):
+ for g in range(100):
+ image[xlocation + i, ylocation + g] = new_rgb[colorindex]
+
+ im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(f'{output_filename} Generated Macbeth Chart.png', im_bgr)
diff --git a/utils/tuning/raspberrypi/alsc.py b/utils/tuning/raspberrypi/alsc.py
index 024eb5a3..ba8fc9e1 100644
--- a/utils/tuning/raspberrypi/alsc.py
+++ b/utils/tuning/raspberrypi/alsc.py
@@ -2,7 +2,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# alsc.py - ALSC module instance for Raspberry Pi tuning scripts
+# ALSC module instance for Raspberry Pi tuning scripts
import libtuning as lt
from libtuning.modules.lsc import ALSCRaspberryPi
diff --git a/utils/tuning/raspberrypi_alsc_only.py b/utils/tuning/raspberrypi_alsc_only.py
index af04e6a8..777d8007 100755
--- a/utils/tuning/raspberrypi_alsc_only.py
+++ b/utils/tuning/raspberrypi_alsc_only.py
@@ -3,7 +3,7 @@
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
#
-# raspberrypi_alsc_only.py - Tuning script for raspberrypi, ALSC only
+# Tuning script for raspberrypi, ALSC only
import sys
diff --git a/utils/tuning/requirements.txt b/utils/tuning/requirements.txt
new file mode 100644
index 00000000..3705769b
--- /dev/null
+++ b/utils/tuning/requirements.txt
@@ -0,0 +1,9 @@
+coloredlogs
+matplotlib
+numpy
+opencv-python
+py3exiv2
+pyyaml
+rawpy
+scikit-learn
+scipy
diff --git a/utils/tuning/rkisp1.py b/utils/tuning/rkisp1.py
index 1cea6ddb..207b717a 100755
--- a/utils/tuning/rkisp1.py
+++ b/utils/tuning/rkisp1.py
@@ -2,39 +2,60 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <paul.elder@ideasonboard.com>
+# Copyright (C) 2024, Ideas On Board
#
-# rkisp1.py - Tuning script for rkisp1
+# Tuning script for rkisp1
+import logging
import sys
+import coloredlogs
import libtuning as lt
-from libtuning.parsers import YamlParser
from libtuning.generators import YamlOutput
+from libtuning.modules.agc import AGCRkISP1
+from libtuning.modules.awb import AWBRkISP1
+from libtuning.modules.ccm import CCMRkISP1
from libtuning.modules.lsc import LSCRkISP1
+from libtuning.modules.lux import LuxRkISP1
+from libtuning.modules.static import StaticModule
+from libtuning.parsers import YamlParser
+
+coloredlogs.install(level=logging.INFO, fmt='%(name)s %(levelname)s %(message)s')
+
+agc = AGCRkISP1(debug=[lt.Debug.Plot])
+awb = AWBRkISP1(debug=[lt.Debug.Plot])
+blc = StaticModule('BlackLevelCorrection')
+ccm = CCMRkISP1(debug=[lt.Debug.Plot])
+color_processing = StaticModule('ColorProcessing')
+filter = StaticModule('Filter')
+gamma_out = StaticModule('GammaOutCorrection', {'gamma': 2.2})
+lsc = LSCRkISP1(debug=[lt.Debug.Plot],
+ # This is for the actual LSC tuning, and is part of the base LSC
+ # module. rkisp1's table sector sizes (16x16 programmed as mirrored
+ # 8x8) are separate, and are hardcoded in its specific LSC tuning
+ # module.
+ sector_shape=(17, 17),
+
+ sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+ sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
+
+ # This is the function that will be used to average the pixels in
+ # each sector. This can also be a custom function.
+ sector_average_function=lt.average.Mean(),
+
+ # This is the function that will be used to smooth the color ratio
+ # values. This can also be a custom function.
+ smoothing_function=lt.smoothing.MedianBlur(3),)
+lux = LuxRkISP1(debug=[lt.Debug.Plot])
tuner = lt.Tuner('RkISP1')
-tuner.add(LSCRkISP1(
- debug=[lt.Debug.Plot],
- # This is for the actual LSC tuning, and is part of the base LSC
- # module. rkisp1's table sector sizes (16x16 programmed as mirrored
- # 8x8) are separate, and is hardcoded in its specific LSC tuning
- # module.
- sector_shape=(17, 17),
-
- sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
- sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
-
- # This is the function that will be used to average the pixels in
- # each sector. This can also be a custom function.
- sector_average_function=lt.average.Mean(),
-
- # This is the function that will be used to smooth the color ratio
- # values. This can also be a custom function.
- smoothing_function=lt.smoothing.MedianBlur(3),
- ))
+tuner.add([agc, awb, blc, ccm, color_processing, filter, gamma_out, lsc, lux])
tuner.set_input_parser(YamlParser())
tuner.set_output_formatter(YamlOutput())
-tuner.set_output_order([LSCRkISP1])
+
+# Bayesian AWB uses the lux value, so insert the lux algorithm before AWB.
+tuner.set_output_order([agc, lux, awb, blc, ccm, color_processing,
+ filter, gamma_out, lsc])
if __name__ == '__main__':
sys.exit(tuner.run(sys.argv))
diff --git a/utils/update-kernel-headers.sh b/utils/update-kernel-headers.sh
index 590986d2..9a64dfb5 100755
--- a/utils/update-kernel-headers.sh
+++ b/utils/update-kernel-headers.sh
@@ -9,7 +9,7 @@ if [ $# != 1 ] ; then
fi
header_dir="$(dirname "$(realpath "$0")")/../include/linux"
-kernel_dir="$1"
+kernel_dir="$(realpath "$1")"
# Bail out if the directory doesn't contain kernel sources
line=$(head -3 "${kernel_dir}/Kbuild" 2>/dev/null | tail -1)
@@ -52,6 +52,7 @@ headers="
linux/media-bus-format.h
linux/media.h
linux/rkisp1-config.h
+ linux/udmabuf.h
linux/v4l2-common.h
linux/v4l2-controls.h
linux/v4l2-mediabus.h